diff --git a/cfgmgr/buffermgrdyn.cpp b/cfgmgr/buffermgrdyn.cpp
index b3ce88c6f3..1c5b99a6f8 100644
--- a/cfgmgr/buffermgrdyn.cpp
+++ b/cfgmgr/buffermgrdyn.cpp
@@ -111,8 +111,11 @@ BufferMgrDynamic::BufferMgrDynamic(DBConnector *cfgDb, DBConnector *stateDb, DBC
     }
     catch (...)
     {
-        SWSS_LOG_ERROR("Lua scripts for buffer calculation were not loaded successfully, buffermgrd won't start");
-        return;
+        if (platform != "mock_test")
+        {
+            SWSS_LOG_ERROR("Lua scripts for buffer calculation were not loaded successfully, buffermgrd won't start");
+            return;
+        }
     }

     // Init timer
@@ -718,7 +721,13 @@ void BufferMgrDynamic::recalculateSharedBufferPool()
                 //  - In case the shared headroom pool size is statically configured, as it is programmed to APPL_DB during buffer pool handling,
                 //    - any change from lua plugin will be ignored.
                 //    - will handle ingress_lossless_pool in the way all other pools are handled in this case
-                auto &pool = m_bufferPoolLookup[poolName];
+                const auto &poolRef = m_bufferPoolLookup.find(poolName);
+                if (poolRef == m_bufferPoolLookup.end())
+                {
+                    SWSS_LOG_WARN("Unconfigured buffer pool %s received from lua plugin", poolName.c_str());
+                    continue;
+                }
+                auto &pool = poolRef->second;
                 auto &poolSizeStr = pairs[1];
                 auto old_xoff = pool.xoff;
                 bool xoff_updated = false;
@@ -875,10 +884,8 @@ void BufferMgrDynamic::updateBufferProfileToDb(const string &name, const buffer_
     }

     vector<FieldValueTuple> fvVector;
-    string mode = getPgPoolMode();

-    // profile threshold field name
-    mode += "_th";
+    const string &&mode = profile.threshold_mode.empty() ? getPgPoolMode() + "_th" : profile.threshold_mode;

     if (profile.lossless)
     {
@@ -959,7 +966,7 @@ task_process_status BufferMgrDynamic::allocateProfile(const string &speed, const
     string mode = getPgPoolMode();
     if (mode.empty())
     {
-        SWSS_LOG_NOTICE("BUFFER_PROFILE %s cannot be created because the buffer pool isn't ready", profile_name.c_str());
+        SWSS_LOG_INFO("BUFFER_PROFILE %s cannot be created because the buffer pool isn't ready", profile_name.c_str());
         return task_process_status::task_need_retry;
     }

@@ -1430,9 +1437,10 @@ task_process_status BufferMgrDynamic::refreshPgsForPort(const string &port, cons
         return task_process_status::task_success;
     }

-    if (!m_bufferPoolReady)
+    if (!m_bufferPoolReady || m_defaultThreshold.empty())
     {
-        SWSS_LOG_INFO("Nothing to be done since the buffer pool is not ready");
+        SWSS_LOG_INFO("Nothing to be done since either the buffer pool or the default threshold is not ready");
+        m_bufferObjectsPending = true;
         return task_process_status::task_success;
     }

@@ -1454,6 +1462,12 @@ task_process_status BufferMgrDynamic::refreshPgsForPort(const string &port, cons

         if (portPg.dynamic_calculated)
         {
+            if (portInfo.state != PORT_READY)
+            {
+                SWSS_LOG_INFO("Nothing to be done for %s since the port is not ready", key.c_str());
+                continue;
+            }
+
             string threshold;
             // Calculate new headroom size
             if (portPg.static_configured)
@@ -1892,10 +1906,16 @@ task_process_status BufferMgrDynamic::handleBufferMaxParam(KeyOpFieldsValuesTupl
 task_process_status BufferMgrDynamic::handleDefaultLossLessBufferParam(KeyOpFieldsValuesTuple &tuple)
 {
     string op = kfvOp(tuple);
-    string newRatio = "0";
+    string newRatio = "";

     if (op == SET_COMMAND)
     {
+        if (m_bufferPoolLookup.find(INGRESS_LOSSLESS_PG_POOL_NAME) == m_bufferPoolLookup.end())
+        {
+            SWSS_LOG_INFO("%s has not been configured, need to retry", INGRESS_LOSSLESS_PG_POOL_NAME);
+            return task_process_status::task_need_retry;
+        }
+
         for (auto i : kfvFieldsValues(tuple))
         {
             if (fvField(i) == "default_dynamic_th")
@@ -1910,6 +1930,10 @@ task_process_status BufferMgrDynamic::handleDefaultLossLessBufferParam(KeyOpFiel
             }
         }
     }
+    else if (op == DEL_COMMAND)
+    {
+        newRatio = "";
+    }
     else
     {
         SWSS_LOG_ERROR("Unsupported command %s received for DEFAULT_LOSSLESS_BUFFER_PARAMETER table", op.c_str());
@@ -2398,6 +2422,10 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues
         // For set command:
         // 1. Create the corresponding table entries in APPL_DB
        // 2. Record the table in the internal cache m_bufferProfileLookup
+
+        // If the profile did not exist, it will be created on the next line by the [] operator with incomplete data.
+        // In case the flow does not finish successfully, the incomplete profile should be removed.
+        bool needRemoveOnFailure = (m_bufferProfileLookup.find(profileName) == m_bufferProfileLookup.end());
         buffer_profile_t &profileApp = m_bufferProfileLookup[profileName];

         profileApp.static_configured = true;
@@ -2418,24 +2446,44 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues
                 if (!value.empty())
                 {
                     auto &poolName = value;
-                    if (poolName.empty())
-                    {
-                        SWSS_LOG_ERROR("BUFFER_PROFILE: Invalid format of reference to pool: %s", value.c_str());
-                        return task_process_status::task_invalid_entry;
-                    }
-
                     auto poolRef = m_bufferPoolLookup.find(poolName);
                     if (poolRef == m_bufferPoolLookup.end())
                     {
-                        SWSS_LOG_WARN("Pool %s hasn't been configured yet, need retry", poolName.c_str());
+                        SWSS_LOG_INFO("Pool %s hasn't been configured yet, need retry", poolName.c_str());
+                        if (needRemoveOnFailure)
+                        {
+                            m_bufferProfileLookup.erase(profileName);
+                        }
                         return task_process_status::task_need_retry;
                     }
                     profileApp.pool_name = poolName;
                     profileApp.direction = poolRef->second.direction;
+                    auto threshold_mode = poolRef->second.mode + "_th";
+                    if (profileApp.threshold_mode.empty())
+                    {
+                        profileApp.threshold_mode = threshold_mode;
+                    }
+                    else if (profileApp.threshold_mode != threshold_mode)
+                    {
+                        SWSS_LOG_ERROR("Buffer profile %s's mode %s doesn't match buffer pool %s, whose mode is %s",
+                                       profileName.c_str(),
+                                       profileApp.threshold_mode.c_str(),
+                                       poolName.c_str(),
+                                       threshold_mode.c_str());
+                        if (needRemoveOnFailure)
+                        {
+                            m_bufferProfileLookup.erase(profileName);
+                        }
+                        return task_process_status::task_failed;
+                    }
                 }
                 else
                 {
                     SWSS_LOG_ERROR("Pool for BUFFER_PROFILE %s hasn't been specified", field.c_str());
+                    if (needRemoveOnFailure)
+                    {
+                        m_bufferProfileLookup.erase(profileName);
+                    }
                     return task_process_status::task_failed;
                 }
             }
@@ -2456,12 +2504,25 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues
             {
                 profileApp.size = value;
             }
-            else if (field == buffer_dynamic_th_field_name)
-            {
-                profileApp.threshold = value;
-            }
-            else if (field == buffer_static_th_field_name)
+            else if (field == buffer_dynamic_th_field_name || field == buffer_static_th_field_name)
             {
+                if (profileApp.threshold_mode.empty())
+                {
+                    profileApp.threshold_mode = field;
+                }
+                else if (profileApp.threshold_mode != field)
+                {
+                    SWSS_LOG_ERROR("Buffer profile %s's mode %s doesn't match buffer pool %s, whose mode is %s",
+                                   profileName.c_str(),
+                                   field.c_str(),
+                                   profileApp.pool_name.c_str(),
+                                   profileApp.threshold_mode.c_str());
+                    if (needRemoveOnFailure)
+                    {
+                        m_bufferProfileLookup.erase(profileName);
+                    }
+                    return task_process_status::task_failed;
+                }
                 profileApp.threshold = value;
             }
             else if (field == buffer_headroom_type_field_name)
@@ -2484,7 +2545,11 @@ task_process_status BufferMgrDynamic::handleBufferProfileTable(KeyOpFieldsValues
             if (profileApp.direction != BUFFER_INGRESS)
             {
                 SWSS_LOG_ERROR("BUFFER_PROFILE %s is ingress but referencing an egress pool %s", profileName.c_str(), profileApp.pool_name.c_str());
-                return task_process_status::task_success;
+                if (needRemoveOnFailure)
+                {
+                    m_bufferProfileLookup.erase(profileName);
+                }
+                return task_process_status::task_failed;
             }

             if (profileApp.dynamic_calculated)
@@ -2752,6 +2817,9 @@ void BufferMgrDynamic::handleDelSingleBufferObjectOnAdminDownPort(buffer_directi
 task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &key, const string &port, const KeyOpFieldsValuesTuple &tuple)
 {
     string op = kfvOp(tuple);
+    // If the buffer PG did not exist, it will be created on the next line by the [] operator with incomplete data.
+    // In case the flow does not finish successfully, the incomplete PG entry should be removed.
+    bool needRemoveOnFailure = (m_portPgLookup[port].find(key) == m_portPgLookup[port].end());
     buffer_pg_t &bufferPg = m_portPgLookup[port][key];
     port_info_t &portInfo = m_portInfoLookup[port];

@@ -2787,6 +2855,10 @@ task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke
                 if (profileName.empty())
                 {
                     SWSS_LOG_ERROR("BUFFER_PG: Invalid format of reference to profile: %s", value.c_str());
+                    if (needRemoveOnFailure)
+                    {
+                        m_portPgLookup[port].erase(key);
+                    }
                     return task_process_status::task_invalid_entry;
                 }

@@ -2795,13 +2867,25 @@ task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke
                 {
                     // In this case, we shouldn't set the dynamic calculated flag to true
                     // It will be updated when its profile configured.
-                    bufferPg.dynamic_calculated = false;
-                    SWSS_LOG_WARN("Profile %s hasn't been configured yet, skip", profileName.c_str());
+                    if (needRemoveOnFailure)
+                    {
+                        m_portPgLookup[port].erase(key);
+                    }
+                    SWSS_LOG_INFO("Profile %s hasn't been configured yet, skip", profileName.c_str());
                     return task_process_status::task_need_retry;
                 }
                 else
                 {
                     buffer_profile_t &profileRef = searchRef->second;
+                    if (profileRef.direction == BUFFER_EGRESS)
+                    {
+                        if (needRemoveOnFailure)
+                        {
+                            m_portPgLookup[port].erase(key);
+                        }
+                        SWSS_LOG_ERROR("Egress buffer profile configured on PG %s", key.c_str());
+                        return task_process_status::task_failed;
+                    }
                     bufferPg.dynamic_calculated = profileRef.dynamic_calculated;
                     bufferPg.configured_profile_name = profileName;
                     bufferPg.lossless = profileRef.lossless;
@@ -2813,6 +2897,10 @@ task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke
             if (field != buffer_profile_field_name)
             {
                 SWSS_LOG_ERROR("BUFFER_PG: Invalid field %s", field.c_str());
+                if (needRemoveOnFailure)
+                {
+                    m_portPgLookup[port].erase(key);
+                }
                 return task_process_status::task_invalid_entry;
             }

@@ -2896,6 +2984,7 @@ task_process_status BufferMgrDynamic::handleSingleBufferPgEntry(const string &ke
     else
     {
         SWSS_LOG_ERROR("Unknown operation type %s", op.c_str());
+        m_portPgLookup[port].erase(key);
         return task_process_status::task_invalid_entry;
     }

@@ -2911,7 +3000,7 @@ task_process_status BufferMgrDynamic::checkBufferProfileDirection(const string &
     auto profileSearchRef = m_bufferProfileLookup.find(profileName);
     if (profileSearchRef == m_bufferProfileLookup.end())
     {
-        SWSS_LOG_NOTICE("Profile %s doesn't exist, need retry", profileName.c_str());
+        SWSS_LOG_INFO("Profile %s doesn't exist, need retry", profileName.c_str());
         return task_process_status::task_need_retry;
     }

@@ -2983,6 +3072,8 @@ task_process_status BufferMgrDynamic::handleSingleBufferQueueEntry(const string
             }
             SWSS_LOG_INFO("Removing entry %s from APPL_DB", key.c_str());
             m_portQueueLookup[port].erase(queues);
+            if (m_portQueueLookup[port].empty())
+                m_portQueueLookup.erase(port);
             if (PORT_ADMIN_DOWN == portInfo.state)
             {
                 handleDelSingleBufferObjectOnAdminDownPort(BUFFER_QUEUE, port, key, portInfo);
@@ -3189,7 +3280,8 @@ void BufferMgrDynamic::doTask(Consumer &consumer)
             {
                 case task_process_status::task_failed:
                     SWSS_LOG_ERROR("Failed to process table update");
-                    return;
+                    it = consumer.m_toSync.erase(it);
+                    break;
                 case task_process_status::task_need_retry:
                     SWSS_LOG_INFO("Unable to process table update. Will retry...");
                     it++;
@@ -3238,7 +3330,7 @@ void BufferMgrDynamic::doTask(Consumer &consumer)
  */
 void BufferMgrDynamic::handlePendingBufferObjects()
 {
-    if (m_bufferPoolReady)
+    if (m_bufferPoolReady && !m_defaultThreshold.empty())
     {
         if (!m_pendingApplyZeroProfilePorts.empty())
         {
diff --git a/cfgmgr/buffermgrdyn.h b/cfgmgr/buffermgrdyn.h
index ef1e4f567f..cb94227522 100644
--- a/cfgmgr/buffermgrdyn.h
+++ b/cfgmgr/buffermgrdyn.h
@@ -71,6 +71,7 @@ typedef struct {
     std::string xon_offset;
     std::string xoff;
     std::string threshold;
+    std::string threshold_mode;
     std::string pool_name;
     // port_pgs - stores pgs referencing this profile
     // An element will be added or removed when a PG added or removed
@@ -177,7 +178,7 @@ class BufferMgrDynamic : public Orch

     std::string m_configuredSharedHeadroomPoolSize;

-    std::shared_ptr<DBConnector> m_applDb = nullptr;
+    DBConnector *m_applDb = nullptr;
     SelectableTimer *m_buffermgrPeriodtimer = nullptr;

     // Fields for zero pool and profiles
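The erase-on-failure handling added throughout buffermgrdyn.cpp hinges on a property of std::map::operator[]: indexing a missing key default-constructs the value in place. A minimal standalone sketch of that pattern follows; the Entry type and the "profile0" key are hypothetical stand-ins, not code from the patch.

    #include <cassert>
    #include <map>
    #include <string>

    struct Entry { std::string pool; };  // stand-in for buffer_profile_t

    int main()
    {
        std::map<std::string, Entry> lookup;

        // operator[] default-constructs the value for a missing key, so the
        // lookup itself creates an incomplete record. Remember whether the
        // entry existed beforehand, mirroring needRemoveOnFailure above.
        bool needRemoveOnFailure = (lookup.find("profile0") == lookup.end());
        lookup["profile0"].pool = "";    // created here, not yet fully populated

        bool poolConfigured = false;     // pretend the referenced pool is missing
        if (!poolConfigured && needRemoveOnFailure)
        {
            lookup.erase("profile0");    // drop the half-built entry before retrying
        }

        assert(lookup.empty());          // no incomplete profile left behind
        return 0;
    }

Without the guard, a retried SET would find a stale, half-initialized entry on its second pass and skip fields it should have treated as new.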
diff --git a/orchagent/aclorch.cpp b/orchagent/aclorch.cpp
index e371ecd980..73aa02dac9 100644
--- a/orchagent/aclorch.cpp
+++ b/orchagent/aclorch.cpp
@@ -153,6 +153,176 @@ static const acl_capabilities_t defaultAclActionsSupported =
     }
 };

+static acl_table_action_list_lookup_t defaultAclActionList =
+{
+    {
+        // L3
+        TABLE_TYPE_L3,
+        {
+            {
+                ACL_STAGE_INGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_PACKET_ACTION,
+                    SAI_ACL_ACTION_TYPE_REDIRECT
+                }
+            },
+            {
+                ACL_STAGE_EGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_PACKET_ACTION,
+                    SAI_ACL_ACTION_TYPE_REDIRECT
+                }
+            }
+        }
+    },
+    {
+        // L3V6
+        TABLE_TYPE_L3V6,
+        {
+            {
+                ACL_STAGE_INGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_PACKET_ACTION,
+                    SAI_ACL_ACTION_TYPE_REDIRECT
+                }
+            },
+            {
+                ACL_STAGE_EGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_PACKET_ACTION,
+                    SAI_ACL_ACTION_TYPE_REDIRECT
+                }
+            }
+        }
+    },
+    {
+        // MIRROR
+        TABLE_TYPE_MIRROR,
+        {
+            {
+                ACL_STAGE_INGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_MIRROR_INGRESS
+                }
+            },
+            {
+                ACL_STAGE_EGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_MIRROR_EGRESS
+                }
+            }
+        }
+    },
+    {
+        // MIRRORV6
+        TABLE_TYPE_MIRRORV6,
+        {
+            {
+                ACL_STAGE_INGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_MIRROR_INGRESS
+                }
+            },
+            {
+                ACL_STAGE_EGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_MIRROR_EGRESS
+                }
+            }
+        }
+    },
+    {
+        // MIRROR_DSCP
+        TABLE_TYPE_MIRROR_DSCP,
+        {
+            {
+                ACL_STAGE_INGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_MIRROR_INGRESS
+                }
+            },
+            {
+                ACL_STAGE_EGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_MIRROR_EGRESS
+                }
+            }
+        }
+    },
+    {
+        // TABLE_TYPE_PFCWD
+        TABLE_TYPE_PFCWD,
+        {
+            {
+                ACL_STAGE_INGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_PACKET_ACTION
+                }
+            },
+            {
+                ACL_STAGE_EGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_PACKET_ACTION
+                }
+            }
+        }
+    },
+    {
+        // MCLAG
+        TABLE_TYPE_MCLAG,
+        {
+            {
+                ACL_STAGE_INGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_PACKET_ACTION
+                }
+            },
+            {
+                ACL_STAGE_EGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_PACKET_ACTION
+                }
+            }
+        }
+    },
+    {
+        // MUX
+        TABLE_TYPE_MUX,
+        {
+            {
+                ACL_STAGE_INGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_PACKET_ACTION
+                }
+            },
+            {
+                ACL_STAGE_EGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_PACKET_ACTION
+                }
+            }
+        }
+    },
+    {
+        // DROP
+        TABLE_TYPE_DROP,
+        {
+            {
+                ACL_STAGE_INGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_PACKET_ACTION
+                }
+            },
+            {
+                ACL_STAGE_EGRESS,
+                {
+                    SAI_ACL_ACTION_TYPE_PACKET_ACTION
+                }
+            }
+        }
+    }
+};
+
 static acl_ip_type_lookup_t aclIpTypeLookup =
 {
     { IP_TYPE_ANY,         SAI_ACL_IP_TYPE_ANY },
@@ -301,6 +471,12 @@ const set<sai_acl_action_type_t>& AclTableType::getActions() const
     return m_aclAcitons;
 }

+bool AclTableType::addAction(sai_acl_action_type_t action)
+{
+    m_aclAcitons.insert(action);
+    return true;
+}
+
 AclTableTypeBuilder& AclTableTypeBuilder::withName(string name)
 {
     m_tableType.m_name = name;
@@ -1808,6 +1984,51 @@ AclTable::AclTable(AclOrch *pAclOrch) noexcept : m_pAclOrch(pAclOrch)
 {
 }

+bool AclTable::addMandatoryActions()
+{
+    SWSS_LOG_ENTER();
+
+    if (stage == ACL_STAGE_UNKNOWN)
+    {
+        return false;
+    }
+
+    if (!m_pAclOrch->isAclActionListMandatoryOnTableCreation(stage))
+    {
+        // No-op if the action list is not mandatory on table creation.
+        return true;
+    }
+
+    if (!type.getActions().empty())
+    {
+        // No change if an action_list is already provided.
+        return true;
+    }
+
+    sai_acl_action_type_t acl_action = SAI_ACL_ACTION_TYPE_COUNTER;
+    if (m_pAclOrch->isAclActionSupported(stage, acl_action))
+    {
+        SWSS_LOG_INFO("Add counter acl action");
+        type.addAction(acl_action);
+    }
+
+    if (defaultAclActionList.count(type.getName()) != 0)
+    {
+        // Add the default action list
+        for (auto action : defaultAclActionList[type.getName()][stage])
+        {
+            if (m_pAclOrch->isAclActionSupported(stage, action))
+            {
+                SWSS_LOG_INFO("Added default action for table type %s stage %s",
+                              type.getName().c_str(),
+                              ((stage == ACL_STAGE_INGRESS) ? "INGRESS" : "EGRESS"));
+                type.addAction(action);
+            }
+        }
+    }
+
+    return true;
+}
+
 bool AclTable::validateAddType(const AclTableType &tableType)
 {
     SWSS_LOG_ENTER();
@@ -3949,6 +4170,8 @@ void AclOrch::doAclTableTask(Consumer &consumer)

                 newTable.validateAddType(*tableType);

+                newTable.addMandatoryActions();
+
                 // validate and create/update ACL Table
                 if (bAllAttributesOk && newTable.validate())
                 {
diff --git a/orchagent/aclorch.h b/orchagent/aclorch.h
index 710720a5c1..02631d934e 100644
--- a/orchagent/aclorch.h
+++ b/orchagent/aclorch.h
@@ -113,6 +113,9 @@ typedef tuple<sai_acl_range_type_t, int, int> acl_range_properties_t;
 typedef map<acl_stage_type_t, acl_action_capabilities_t> acl_capabilities_t;
 typedef map<sai_acl_action_type_t, set<int32_t>> acl_action_enum_values_capabilities_t;

+typedef map<acl_stage_type_t, set<sai_acl_action_type_t> > acl_stage_action_list_t;
+typedef map<string, acl_stage_action_list_t> acl_table_action_list_lookup_t;
+
 class AclRule;

 class AclTableMatchInterface
@@ -156,6 +159,8 @@ class AclTableType
     const set<sai_acl_range_type_t>& getRangeTypes() const;
     const set<sai_acl_action_type_t>& getActions() const;

+    bool addAction(sai_acl_action_type_t action);
+
 private:
     friend class AclTableTypeBuilder;

@@ -387,6 +392,9 @@ class AclTable
     bool validate();
     bool create();

+    // Add actions to the ACL table if a mandatory action list is required on table creation.
+    bool addMandatoryActions();
+
     // validate AclRule match attribute against rule and table configuration
     bool validateAclRuleMatch(sai_acl_entry_attr_t matchId, const AclRule& rule) const;
     // validate AclRule action attribute against rule and table configuration
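defaultAclActionList is a two-level map, table-type name to per-stage action sets, which addMandatoryActions() walks when no action_list was supplied. A simplified standalone sketch of the same shape; the Stage and Action enums below are hypothetical stand-ins for the SAI types, not code from the patch.

    #include <iostream>
    #include <map>
    #include <set>
    #include <string>

    // Simplified stand-ins for the SAI/orchagent types used by the lookup table.
    enum Stage { STAGE_INGRESS, STAGE_EGRESS };
    enum Action { ACTION_PACKET_ACTION, ACTION_REDIRECT, ACTION_COUNTER };

    using StageActionList = std::map<Stage, std::set<Action>>;
    using TableActionLookup = std::map<std::string, StageActionList>;

    static TableActionLookup defaultActionList =
    {
        { "L3", { { STAGE_INGRESS, { ACTION_PACKET_ACTION, ACTION_REDIRECT } },
                  { STAGE_EGRESS,  { ACTION_PACKET_ACTION, ACTION_REDIRECT } } } },
    };

    int main()
    {
        // Same shape as AclTable::addMandatoryActions(): check the table type
        // first, then walk the per-stage action set and add each entry.
        if (defaultActionList.count("L3") != 0)
        {
            for (auto action : defaultActionList["L3"][STAGE_INGRESS])
            {
                std::cout << "would add action " << action << "\n";
            }
        }
        return 0;
    }

The count() check matters: operator[] on the outer map would otherwise create an empty per-stage entry for unknown table types as a side effect of the lookup.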
diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp
index f9b91e7a16..b9fbd096b4 100644
--- a/orchagent/bufferorch.cpp
+++ b/orchagent/bufferorch.cpp
@@ -1,5 +1,6 @@
 #include "tokenize.h"
 #include "bufferorch.h"
+#include "directory.h"
 #include "logger.h"
 #include "sai_serialize.h"
 #include "warm_restart.h"
@@ -16,6 +17,7 @@ extern sai_switch_api_t *sai_switch_api;
 extern sai_buffer_api_t *sai_buffer_api;

 extern PortsOrch *gPortsOrch;
+extern Directory<Orch*> gDirectory;
 extern sai_object_id_t gSwitchId;

 #define BUFFER_POOL_WATERMARK_FLEX_STAT_COUNTER_POLL_MSECS "60000"
@@ -948,6 +950,20 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple)
                     return handle_status;
                 }
             }
+            // create/remove a port queue counter for the queue buffer
+            else
+            {
+                auto flexCounterOrch = gDirectory.get<FlexCounterOrch*>();
+                auto queues = tokens[1];
+                if (op == SET_COMMAND && flexCounterOrch->getQueueCountersState())
+                {
+                    gPortsOrch->createPortBufferQueueCounters(port, queues);
+                }
+                else if (op == DEL_COMMAND && flexCounterOrch->getQueueCountersState())
+                {
+                    gPortsOrch->removePortBufferQueueCounters(port, queues);
+                }
+            }
         }
     }
 }
@@ -1007,7 +1023,7 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTup
     if (op == SET_COMMAND)
     {
         ref_resolve_status resolve_result = resolveFieldRefValue(m_buffer_type_maps, buffer_profile_field_name,
-                                                                 buffer_to_ref_table_map.at(buffer_profile_field_name), tuple,
+                                                                 buffer_to_ref_table_map.at(buffer_profile_field_name), tuple,
                                                                  sai_buffer_profile, buffer_profile_name);
         if (ref_resolve_status::success != resolve_result)
         {
@@ -1087,6 +1103,20 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTup
                     return handle_status;
                 }
             }
+            // create/remove a port PG counter for the PG buffer
+            else
+            {
+                auto flexCounterOrch = gDirectory.get<FlexCounterOrch*>();
+                auto pgs = tokens[1];
+                if (op == SET_COMMAND && flexCounterOrch->getPgWatermarkCountersState())
+                {
+                    gPortsOrch->createPortBufferPgCounters(port, pgs);
+                }
+                else if (op == DEL_COMMAND && flexCounterOrch->getPgWatermarkCountersState())
+                {
+                    gPortsOrch->removePortBufferPgCounters(port, pgs);
+                }
+            }
         }
     }
 }
diff --git a/orchagent/flex_counter/flex_counter_manager.cpp b/orchagent/flex_counter/flex_counter_manager.cpp
index 3e61289acd..ecccf415b2 100644
--- a/orchagent/flex_counter/flex_counter_manager.cpp
+++ b/orchagent/flex_counter/flex_counter_manager.cpp
@@ -89,14 +89,28 @@ FlexCounterManager::FlexCounterManager(
         const uint polling_interval,
         const bool enabled,
         FieldValueTuple fv_plugin) :
+    FlexCounterManager("FLEX_COUNTER_DB", group_name, stats_mode,
+                       polling_interval, enabled, fv_plugin)
+{
+}
+
+FlexCounterManager::FlexCounterManager(
+        const string& db_name,
+        const string& group_name,
+        const StatsMode stats_mode,
+        const uint polling_interval,
+        const bool enabled,
+        FieldValueTuple fv_plugin) :
     group_name(group_name),
     stats_mode(stats_mode),
     polling_interval(polling_interval),
     enabled(enabled),
     fv_plugin(fv_plugin),
-    flex_counter_db(new DBConnector("FLEX_COUNTER_DB", 0)),
-    flex_counter_group_table(new ProducerTable(flex_counter_db.get(), FLEX_COUNTER_GROUP_TABLE)),
-    flex_counter_table(new ProducerTable(flex_counter_db.get(), FLEX_COUNTER_TABLE))
+    flex_counter_db(new DBConnector(db_name, 0)),
+    flex_counter_group_table(new ProducerTable(flex_counter_db.get(),
+                                               FLEX_COUNTER_GROUP_TABLE)),
+    flex_counter_table(new ProducerTable(flex_counter_db.get(),
+                                         FLEX_COUNTER_TABLE))
 {
     SWSS_LOG_ENTER();
diff --git a/orchagent/flex_counter/flex_counter_manager.h b/orchagent/flex_counter/flex_counter_manager.h
index 6a997f28f7..38bf829058 100644
--- a/orchagent/flex_counter/flex_counter_manager.h
+++ b/orchagent/flex_counter/flex_counter_manager.h
@@ -52,6 +52,14 @@ class FlexCounterManager

     FlexCounterManager() {}

+    FlexCounterManager(
+            const std::string& db_name,
+            const std::string& group_name,
+            const StatsMode stats_mode,
+            const uint polling_interval,
+            const bool enabled,
+            swss::FieldValueTuple fv_plugin = std::make_pair("",""));
+
     FlexCounterManager(const FlexCounterManager&) = delete;
     FlexCounterManager& operator=(const FlexCounterManager&) = delete;
     virtual ~FlexCounterManager();
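The new FlexCounterManager constructor keeps the old signature working by delegating to a variant that takes the database name first, so gearbox callers can target GB_FLEX_COUNTER_DB while existing call sites stay unchanged. A minimal sketch of that delegating-constructor pattern; the Manager class and its members below are illustrative only.

    #include <iostream>
    #include <string>

    class Manager
    {
    public:
        // Legacy constructor: forwards to the new one with the default DB name,
        // mirroring how the original FlexCounterManager signature is preserved.
        Manager(const std::string& group)
            : Manager("FLEX_COUNTER_DB", group)
        {
        }

        Manager(const std::string& db_name, const std::string& group)
            : m_db(db_name), m_group(group)
        {
        }

        void describe() const { std::cout << m_group << " -> " << m_db << "\n"; }

    private:
        std::string m_db;
        std::string m_group;
    };

    int main()
    {
        Manager legacy("PORT_STAT_COUNTER");                          // default DB
        Manager gearbox("GB_FLEX_COUNTER_DB", "GBPORT_STAT_COUNTER"); // gearbox DB
        legacy.describe();
        gearbox.describe();
        return 0;
    }

Delegation keeps the member-initializer list in exactly one place, so the two constructors cannot drift apart.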
diff --git a/orchagent/flexcounterorch.cpp b/orchagent/flexcounterorch.cpp
index a3770b76cb..f16312f750 100644
--- a/orchagent/flexcounterorch.cpp
+++ b/orchagent/flexcounterorch.cpp
@@ -10,6 +10,7 @@
 #include "debugcounterorch.h"
 #include "directory.h"
 #include "copporch.h"
+#include
 #include "routeorch.h"
 #include "flowcounterrouteorch.h"

@@ -58,6 +59,8 @@ unordered_map<string, string> flexCounterGroupMap =
 FlexCounterOrch::FlexCounterOrch(DBConnector *db, vector<string> &tableNames):
     Orch(db, tableNames),
     m_flexCounterConfigTable(db, CFG_FLEX_COUNTER_TABLE_NAME),
+    m_bufferQueueConfigTable(db, CFG_BUFFER_QUEUE_TABLE_NAME),
+    m_bufferPgConfigTable(db, CFG_BUFFER_PG_TABLE_NAME),
     m_flexCounterDb(new DBConnector("FLEX_COUNTER_DB", 0)),
     m_flexCounterGroupTable(new ProducerTable(m_flexCounterDb.get(), FLEX_COUNTER_GROUP_TABLE))
 {
@@ -144,11 +147,13 @@ void FlexCounterOrch::doTask(Consumer &consumer)
                 }
                 else if(key == QUEUE_KEY)
                 {
-                    gPortsOrch->generateQueueMap();
+                    gPortsOrch->generateQueueMap(getQueueConfigurations());
+                    m_queue_enabled = true;
                 }
                 else if(key == PG_WATERMARK_KEY)
                 {
-                    gPortsOrch->generatePriorityGroupMap();
+                    gPortsOrch->generatePriorityGroupMap(getPgConfigurations());
+                    m_pg_watermark_enabled = true;
                 }
             }
             if(gIntfsOrch && (key == RIF_KEY) && (value == "enable"))
@@ -196,6 +201,12 @@ void FlexCounterOrch::doTask(Consumer &consumer)
                     vector<FieldValueTuple> fieldValues;
                     fieldValues.emplace_back(FLEX_COUNTER_STATUS_FIELD, value);
                     m_flexCounterGroupTable->set(flexCounterGroupMap[key], fieldValues);
+
+                    // Update FLEX_COUNTER_STATUS for gearbox port
+                    if (key == PORT_KEY && gPortsOrch && gPortsOrch->isGearboxEnabled())
+                    {
+                        gPortsOrch->setGearboxFlexCounterStatus(value == "enable");
+                    }
                 }
                 else if(field == FLEX_COUNTER_DELAY_STATUS_FIELD)
                 {
@@ -224,6 +235,16 @@ bool FlexCounterOrch::getPortBufferDropCountersState() const
     return m_port_buffer_drop_counter_enabled;
 }

+bool FlexCounterOrch::getPgWatermarkCountersState() const
+{
+    return m_pg_watermark_enabled;
+}
+
+bool FlexCounterOrch::getQueueCountersState() const
+{
+    return m_queue_enabled;
+}
+
 bool FlexCounterOrch::bake()
 {
     /*
@@ -265,3 +286,165 @@ bool FlexCounterOrch::bake()
     Consumer* consumer = dynamic_cast<Consumer *>(getExecutor(CFG_FLEX_COUNTER_TABLE_NAME));
     return consumer->addToSync(entries);
 }
+
+map<string, FlexCounterQueueStates> FlexCounterOrch::getQueueConfigurations()
+{
+    SWSS_LOG_ENTER();
+
+    map<string, FlexCounterQueueStates> queuesStateVector;
+    std::vector<string> portQueueKeys;
+    m_bufferQueueConfigTable.getKeys(portQueueKeys);
+
+    for (const auto& portQueueKey : portQueueKeys)
+    {
+        auto toks = tokenize(portQueueKey, '|');
+        if (toks.size() != 2)
+        {
+            SWSS_LOG_ERROR("Invalid BUFFER_QUEUE key: [%s]", portQueueKey.c_str());
+            continue;
+        }
+
+        auto configPortNames = tokenize(toks[0], ',');
+        auto configPortQueues = toks[1];
+        toks = tokenize(configPortQueues, '-');
+
+        for (const auto& configPortName : configPortNames)
+        {
+            uint32_t maxQueueNumber = gPortsOrch->getNumberOfPortSupportedQueueCounters(configPortName);
+            uint32_t maxQueueIndex = maxQueueNumber - 1;
+            uint32_t minQueueIndex = 0;
+
+            if (!queuesStateVector.count(configPortName))
+            {
+                FlexCounterQueueStates flexCounterQueueState(maxQueueNumber);
+                queuesStateVector.insert(make_pair(configPortName, flexCounterQueueState));
+            }
+
+            try {
+                auto startIndex = to_uint(toks[0], minQueueIndex, maxQueueIndex);
+                if (toks.size() > 1)
+                {
+                    auto endIndex = to_uint(toks[1], minQueueIndex, maxQueueIndex);
+                    queuesStateVector.at(configPortName).enableQueueCounters(startIndex, endIndex);
+                }
+                else
+                {
+                    queuesStateVector.at(configPortName).enableQueueCounter(startIndex);
+                }
+            } catch (std::invalid_argument const& e) {
+                SWSS_LOG_ERROR("Invalid queue index [%s] for port [%s]", configPortQueues.c_str(), configPortName.c_str());
+                continue;
+            }
+        }
+    }
+
+    return queuesStateVector;
+}
+
+map<string, FlexCounterPgStates> FlexCounterOrch::getPgConfigurations()
+{
+    SWSS_LOG_ENTER();
+
+    map<string, FlexCounterPgStates> pgsStateVector;
+    std::vector<string> portPgKeys;
+    m_bufferPgConfigTable.getKeys(portPgKeys);
+
+    for (const auto& portPgKey : portPgKeys)
+    {
+        auto toks = tokenize(portPgKey, '|');
+        if (toks.size() != 2)
+        {
+            SWSS_LOG_ERROR("Invalid BUFFER_PG key: [%s]", portPgKey.c_str());
+            continue;
+        }
+
+        auto configPortNames = tokenize(toks[0], ',');
+        auto configPortPgs = toks[1];
+        toks = tokenize(configPortPgs, '-');
+
+        for (const auto& configPortName : configPortNames)
+        {
+            uint32_t maxPgNumber = gPortsOrch->getNumberOfPortSupportedPgCounters(configPortName);
+            uint32_t maxPgIndex = maxPgNumber - 1;
+            uint32_t minPgIndex = 0;
+
+            if (!pgsStateVector.count(configPortName))
+            {
+                FlexCounterPgStates flexCounterPgState(maxPgNumber);
+                pgsStateVector.insert(make_pair(configPortName, flexCounterPgState));
+            }
+
+            try {
+                auto startIndex = to_uint(toks[0], minPgIndex, maxPgIndex);
+                if (toks.size() > 1)
+                {
+                    auto endIndex = to_uint(toks[1], minPgIndex, maxPgIndex);
+                    pgsStateVector.at(configPortName).enablePgCounters(startIndex, endIndex);
+                }
+                else
+                {
+                    pgsStateVector.at(configPortName).enablePgCounter(startIndex);
+                }
+            } catch (std::invalid_argument const& e) {
+                SWSS_LOG_ERROR("Invalid pg index [%s] for port [%s]", configPortPgs.c_str(), configPortName.c_str());
+                continue;
+            }
+        }
+    }
+
+    return pgsStateVector;
+}
+
+FlexCounterQueueStates::FlexCounterQueueStates(uint32_t maxQueueNumber)
+{
+    SWSS_LOG_ENTER();
+    m_queueStates.resize(maxQueueNumber, false);
+}
+
+bool FlexCounterQueueStates::isQueueCounterEnabled(uint32_t index) const
+{
+    SWSS_LOG_ENTER();
+    return m_queueStates[index];
+}
+
+void FlexCounterQueueStates::enableQueueCounters(uint32_t startIndex, uint32_t endIndex)
+{
+    SWSS_LOG_ENTER();
+    for (uint32_t queueIndex = startIndex; queueIndex <= endIndex; queueIndex++)
+    {
+        enableQueueCounter(queueIndex);
+    }
+}
+
+void FlexCounterQueueStates::enableQueueCounter(uint32_t queueIndex)
+{
+    SWSS_LOG_ENTER();
+    m_queueStates[queueIndex] = true;
+}
+
+FlexCounterPgStates::FlexCounterPgStates(uint32_t maxPgNumber)
+{
+    SWSS_LOG_ENTER();
+    m_pgStates.resize(maxPgNumber, false);
+}
+
+bool FlexCounterPgStates::isPgCounterEnabled(uint32_t index) const
+{
+    SWSS_LOG_ENTER();
+    return m_pgStates[index];
+}
+
+void FlexCounterPgStates::enablePgCounters(uint32_t startIndex, uint32_t endIndex)
+{
+    SWSS_LOG_ENTER();
+    for (uint32_t pgIndex = startIndex; pgIndex <= endIndex; pgIndex++)
+    {
+        enablePgCounter(pgIndex);
+    }
+}
+
+void FlexCounterPgStates::enablePgCounter(uint32_t pgIndex)
+{
+    SWSS_LOG_ENTER();
+    m_pgStates[pgIndex] = true;
+}
diff --git a/orchagent/flexcounterorch.h b/orchagent/flexcounterorch.h
index 4f9734c0e2..a8106720da 100644
--- a/orchagent/flexcounterorch.h
+++ b/orchagent/flexcounterorch.h
@@ -10,6 +10,30 @@ extern "C" {
 #include "sai.h"
 }

+class FlexCounterQueueStates
+{
+public:
+    FlexCounterQueueStates(uint32_t maxQueueNumber);
+    bool isQueueCounterEnabled(uint32_t index) const;
+    void enableQueueCounters(uint32_t startIndex, uint32_t endIndex);
+    void enableQueueCounter(uint32_t queueIndex);
+
+private:
+    std::vector<bool> m_queueStates{};
+};
+
+class FlexCounterPgStates
+{
+public:
+    FlexCounterPgStates(uint32_t maxPgNumber);
+    bool isPgCounterEnabled(uint32_t index) const;
+    void enablePgCounters(uint32_t startIndex, uint32_t endIndex);
+    void enablePgCounter(uint32_t pgIndex);
+
+private:
+    std::vector<bool> m_pgStates{};
+};
+
 class FlexCounterOrch: public Orch
 {
 public:
@@ -18,6 +42,10 @@ class FlexCounterOrch: public Orch
     virtual ~FlexCounterOrch(void);
     bool getPortCountersState() const;
     bool getPortBufferDropCountersState() const;
+    bool getPgWatermarkCountersState() const;
+    bool getQueueCountersState() const;
+    std::map<std::string, FlexCounterQueueStates> getQueueConfigurations();
+    std::map<std::string, FlexCounterPgStates> getPgConfigurations();
     bool getHostIfTrapCounterState() const {return m_hostif_trap_counter_enabled;}
     bool getRouteFlowCountersState() const {return m_route_flow_counter_enabled;}
     bool bake() override;
@@ -27,9 +55,13 @@ class FlexCounterOrch: public Orch
     std::shared_ptr<swss::ProducerTable> m_flexCounterGroupTable = nullptr;
     bool m_port_counter_enabled = false;
     bool m_port_buffer_drop_counter_enabled = false;
+    bool m_pg_watermark_enabled = false;
+    bool m_queue_enabled = false;
     bool m_hostif_trap_counter_enabled = false;
     bool m_route_flow_counter_enabled = false;
     Table m_flexCounterConfigTable;
+    Table m_bufferQueueConfigTable;
+    Table m_bufferPgConfigTable;
 };

 #endif
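getQueueConfigurations() and getPgConfigurations() both reduce a CONFIG_DB key such as "Ethernet0|3-4" to a per-port enable mask. A self-contained sketch of that parsing, using a plain std::vector<bool> in place of FlexCounterQueueStates and a hypothetical split() helper instead of swss::tokenize:

    #include <cassert>
    #include <sstream>
    #include <string>
    #include <vector>

    // Hypothetical helper standing in for swss::tokenize.
    static std::vector<std::string> split(const std::string& s, char sep)
    {
        std::vector<std::string> out;
        std::istringstream iss(s);
        std::string tok;
        while (std::getline(iss, tok, sep))
            out.push_back(tok);
        return out;
    }

    int main()
    {
        std::vector<bool> queueStates(8, false);   // like FlexCounterQueueStates

        auto toks = split("Ethernet0|3-4", '|');   // "port|queues"
        assert(toks.size() == 2);

        auto range = split(toks[1], '-');          // single index or "start-end"
        size_t start = std::stoul(range[0]);
        size_t end = range.size() > 1 ? std::stoul(range[1]) : start;
        for (size_t i = start; i <= end && i < queueStates.size(); ++i)
            queueStates[i] = true;                 // enableQueueCounters(start, end)

        assert(queueStates[3] && queueStates[4] && !queueStates[5]);
        return 0;
    }

Only the indices marked true get flex counters installed, which is the whole point of the change: queues and PGs that are not configured in CONFIG_DB are never polled.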
diff --git a/orchagent/macsecorch.cpp b/orchagent/macsecorch.cpp
index 20b6057733..70721979d2 100644
--- a/orchagent/macsecorch.cpp
+++ b/orchagent/macsecorch.cpp
@@ -621,6 +621,21 @@ MACsecOrch::MACsecOrch(
         StatsMode::READ,
         MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true),
     m_macsec_flow_stat_manager(
+        COUNTERS_MACSEC_FLOW_GROUP,
+        StatsMode::READ,
+        MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true),
+    m_gb_macsec_sa_attr_manager(
+        "GB_FLEX_COUNTER_DB",
+        COUNTERS_MACSEC_SA_ATTR_GROUP,
+        StatsMode::READ,
+        MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true),
+    m_gb_macsec_sa_stat_manager(
+        "GB_FLEX_COUNTER_DB",
+        COUNTERS_MACSEC_SA_GROUP,
+        StatsMode::READ,
+        MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true),
+    m_gb_macsec_flow_stat_manager(
+        "GB_FLEX_COUNTER_DB",
         COUNTERS_MACSEC_FLOW_GROUP,
         StatsMode::READ,
         MACSEC_STAT_FLEX_COUNTER_POLLING_INTERVAL_MS, true)
@@ -2122,17 +2137,17 @@ task_process_status MACsecOrch::createMACsecSA(
             sc->m_sa_ids.erase(an);
         });

-    installCounter(CounterType::MACSEC_SA_ATTR, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_attrs);
+    installCounter(ctx, CounterType::MACSEC_SA_ATTR, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_attrs);

     std::vector<FieldValueTuple> fvVector;
     fvVector.emplace_back("state", "ok");
     if (direction == SAI_MACSEC_DIRECTION_EGRESS)
     {
-        installCounter(CounterType::MACSEC_SA, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_egress_stats);
+        installCounter(ctx, CounterType::MACSEC_SA, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_egress_stats);
         m_state_macsec_egress_sa.set(swss::join('|', port_name, sci, an), fvVector);
     }
     else
     {
-        installCounter(CounterType::MACSEC_SA, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_ingress_stats);
+        installCounter(ctx, CounterType::MACSEC_SA, direction, port_sci_an, sc->m_sa_ids[an], macsec_sa_ingress_stats);
         m_state_macsec_ingress_sa.set(swss::join('|', port_name, sci, an), fvVector);
     }

@@ -2167,8 +2182,8 @@ task_process_status MACsecOrch::deleteMACsecSA(

     auto result = task_success;

-    uninstallCounter(CounterType::MACSEC_SA_ATTR, direction, port_sci_an, ctx.get_macsec_sc()->m_sa_ids[an]);
-    uninstallCounter(CounterType::MACSEC_SA, direction, port_sci_an, ctx.get_macsec_sc()->m_sa_ids[an]);
+    uninstallCounter(ctx, CounterType::MACSEC_SA_ATTR, direction, port_sci_an, ctx.get_macsec_sc()->m_sa_ids[an]);
+    uninstallCounter(ctx, CounterType::MACSEC_SA, direction, port_sci_an, ctx.get_macsec_sc()->m_sa_ids[an]);
     if (!deleteMACsecSA(ctx.get_macsec_sc()->m_sa_ids[an]))
     {
         SWSS_LOG_WARN("Cannot delete the MACsec SA %s.", port_sci_an.c_str());
@@ -2293,7 +2308,29 @@ bool MACsecOrch::deleteMACsecSA(sai_object_id_t sa_id)
     return true;
 }

+FlexCounterManager& MACsecOrch::MACsecSaStatManager(MACsecOrchContext &ctx)
+{
+    if (ctx.get_gearbox_phy() != nullptr)
+        return m_gb_macsec_sa_stat_manager;
+    return m_macsec_sa_stat_manager;
+}
+
+FlexCounterManager& MACsecOrch::MACsecSaAttrStatManager(MACsecOrchContext &ctx)
+{
+    if (ctx.get_gearbox_phy() != nullptr)
+        return m_gb_macsec_sa_attr_manager;
+    return m_macsec_sa_attr_manager;
+}
+
+FlexCounterManager& MACsecOrch::MACsecFlowStatManager(MACsecOrchContext &ctx)
+{
+    if (ctx.get_gearbox_phy() != nullptr)
+        return m_gb_macsec_flow_stat_manager;
+    return m_macsec_flow_stat_manager;
+}
+
 void MACsecOrch::installCounter(
+    MACsecOrchContext &ctx,
     CounterType counter_type,
     sai_macsec_direction_t direction,
     const std::string &obj_name,
@@ -2312,12 +2349,12 @@ void MACsecOrch::installCounter(
     switch(counter_type)
     {
         case CounterType::MACSEC_SA_ATTR:
-            m_macsec_sa_attr_manager.setCounterIdList(obj_id, counter_type, counter_stats);
+            MACsecSaAttrStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats);
             m_macsec_counters_map.set("", fields);
             break;

         case CounterType::MACSEC_SA:
-            m_macsec_sa_stat_manager.setCounterIdList(obj_id, counter_type, counter_stats);
+            MACsecSaStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats);
             if (direction == SAI_MACSEC_DIRECTION_EGRESS)
             {
                 m_macsec_sa_tx_counters_map.set("", fields);
@@ -2329,7 +2366,7 @@ void MACsecOrch::installCounter(
             break;

         case CounterType::MACSEC_FLOW:
-            m_macsec_flow_stat_manager.setCounterIdList(obj_id, counter_type, counter_stats);
+            MACsecFlowStatManager(ctx).setCounterIdList(obj_id, counter_type, counter_stats);
             break;

         default:
@@ -2340,6 +2377,7 @@ void MACsecOrch::installCounter(
 }

 void MACsecOrch::uninstallCounter(
+    MACsecOrchContext &ctx,
     CounterType counter_type,
     sai_macsec_direction_t direction,
     const std::string &obj_name,
@@ -2348,12 +2386,12 @@ void MACsecOrch::uninstallCounter(
     switch(counter_type)
     {
         case CounterType::MACSEC_SA_ATTR:
-            m_macsec_sa_attr_manager.clearCounterIdList(obj_id);
+            MACsecSaAttrStatManager(ctx).clearCounterIdList(obj_id);
             m_counter_db.hdel(COUNTERS_MACSEC_NAME_MAP, obj_name);
             break;

         case CounterType::MACSEC_SA:
-            m_macsec_sa_stat_manager.clearCounterIdList(obj_id);
+            MACsecSaStatManager(ctx).clearCounterIdList(obj_id);
             if (direction == SAI_MACSEC_DIRECTION_EGRESS)
             {
                 m_counter_db.hdel(COUNTERS_MACSEC_SA_TX_NAME_MAP, obj_name);
@@ -2365,7 +2403,7 @@ void MACsecOrch::uninstallCounter(
             break;

         case CounterType::MACSEC_FLOW:
-            m_macsec_flow_stat_manager.clearCounterIdList(obj_id);
+            MACsecFlowStatManager(ctx).clearCounterIdList(obj_id);
             break;

         default:
diff --git a/orchagent/macsecorch.h b/orchagent/macsecorch.h
index b59984a3a6..2472d8c0ef 100644
--- a/orchagent/macsecorch.h
+++ b/orchagent/macsecorch.h
@@ -72,6 +72,10 @@ class MACsecOrch : public Orch
     FlexCounterManager m_macsec_sa_stat_manager;
     FlexCounterManager m_macsec_flow_stat_manager;

+    FlexCounterManager m_gb_macsec_sa_attr_manager;
+    FlexCounterManager m_gb_macsec_sa_stat_manager;
+    FlexCounterManager m_gb_macsec_flow_stat_manager;
+
     struct MACsecACLTable
     {
         sai_object_id_t m_table_id;
@@ -209,17 +213,24 @@ class MACsecOrch : public Orch

     /* Counter */
     void installCounter(
+        MACsecOrchContext &ctx,
         CounterType counter_type,
         sai_macsec_direction_t direction,
         const std::string &obj_name,
         sai_object_id_t obj_id,
         const std::vector<std::string> &stats);
     void uninstallCounter(
+        MACsecOrchContext &ctx,
         CounterType counter_type,
         sai_macsec_direction_t direction,
         const std::string &obj_name,
         sai_object_id_t obj_id);

+    /* Flex Counter Manager */
+    FlexCounterManager& MACsecSaStatManager(MACsecOrchContext &ctx);
+    FlexCounterManager& MACsecSaAttrStatManager(MACsecOrchContext &ctx);
+    FlexCounterManager& MACsecFlowStatManager(MACsecOrchContext &ctx);
+
     /* MACsec ACL */
     bool initMACsecACLTable(
         MACsecACLTable &acl_table,
diff --git a/orchagent/port.h b/orchagent/port.h
index db9f2b7bff..fe366630ac 100644
--- a/orchagent/port.h
+++ b/orchagent/port.h
@@ -171,6 +171,7 @@ class Port
     SystemLagInfo m_system_lag_info;

     sai_object_id_t m_switch_id = 0;
+    sai_object_id_t m_system_side_id = 0;
     sai_object_id_t m_line_side_id = 0;

     bool m_fec_cfg = false;
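Rather than duplicating every install/uninstall call site, the MACsec change routes each counter operation through a selector that returns either the regular manager or its gearbox counterpart, depending on the per-operation context. A compact sketch of the pattern; Manager, Context, and the names below are illustrative, not the orchagent types.

    #include <iostream>

    struct Manager { const char* name; };

    struct Context
    {
        bool gearbox_phy;   // stands in for ctx.get_gearbox_phy() != nullptr
    };

    class Orch
    {
    public:
        // One selector per counter family; call sites stay identical and
        // only the returned reference changes.
        Manager& saStatManager(const Context& ctx)
        {
            return ctx.gearbox_phy ? m_gb_sa_stat : m_sa_stat;
        }

    private:
        Manager m_sa_stat{"COUNTERS_MACSEC_SA"};
        Manager m_gb_sa_stat{"COUNTERS_MACSEC_SA (gearbox)"};
    };

    int main()
    {
        Orch orch;
        std::cout << orch.saStatManager(Context{false}).name << "\n";
        std::cout << orch.saStatManager(Context{true}).name << "\n";
        return 0;
    }

Returning a reference keeps the managers themselves non-copyable, which matches FlexCounterManager's deleted copy constructor.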
diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp
index 5a6ba61e5c..6700031bd2 100755
--- a/orchagent/portsorch.cpp
+++ b/orchagent/portsorch.cpp
@@ -245,6 +245,24 @@ const vector<sai_port_stat_t> port_stat_ids =
     SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS
 };

+const vector<sai_port_stat_t> gbport_stat_ids =
+{
+    SAI_PORT_STAT_IF_IN_OCTETS,
+    SAI_PORT_STAT_IF_OUT_OCTETS,
+    SAI_PORT_STAT_IF_IN_DISCARDS,
+    SAI_PORT_STAT_IF_OUT_DISCARDS,
+    SAI_PORT_STAT_IF_IN_ERRORS,
+    SAI_PORT_STAT_IF_OUT_ERRORS,
+    SAI_PORT_STAT_ETHER_RX_OVERSIZE_PKTS,
+    SAI_PORT_STAT_ETHER_TX_OVERSIZE_PKTS,
+    SAI_PORT_STAT_ETHER_STATS_UNDERSIZE_PKTS,
+    SAI_PORT_STAT_ETHER_STATS_JABBERS,
+    SAI_PORT_STAT_ETHER_STATS_FRAGMENTS,
+    SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES,
+    SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES,
+    SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS
+};
+
 const vector<sai_port_stat_t> port_buffer_drop_stat_ids =
 {
     SAI_PORT_STAT_IN_DROPPED_PKTS,
@@ -305,6 +323,9 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector<table_name_wi
         port_stat_manager(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ,
                 PORT_FLEX_STAT_COUNTER_POLL_MSECS, false),
+        gb_port_stat_manager("GB_FLEX_COUNTER_DB",
+                GBPORT_STAT_COUNTER_FLEX_COUNTER_GROUP, StatsMode::READ,
+                PORT_FLEX_STAT_COUNTER_POLL_MSECS, false),
         port_buffer_drop_stat_manager(PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP, StatsMode::READ,
                 PORT_BUFFER_DROP_STAT_FLEX_COUNTER_POLL_MSECS, false),
@@ ... @@ bool PortsOrch::initPort(const string &alias, const string &role, const int inde
         if (flex_counters_orch->getPortCountersState())
         {
             auto port_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP);
-            port_stat_manager.setCounterIdList(p.m_port_id, CounterType::PORT, port_counter_stats);
+            port_stat_manager.setCounterIdList(p.m_port_id,
+                    CounterType::PORT, port_counter_stats);
+            auto gbport_counter_stats = generateCounterStats(GBPORT_STAT_COUNTER_FLEX_COUNTER_GROUP);
+            if (p.m_system_side_id)
+                gb_port_stat_manager.setCounterIdList(p.m_system_side_id,
+                        CounterType::PORT, gbport_counter_stats);
+            if (p.m_line_side_id)
+                gb_port_stat_manager.setCounterIdList(p.m_line_side_id,
+                        CounterType::PORT, gbport_counter_stats);
         }
         if (flex_counters_orch->getPortBufferDropCountersState())
         {
@@ -2423,18 +2457,6 @@ bool PortsOrch::initPort(const string &alias, const string &role, const int inde
             port_buffer_drop_stat_manager.setCounterIdList(p.m_port_id, CounterType::PORT, port_buffer_drop_stats);
         }

-        /* when a port is added and priority group map counter is enabled --> we need to add pg counter for it */
-        if (m_isPriorityGroupMapGenerated)
-        {
-            generatePriorityGroupMapPerPort(p);
-        }
-
-        /* when a port is added and queue map counter is enabled --> we need to add queue map counter for it */
-        if (m_isQueueMapGenerated)
-        {
-            generateQueueMapPerPort(p);
-        }
-
         PortUpdate update = { p, true };
         notify(SUBJECT_TYPE_PORT_CHANGE, static_cast<void *>(&update));

@@ -2487,18 +2509,6 @@ void PortsOrch::deInitPort(string alias, sai_object_id_t port_id)
         port_buffer_drop_stat_manager.clearCounterIdList(p.m_port_id);
     }

-    /* remove pg port counters */
-    if (m_isPriorityGroupMapGenerated)
-    {
-        removePriorityGroupMapPerPort(p);
-    }
-
-    /* remove queue port counters */
-    if (m_isQueueMapGenerated)
-    {
-        removeQueueMapPerPort(p);
-    }
-
     /* remove port name map from counter table */
     m_counterTable->hdel("", alias);

@@ -5464,7 +5474,7 @@ bool PortsOrch::removeTunnel(Port tunnel)
     return true;
 }

-void PortsOrch::generateQueueMap()
+void PortsOrch::generateQueueMap(map<string, FlexCounterQueueStates> queuesStateVector)
 {
     if (m_isQueueMapGenerated)
     {
@@ -5475,53 +5485,87 @@ void PortsOrch::generateQueueMap()
     {
         if (it.second.m_type == Port::PHY)
         {
-            generateQueueMapPerPort(it.second);
+            if (!queuesStateVector.count(it.second.m_alias))
+            {
+                auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias);
+                FlexCounterQueueStates flexCounterQueueState(maxQueueNumber);
+                queuesStateVector.insert(make_pair(it.second.m_alias, flexCounterQueueState));
+            }
+            generateQueueMapPerPort(it.second, queuesStateVector.at(it.second.m_alias));
         }
     }

     m_isQueueMapGenerated = true;
 }

-void PortsOrch::removeQueueMapPerPort(const Port& port)
+void PortsOrch::generateQueueMapPerPort(const Port& port, FlexCounterQueueStates& queuesState)
 {
-    /* Remove the Queue map in the Counter DB */
+    /* Create the Queue map in the Counter DB */
+    /* Add stat counters to flex_counter */
+    vector<FieldValueTuple> queueVector;
+    vector<FieldValueTuple> queuePortVector;
+    vector<FieldValueTuple> queueIndexVector;
+    vector<FieldValueTuple> queueTypeVector;

     for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex)
     {
         std::ostringstream name;
         name << port.m_alias << ":" << queueIndex;

-        std::unordered_set<string> counter_stats;
         const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]);

-        m_queueTable->hdel("",name.str());
-        m_queuePortTable->hdel("",id);
-
         string queueType;
         uint8_t queueRealIndex = 0;
         if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex))
         {
-            m_queueTypeTable->hdel("",id);
-            m_queueIndexTable->hdel("",id);
+            if (!queuesState.isQueueCounterEnabled(queueRealIndex))
+            {
+                continue;
+            }
+            queueTypeVector.emplace_back(id, queueType);
+            queueIndexVector.emplace_back(id, to_string(queueRealIndex));
         }

+        queueVector.emplace_back(name.str(), id);
+        queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id));
+
+        // Install a flex counter for this queue to track stats
+        std::unordered_set<string> counter_stats;
         for (const auto& it: queue_stat_ids)
         {
             counter_stats.emplace(sai_serialize_queue_stat(it));
         }
-        queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]);
+        queue_stat_manager.setCounterIdList(port.m_queue_ids[queueIndex], CounterType::QUEUE, counter_stats);

-        /* remove watermark queue counters */
+        /* add watermark queue counters */
         string key = getQueueWatermarkFlexCounterTableKey(id);

-        m_flexCounterTable->del(key);
+        string delimiter("");
+        std::ostringstream counters_stream;
+        for (const auto& it: queueWatermarkStatIds)
+        {
+            counters_stream << delimiter << sai_serialize_queue_stat(it);
+            delimiter = comma;
+        }
+
+        vector<FieldValueTuple> fieldValues;
+        fieldValues.emplace_back(QUEUE_COUNTER_ID_LIST, counters_stream.str());
+
+        m_flexCounterTable->set(key, fieldValues);
     }

-    CounterCheckOrch::getInstance().removePort(port);
+    m_queueTable->set("", queueVector);
+    m_queuePortTable->set("", queuePortVector);
+    m_queueIndexTable->set("", queueIndexVector);
+    m_queueTypeTable->set("", queueTypeVector);
+
+    CounterCheckOrch::getInstance().addPort(port);
 }

-void PortsOrch::generateQueueMapPerPort(const Port& port)
+void PortsOrch::createPortBufferQueueCounters(const Port &port, string queues)
 {
+    SWSS_LOG_ENTER();
+
     /* Create the Queue map in the Counter DB */
     /* Add stat counters to flex_counter */
     vector<FieldValueTuple> queueVector;
@@ -5529,16 +5573,21 @@ void PortsOrch::generateQueueMapPerPort(const Port& port)
     vector<FieldValueTuple> queuePortVector;
     vector<FieldValueTuple> queueIndexVector;
     vector<FieldValueTuple> queueTypeVector;

-    for (size_t queueIndex = 0; queueIndex < port.m_queue_ids.size(); ++queueIndex)
+    auto toks = tokenize(queues, '-');
+    auto startIndex = to_uint<sai_uint32_t>(toks[0]);
+    auto endIndex = startIndex;
+    if (toks.size() > 1)
+    {
+        endIndex = to_uint<sai_uint32_t>(toks[1]);
+    }
+
+    for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++)
     {
         std::ostringstream name;
         name << port.m_alias << ":" << queueIndex;

         const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]);

-        queueVector.emplace_back(name.str(), id);
-        queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id));
-
         string queueType;
         uint8_t queueRealIndex = 0;
         if (getQueueTypeAndIndex(port.m_queue_ids[queueIndex], queueType, queueRealIndex))
         {
@@ -5547,6 +5596,9 @@ void PortsOrch::generateQueueMapPerPort(const Port& port)
             queueIndexVector.emplace_back(id, to_string(queueRealIndex));
         }

+        queueVector.emplace_back(name.str(), id);
+        queuePortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id));
+
         // Install a flex counter for this queue to track stats
         std::unordered_set<string> counter_stats;
         for (const auto& it: queue_stat_ids)
         {
@@ -5580,7 +5632,42 @@ void PortsOrch::generateQueueMapPerPort(const Port& port)

     CounterCheckOrch::getInstance().addPort(port);
 }

-void PortsOrch::generatePriorityGroupMap()
+void PortsOrch::removePortBufferQueueCounters(const Port &port, string queues)
+{
+    SWSS_LOG_ENTER();
+
+    /* Remove the Queues maps in the Counter DB */
+    /* Remove stat counters from flex_counter DB */
+    auto toks = tokenize(queues, '-');
+    auto startIndex = to_uint<sai_uint32_t>(toks[0]);
+    auto endIndex = startIndex;
+    if (toks.size() > 1)
+    {
+        endIndex = to_uint<sai_uint32_t>(toks[1]);
+    }
+
+    for (auto queueIndex = startIndex; queueIndex <= endIndex; queueIndex++)
+    {
+        std::ostringstream name;
+        name << port.m_alias << ":" << queueIndex;
+        const auto id = sai_serialize_object_id(port.m_queue_ids[queueIndex]);
+
+        /* Remove watermark queue counters */
+        string key = getQueueWatermarkFlexCounterTableKey(id);
+        m_flexCounterTable->del(key);
+
+        // Remove the flex counter for this queue
+        queue_stat_manager.clearCounterIdList(port.m_queue_ids[queueIndex]);
+
+        // Remove the queue counter from counters DB maps
+        m_queueTable->hdel("", name.str());
+        m_queuePortTable->hdel("", id);
+        m_queueIndexTable->hdel("", id);
+        m_queueTypeTable->hdel("", id);
+    }
+}
+
+void PortsOrch::generatePriorityGroupMap(map<string, FlexCounterPgStates> pgsStateVector)
 {
     if (m_isPriorityGroupMapGenerated)
     {
@@ -5591,48 +5678,100 @@ void PortsOrch::generatePriorityGroupMap()
     {
         if (it.second.m_type == Port::PHY)
         {
-            generatePriorityGroupMapPerPort(it.second);
+            if (!pgsStateVector.count(it.second.m_alias))
+            {
+                auto maxPgNumber = getNumberOfPortSupportedPgCounters(it.second.m_alias);
+                FlexCounterPgStates flexCounterPgState(maxPgNumber);
+                pgsStateVector.insert(make_pair(it.second.m_alias, flexCounterPgState));
+            }
+            generatePriorityGroupMapPerPort(it.second, pgsStateVector.at(it.second.m_alias));
         }
     }

     m_isPriorityGroupMapGenerated = true;
 }

-void PortsOrch::removePriorityGroupMapPerPort(const Port& port)
+void PortsOrch::generatePriorityGroupMapPerPort(const Port& port, FlexCounterPgStates& pgsState)
 {
-    /* Remove the PG map in the Counter DB */
+    /* Create the PG map in the Counter DB */
+    /* Add stat counters to flex_counter */
+    vector<FieldValueTuple> pgVector;
+    vector<FieldValueTuple> pgPortVector;
+    vector<FieldValueTuple> pgIndexVector;

     for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex)
     {
+        if (!pgsState.isPgCounterEnabled(static_cast<uint32_t>(pgIndex)))
+        {
+            continue;
+        }
         std::ostringstream name;
         name << port.m_alias << ":" << pgIndex;

         const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]);
+
+        pgVector.emplace_back(name.str(), id);
+        pgPortVector.emplace_back(id, sai_serialize_object_id(port.m_port_id));
+        pgIndexVector.emplace_back(id, to_string(pgIndex));
+
         string key = getPriorityGroupWatermarkFlexCounterTableKey(id);

-        m_pgTable->hdel("",name.str());
-        m_pgPortTable->hdel("",id);
-        m_pgIndexTable->hdel("",id);
+        std::string delimiter = "";
+        std::ostringstream counters_stream;
+        /* Add watermark counters to flex_counter */
+        for (const auto& it: ingressPriorityGroupWatermarkStatIds)
+        {
+            counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it);
+            delimiter = comma;
+        }

-        m_flexCounterTable->del(key);
+        vector<FieldValueTuple> fieldValues;
+        fieldValues.emplace_back(PG_COUNTER_ID_LIST, counters_stream.str());
+        m_flexCounterTable->set(key, fieldValues);

+        delimiter = "";
+        std::ostringstream ingress_pg_drop_packets_counters_stream;
         key = getPriorityGroupDropPacketsFlexCounterTableKey(id);
-        /* remove dropped packets counters to flex_counter */
-        m_flexCounterTable->del(key);
+        /* Add dropped packets counters to flex_counter */
+        for (const auto& it: ingressPriorityGroupDropStatIds)
+        {
+            ingress_pg_drop_packets_counters_stream << delimiter << sai_serialize_ingress_priority_group_stat(it);
+            if (delimiter.empty())
+            {
+                delimiter = comma;
+            }
+        }
+        fieldValues.clear();
+        fieldValues.emplace_back(PG_COUNTER_ID_LIST, ingress_pg_drop_packets_counters_stream.str());
+        m_flexCounterTable->set(key, fieldValues);
     }

-    CounterCheckOrch::getInstance().removePort(port);
+    m_pgTable->set("", pgVector);
+    m_pgPortTable->set("", pgPortVector);
+    m_pgIndexTable->set("", pgIndexVector);
+
+    CounterCheckOrch::getInstance().addPort(port);
 }

-void PortsOrch::generatePriorityGroupMapPerPort(const Port& port)
+void PortsOrch::createPortBufferPgCounters(const Port& port, string pgs)
 {
+    SWSS_LOG_ENTER();
+
     /* Create the PG map in the Counter DB */
     /* Add stat counters to flex_counter */
     vector<FieldValueTuple> pgVector;
     vector<FieldValueTuple> pgPortVector;
     vector<FieldValueTuple> pgIndexVector;

-    for (size_t pgIndex = 0; pgIndex < port.m_priority_group_ids.size(); ++pgIndex)
+    auto toks = tokenize(pgs, '-');
+    auto startIndex = to_uint<sai_uint32_t>(toks[0]);
+    auto endIndex = startIndex;
+    if (toks.size() > 1)
+    {
+        endIndex = to_uint<sai_uint32_t>(toks[1]);
+    }
+
+    for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++)
     {
         std::ostringstream name;
         name << port.m_alias << ":" << pgIndex;
@@ -5682,6 +5821,41 @@ void PortsOrch::generatePriorityGroupMapPerPort(const Port& port)
     CounterCheckOrch::getInstance().addPort(port);
 }

+void PortsOrch::removePortBufferPgCounters(const Port& port, string pgs)
+{
+    SWSS_LOG_ENTER();
+
+    /* Remove the Pgs maps in the Counter DB */
+    /* Remove stat counters from flex_counter DB */
+    auto toks = tokenize(pgs, '-');
+    auto startIndex = to_uint<sai_uint32_t>(toks[0]);
+    auto endIndex = startIndex;
+    if (toks.size() > 1)
+    {
+        endIndex = to_uint<sai_uint32_t>(toks[1]);
+    }
+
+    for (auto pgIndex = startIndex; pgIndex <= endIndex; pgIndex++)
+    {
+        std::ostringstream name;
+        name << port.m_alias << ":" << pgIndex;
+        const auto id = sai_serialize_object_id(port.m_priority_group_ids[pgIndex]);
+
+        /* Remove dropped packets counters from flex_counter */
+        string key = getPriorityGroupDropPacketsFlexCounterTableKey(id);
+        m_flexCounterTable->del(key);
+
+        /* Remove watermark counters from flex_counter */
+        key = getPriorityGroupWatermarkFlexCounterTableKey(id);
+        m_flexCounterTable->del(key);
+
+        // Remove the pg counter from counters DB maps
+        m_pgTable->hdel("", name.str());
+        m_pgPortTable->hdel("", id);
+        m_pgIndexTable->hdel("", id);
+    }
+}
+
 void PortsOrch::generatePortCounterMap()
 {
     if (m_isPortCounterMapGenerated)
@@ -5690,6 +5864,7 @@ void PortsOrch::generatePortCounterMap()
     }

     auto port_counter_stats = generateCounterStats(PORT_STAT_COUNTER_FLEX_COUNTER_GROUP);
+    auto gbport_counter_stats = generateCounterStats(GBPORT_STAT_COUNTER_FLEX_COUNTER_GROUP);
     for (const auto& it: m_portList)
     {
         // Set counter stats only for PHY ports to ensure syncd will not try to query the counter statistics from the HW for non-PHY ports.
@@ -5697,7 +5872,14 @@ void PortsOrch::generatePortCounterMap()
         {
             continue;
         }
-        port_stat_manager.setCounterIdList(it.second.m_port_id, CounterType::PORT, port_counter_stats);
+        port_stat_manager.setCounterIdList(it.second.m_port_id,
+                CounterType::PORT, port_counter_stats);
+        if (it.second.m_system_side_id)
+            gb_port_stat_manager.setCounterIdList(it.second.m_system_side_id,
+                    CounterType::PORT, gbport_counter_stats);
+        if (it.second.m_line_side_id)
+            gb_port_stat_manager.setCounterIdList(it.second.m_line_side_id,
+                    CounterType::PORT, gbport_counter_stats);
     }

     m_isPortCounterMapGenerated = true;
@@ -5724,6 +5906,16 @@ void PortsOrch::generatePortBufferDropCounterMap()
     m_isPortBufferDropCounterMapGenerated = true;
 }

+uint32_t PortsOrch::getNumberOfPortSupportedPgCounters(string port)
+{
+    return static_cast<uint32_t>(m_portList[port].m_priority_group_ids.size());
+}
+
+uint32_t PortsOrch::getNumberOfPortSupportedQueueCounters(string port)
+{
+    return static_cast<uint32_t>(m_portList[port].m_queue_ids.size());
+}
+
 void PortsOrch::doTask(NotificationConsumer &consumer)
 {
     SWSS_LOG_ENTER();
@@ -5803,6 +5995,7 @@ void PortsOrch::updatePortOperStatus(Port &port, sai_port_oper_status_t status)
     if (port.m_type == Port::PHY)
     {
         updateDbPortOperStatus(port, status);
+        updateGearboxPortOperStatus(port);
     }
     port.m_oper_status = status;

@@ -6285,6 +6478,9 @@ void PortsOrch::initGearbox()
         SWSS_LOG_NOTICE("BOX: m_gearboxInterfaceMap size = %d.", (int) m_gearboxInterfaceMap.size());
         SWSS_LOG_NOTICE("BOX: m_gearboxLaneMap size = %d.", (int) m_gearboxLaneMap.size());
         SWSS_LOG_NOTICE("BOX: m_gearboxPortMap size = %d.", (int) m_gearboxPortMap.size());
+
+        m_gb_counter_db = shared_ptr<DBConnector>(new DBConnector("GB_COUNTERS_DB", 0));
+        m_gbcounterTable = unique_ptr<Table>(new Table(m_gb_counter_db.get(), COUNTERS_PORT_NAME_MAP));
     }
 }

@@ -6383,6 +6579,7 @@ bool PortsOrch::initGearboxPort(Port &port)
             }
             SWSS_LOG_NOTICE("BOX: Created Gearbox system-side port 0x%" PRIx64 " for alias:%s index:%d",
                     systemPort, port.m_alias.c_str(), port.m_index);
+            port.m_system_side_id = systemPort;

             /* Create LINE-SIDE port */
             attrs.clear();
@@ -6495,6 +6692,15 @@ bool PortsOrch::initGearboxPort(Port &port)
             SWSS_LOG_NOTICE("BOX: Connected Gearbox ports; system-side:0x%" PRIx64 " to line-side:0x%" PRIx64, systemPort, linePort);
             m_gearboxPortListLaneMap[port.m_port_id] = make_tuple(systemPort, linePort);
             port.m_line_side_id = linePort;
+
+            /* Add gearbox system/line port name map to counter table */
+            FieldValueTuple tuple(port.m_alias + "_system", sai_serialize_object_id(systemPort));
+            vector<FieldValueTuple> fields;
+            fields.push_back(tuple);
+            m_gbcounterTable->set("", fields);
+
+            fields[0] = FieldValueTuple(port.m_alias + "_line", sai_serialize_object_id(linePort));
+            m_gbcounterTable->set("", fields);
         }
     }

@@ -6920,6 +7126,13 @@ std::unordered_set<string> PortsOrch::generateCounterStats(const string& ty
             counter_stats.emplace(sai_serialize_port_stat(it));
         }
     }
+    else if (type == GBPORT_STAT_COUNTER_FLEX_COUNTER_GROUP)
+    {
+        for (const auto& it: gbport_stat_ids)
+        {
+            counter_stats.emplace(sai_serialize_port_stat(it));
+        }
+    }
     else if (type == PORT_BUFFER_DROP_STAT_FLEX_COUNTER_GROUP)
     {
         for (const auto& it: port_buffer_drop_stat_ids)
@@ -6930,6 +7143,61 @@ std::unordered_set<string> PortsOrch::generateCounterStats(const string& ty
     return counter_stats;
 }

+void PortsOrch::setGearboxFlexCounterStatus(bool enabled)
+{
+    if (enabled)
+    {
+        gb_port_stat_manager.enableFlexCounterGroup();
+    }
+    else
+    {
+        gb_port_stat_manager.disableFlexCounterGroup();
+    }
+}
+
+void PortsOrch::updateGearboxPortOperStatus(const Port& port)
+{
+    if (!isGearboxEnabled())
+        return;
+
+    SWSS_LOG_NOTICE("BOX: port %s, system_side_id:0x%" PRIx64 " line_side_id:0x%" PRIx64,
+            port.m_alias.c_str(), port.m_system_side_id, port.m_line_side_id);
+
+    if (!port.m_system_side_id || !port.m_line_side_id)
+        return;
+
+    sai_attribute_t attr;
+    attr.id = SAI_PORT_ATTR_OPER_STATUS;
+    sai_status_t ret = sai_port_api->get_port_attribute(port.m_system_side_id, 1, &attr);
+    if (ret != SAI_STATUS_SUCCESS)
+    {
+        SWSS_LOG_ERROR("BOX: Failed to get system_oper_status for %s", port.m_alias.c_str());
+    }
+    else
+    {
+        sai_port_oper_status_t oper = static_cast<sai_port_oper_status_t>(attr.value.u32);
+        vector<FieldValueTuple> tuples;
+        FieldValueTuple tuple("system_oper_status", oper_status_strings.at(oper));
+        tuples.push_back(tuple);
+        m_portTable->set(port.m_alias, tuples);
+    }
+
+    attr.id = SAI_PORT_ATTR_OPER_STATUS;
+    ret = sai_port_api->get_port_attribute(port.m_line_side_id, 1, &attr);
+    if (ret != SAI_STATUS_SUCCESS)
+    {
+        SWSS_LOG_ERROR("BOX: Failed to get line_oper_status for %s", port.m_alias.c_str());
+    }
+    else
+    {
+        sai_port_oper_status_t oper = static_cast<sai_port_oper_status_t>(attr.value.u32);
+        vector<FieldValueTuple> tuples;
+        FieldValueTuple tuple("line_oper_status", oper_status_strings.at(oper));
+        tuples.push_back(tuple);
+        m_portTable->set(port.m_alias, tuples);
+    }
+}
+
 bool PortsOrch::decrFdbCount(const std::string& alias, int count)
 {
     auto itr = m_portList.find(alias);
m_counterTable; unique_ptr
m_counterLagTable; @@ -199,6 +213,10 @@ class PortsOrch : public Orch, public Subject FlexCounterManager port_buffer_drop_stat_manager; FlexCounterManager queue_stat_manager; + FlexCounterManager gb_port_stat_manager; + shared_ptr m_gb_counter_db; + unique_ptr
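The gb_port_stat_manager, m_gb_counter_db, and m_gbcounterTable members added here back the GBPORT_STAT_COUNTER group declared above, and setGearboxFlexCounterStatus() is its public on/off switch. A plausible call site, sketched on the assumption that a flex-counter configuration handler forwards the usual FLEX_COUNTER_STATUS field (the caller itself is not part of this diff):

    // hypothetical handler for a FLEX_COUNTER_TABLE|PORT update
    if (field == "FLEX_COUNTER_STATUS")
    {
        gPortsOrch->setGearboxFlexCounterStatus(value == "enable");
    }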
diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am index 2a6dade254..54fb4003a2 100644 --- a/tests/mock_tests/Makefile.am +++ b/tests/mock_tests/Makefile.am @@ -2,7 +2,7 @@ FLEX_CTR_DIR = $(top_srcdir)/orchagent/flex_counter DEBUG_CTR_DIR = $(top_srcdir)/orchagent/debug_counter P4_ORCH_DIR = $(top_srcdir)/orchagent/p4orch -INCLUDES = -I $(FLEX_CTR_DIR) -I $(DEBUG_CTR_DIR) -I $(top_srcdir)/lib +INCLUDES = -I $(FLEX_CTR_DIR) -I $(DEBUG_CTR_DIR) -I $(top_srcdir)/lib -I $(top_srcdir)/cfgmgr CFLAGS_SAI = -I /usr/include/sai @@ -26,6 +26,7 @@ tests_SOURCES = aclorch_ut.cpp \ routeorch_ut.cpp \ qosorch_ut.cpp \ bufferorch_ut.cpp \ + buffermgrdyn_ut.cpp \ fdborch/flush_syncd_notif_ut.cpp \ copporch_ut.cpp \ saispy_ut.cpp \ @@ -95,7 +96,8 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/lagid.cpp \ $(top_srcdir)/orchagent/bfdorch.cpp \ $(top_srcdir)/orchagent/srv6orch.cpp \ - $(top_srcdir)/orchagent/nvgreorch.cpp + $(top_srcdir)/orchagent/nvgreorch.cpp \ + $(top_srcdir)/cfgmgr/buffermgrdyn.cpp tests_SOURCES += $(FLEX_CTR_DIR)/flex_counter_manager.cpp $(FLEX_CTR_DIR)/flex_counter_stat_manager.cpp $(FLEX_CTR_DIR)/flow_counter_handler.cpp $(FLEX_CTR_DIR)/flowcounterrouteorch.cpp tests_SOURCES += $(DEBUG_CTR_DIR)/debug_counter.cpp $(DEBUG_CTR_DIR)/drop_counter.cpp diff --git a/tests/mock_tests/aclorch_ut.cpp b/tests/mock_tests/aclorch_ut.cpp index 0d81c93f69..9886e5d8ff 100644 --- a/tests/mock_tests/aclorch_ut.cpp +++ b/tests/mock_tests/aclorch_ut.cpp @@ -1755,4 +1755,96 @@ namespace aclorch_test // try to delete non existing acl rule ASSERT_TRUE(orch->m_aclOrch->removeAclRule(tableId, ruleId)); } + + sai_switch_api_t *old_sai_switch_api; + + // The following function is used to override SAI API get_switch_attribute to request passing + // mandatory ACL actions to SAI when creating mirror ACL table. + sai_status_t getSwitchAttribute(_In_ sai_object_id_t switch_id,_In_ uint32_t attr_count, + _Inout_ sai_attribute_t *attr_list) + { + if (attr_count == 1) + { + switch(attr_list[0].id) + { + case SAI_SWITCH_ATTR_MAX_ACL_ACTION_COUNT: + attr_list[0].value.u32 = 2; + return SAI_STATUS_SUCCESS; + case SAI_SWITCH_ATTR_ACL_STAGE_INGRESS: + case SAI_SWITCH_ATTR_ACL_STAGE_EGRESS: + attr_list[0].value.aclcapability.action_list.count = 2; + attr_list[0].value.aclcapability.action_list.list[0]= SAI_ACL_ACTION_TYPE_COUNTER; + attr_list[0].value.aclcapability.action_list.list[1]= + attr_list[0].id == SAI_SWITCH_ATTR_ACL_STAGE_INGRESS ? + SAI_ACL_ACTION_TYPE_MIRROR_INGRESS : SAI_ACL_ACTION_TYPE_MIRROR_EGRESS; + attr_list[0].value.aclcapability.is_action_list_mandatory = true; + return SAI_STATUS_SUCCESS; + } + } + return old_sai_switch_api->get_switch_attribute(switch_id, attr_count, attr_list); + } + + TEST_F(AclOrchTest, AclTableCreationWithMandatoryActions) + { + // Override SAI API get_switch_attribute to request passing mandatory ACL actions to SAI + // when creating mirror ACL table. + old_sai_switch_api = sai_switch_api; + sai_switch_api_t new_sai_switch_api = *sai_switch_api; + sai_switch_api = &new_sai_switch_api; + sai_switch_api->get_switch_attribute = getSwitchAttribute; + + // Set platform env to enable support of MIRRORV6 ACL table. + bool unset_platform_env = false; + if (!getenv("platform")) + { + setenv("platform", VS_PLATFORM_SUBSTRING, 0); + unset_platform_env = true; + } + + auto orch = createAclOrch(); + + for (const auto &acl_table_type : { TABLE_TYPE_MIRROR, TABLE_TYPE_MIRRORV6, TABLE_TYPE_MIRROR_DSCP }) + { + for (const auto &acl_table_stage : { STAGE_INGRESS, STAGE_EGRESS }) + { + // Create ACL table. + string acl_table_id = "mirror_acl_table"; + auto kvfAclTable = deque<KeyOpFieldsValuesTuple>( + { { acl_table_id, + SET_COMMAND, + { { ACL_TABLE_DESCRIPTION, acl_table_type }, + { ACL_TABLE_TYPE, acl_table_type }, + { ACL_TABLE_STAGE, acl_table_stage }, + { ACL_TABLE_PORTS, "1,2" } } } }); + orch->doAclTableTask(kvfAclTable); + auto acl_table = orch->getAclTable(acl_table_id); + ASSERT_NE(acl_table, nullptr); + + // Verify mandatory ACL actions have been added. + auto acl_actions = acl_table->type.getActions(); + ASSERT_NE(acl_actions.find(SAI_ACL_ACTION_TYPE_COUNTER), acl_actions.end()); + sai_acl_action_type_t action = strcmp(acl_table_stage, STAGE_INGRESS) == 0 ? + SAI_ACL_ACTION_TYPE_MIRROR_INGRESS : SAI_ACL_ACTION_TYPE_MIRROR_EGRESS; + ASSERT_NE(acl_actions.find(action), acl_actions.end()); + + // Delete ACL table. + kvfAclTable = deque<KeyOpFieldsValuesTuple>( + { { acl_table_id, + DEL_COMMAND, + {} } }); + orch->doAclTableTask(kvfAclTable); + acl_table = orch->getAclTable(acl_table_id); + ASSERT_EQ(acl_table, nullptr); + } + } + + // Unset platform env. + if (unset_platform_env) + { + unsetenv("platform"); + } + + // Restore sai_switch_api. + sai_switch_api = old_sai_switch_api; + } } // namespace nsAclOrchTest
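The test above spies on SAI by installing a patched copy of the global switch API table; the same save/override/restore shape works for any SAI API in these mock tests. A generic sketch (hookGetSwitchAttribute stands in for whatever hook is being installed):

    sai_switch_api_t patched = *sai_switch_api;            // shallow copy of the real function table
    patched.get_switch_attribute = hookGetSwitchAttribute; // override a single entry point
    old_sai_switch_api = sai_switch_api;                   // remember the original
    sai_switch_api = &patched;                             // install the spy
    // ... exercise the code under test ...
    sai_switch_api = old_sai_switch_api;                   // restore before the next test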
diff --git a/tests/mock_tests/buffermgrdyn_ut.cpp b/tests/mock_tests/buffermgrdyn_ut.cpp new file mode 100644 index 0000000000..b64a367c79 --- /dev/null +++ b/tests/mock_tests/buffermgrdyn_ut.cpp @@ -0,0 +1,902 @@ +#define private public // make Directory::m_values available so the test can clean it.
+#include "directory.h" +#undef private +#define protected public +#include "orch.h" +#undef protected +#include "ut_helper.h" +#include "mock_orchagent_main.h" +#include "mock_table.h" +#define private public +#include "buffermgrdyn.h" +#undef private +#include "warm_restart.h" + +extern string gMySwitchType; + + +namespace buffermgrdyn_test +{ + using namespace std; + + shared_ptr m_app_db = make_shared("APPL_DB", 0); + shared_ptr m_config_db = make_shared("CONFIG_DB", 0); + shared_ptr m_state_db = make_shared("STATE_DB", 0); + + BufferMgrDynamic *m_dynamicBuffer; + SelectableTimer m_selectableTable(timespec({ .tv_sec = BUFFERMGR_TIMER_PERIOD, .tv_nsec = 0 }), 0); + Table portTable(m_config_db.get(), CFG_PORT_TABLE_NAME); + Table cableLengthTable(m_config_db.get(), CFG_PORT_CABLE_LEN_TABLE_NAME); + Table bufferPoolTable(m_config_db.get(), CFG_BUFFER_POOL_TABLE_NAME); + Table bufferProfileTable(m_config_db.get(), CFG_BUFFER_PROFILE_TABLE_NAME); + Table bufferPgTable(m_config_db.get(), CFG_BUFFER_PG_TABLE_NAME); + Table bufferQueueTable(m_config_db.get(), CFG_BUFFER_QUEUE_TABLE_NAME); + Table bufferIngProfileListTable(m_config_db.get(), CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME); + Table bufferEgrProfileListTable(m_config_db.get(), CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME); + Table defaultLosslessParameterTable(m_config_db.get(), CFG_DEFAULT_LOSSLESS_BUFFER_PARAMETER); + Table appPortTable(m_app_db.get(), APP_PORT_TABLE_NAME); + Table appBufferPoolTable(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME); + Table appBufferProfileTable(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); + Table appBufferPgTable(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); + Table appBufferQueueTable(m_app_db.get(), APP_BUFFER_QUEUE_TABLE_NAME); + Table appBufferIngProfileListTable(m_app_db.get(), APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME); + Table appBufferEgrProfileListTable(m_app_db.get(), APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME); + Table bufferMaxParamTable(m_state_db.get(), STATE_BUFFER_MAXIMUM_VALUE_TABLE); + Table statePortTable(m_state_db.get(), STATE_PORT_TABLE_NAME); + Table stateBufferTable(m_state_db.get(), STATE_BUFFER_MAXIMUM_VALUE_TABLE); + + map> zeroProfileMap; + vector zeroProfile; + + struct BufferMgrDynTest : public ::testing::Test + { + map> testBufferProfile; + map> testBufferPool; + + void SetUpReclaimingBuffer() + { + zeroProfileMap["ingress_zero_pool"] = { + {"mode", "static"}, + {"type", "ingress"}, + {"size", "0"} + }; + zeroProfileMap["ingress_lossy_pg_zero_profile"] = { + {"pool", "ingress_zero_pool"}, + {"size", "0"}, + {"static_th", "0"} + }; + zeroProfileMap["ingress_lossless_zero_profile"] = { + {"pool", "ingress_lossless_pool"}, + {"size", "0"}, + {"dynamic_th", "-8"} + }; + zeroProfileMap["egress_lossy_zero_profile"] = { + {"pool", "egress_lossy_pool"}, + {"size", "0"}, + {"dynamic_th", "-8"} + }; + zeroProfileMap["egress_lossless_zero_profile"] = { + {"pool", "egress_lossless_pool"}, + {"size", "0"}, + {"dynamic_th", "-8"} + }; + + zeroProfile = { + { + "BUFFER_POOL_TABLE:ingress_zero_pool", + "SET", + zeroProfileMap["ingress_zero_pool"] + }, + { + "BUFFER_PROFILE_TABLE:ingress_lossy_pg_zero_profile", + "SET", + zeroProfileMap["ingress_lossy_pg_zero_profile"] + }, + { + "BUFFER_PROFILE_TABLE:ingress_lossless_zero_profile", + "SET", + zeroProfileMap["ingress_lossless_zero_profile"] + }, + { + "BUFFER_PROFILE_TABLE:egress_lossy_zero_profile", + "SET", + zeroProfileMap["egress_lossy_zero_profile"] + }, + { + "BUFFER_PROFILE_TABLE:egress_lossless_zero_profile", + "SET", + 
zeroProfileMap["egress_lossless_zero_profile"] + }, + { + "control_fields", + "SET", + { + {"pgs_to_apply_zero_profile", "0"}, + {"ingress_zero_profile", "ingress_lossy_pg_zero_profile"} + } + } + }; + } + + BufferMgrDynTest() + { + testBufferPool["ingress_lossless_pool"] = { + {"mode", "dynamic"}, + {"type", "ingress"}, + {"size", "1024000"} + }; + testBufferPool["egress_lossless_pool"] = { + {"mode", "dynamic"}, + {"type", "egress"}, + {"size", "1024000"} + }; + testBufferPool["egress_lossy_pool"] = { + {"mode", "dynamic"}, + {"type", "egress"}, + {"size", "1024000"} + }; + + testBufferProfile["ingress_lossless_profile"] = { + {"dynamic_th", "7"}, + {"pool", "ingress_lossless_pool"}, + {"size", "0"} + }; + testBufferProfile["egress_lossless_profile"] = { + {"dynamic_th", "7"}, + {"pool", "egress_lossless_pool"}, + {"size", "0"} + }; + testBufferProfile["egress_lossy_profile"] = { + {"dynamic_th", "3"}, + {"pool", "egress_lossy_pool"}, + {"size", "0"} + }; + } + + void SetUp() override + { + setenv("ASIC_VENDOR", "mock_test", 1); + + testing_db::reset(); + + WarmStart::initialize("buffermgrd", "swss"); + WarmStart::checkWarmStart("buffermgrd", "swss"); + } + + void StartBufferManager(shared_ptr> zero_profile=nullptr) + { + // Init switch and create dependencies + vector buffer_table_connectors = { + TableConnector(m_config_db.get(), CFG_PORT_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_PORT_CABLE_LEN_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_POOL_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_PROFILE_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_PG_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_QUEUE_TABLE_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME), + TableConnector(m_config_db.get(), CFG_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME), + TableConnector(m_config_db.get(), CFG_DEFAULT_LOSSLESS_BUFFER_PARAMETER), + TableConnector(m_state_db.get(), STATE_BUFFER_MAXIMUM_VALUE_TABLE), + TableConnector(m_state_db.get(), STATE_PORT_TABLE_NAME) + }; + + m_dynamicBuffer = new BufferMgrDynamic(m_config_db.get(), m_state_db.get(), m_app_db.get(), buffer_table_connectors, nullptr, zero_profile); + } + + void InitPort(const string &port="Ethernet0", const string &admin_status="up") + { + portTable.set(port, + { + {"speed", "100000"}, + {"mtu", "9100"}, + {"admin_status", admin_status} + }); + m_dynamicBuffer->addExistingData(&portTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void SetPortInitDone() + { + appPortTable.set("PortInitDone", + { + {"lanes", "0"} + }); + m_dynamicBuffer->addExistingData(&appPortTable); + static_cast(m_dynamicBuffer)->doTask(); + } + + void InitMmuSize() + { + bufferMaxParamTable.set("global", + { + {"mmu_size", "1024000"} + }); + if (m_dynamicBuffer) + m_dynamicBuffer->addExistingData(&bufferMaxParamTable); + } + + void InitDefaultLosslessParameter(const string &over_subscribe_ratio="") + { + if (over_subscribe_ratio.empty()) + { + defaultLosslessParameterTable.set("AZURE", + { + {"default_dynamic_th", "0"} + }); + } + else + { + defaultLosslessParameterTable.set("AZURE", + { + {"default_dynamic_th", "0"}, + {"over_subscribe_ratio", over_subscribe_ratio} + }); + } + if (m_dynamicBuffer) + { + m_dynamicBuffer->addExistingData(&defaultLosslessParameterTable); + static_cast(m_dynamicBuffer)->doTask(); + } + } + + void InitBufferPool() + { + for(auto &i: testBufferPool) + { + bufferPoolTable.set(i.first, i.second); + } + + 
m_dynamicBuffer->addExistingData(&bufferPoolTable); + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + } + + void ClearBufferPool(const string &skippedPool="", const string &clearPool="") + { + std::deque<KeyOpFieldsValuesTuple> entries; + for (auto &i: testBufferPool) + { + if (skippedPool == i.first) + continue; + if (!clearPool.empty() && clearPool != i.first) + continue; + entries.push_back({i.first, "DEL", {}}); + } + + auto consumer = dynamic_cast<Consumer *>(m_dynamicBuffer->getExecutor(CFG_BUFFER_POOL_TABLE_NAME)); + consumer->addToSync(entries); + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + } + + void InitDefaultBufferProfile() + { + for (auto &i: testBufferProfile) + { + bufferProfileTable.set(i.first, i.second); + } + + m_dynamicBuffer->addExistingData(&bufferProfileTable); + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + } + + void ClearBufferProfile() + { + std::deque<KeyOpFieldsValuesTuple> entries; + for (auto &i: testBufferProfile) + entries.push_back({i.first, "DEL", {}}); + + auto consumer = dynamic_cast<Consumer *>(m_dynamicBuffer->getExecutor(CFG_BUFFER_PROFILE_TABLE_NAME)); + consumer->addToSync(entries); + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + } + + void InitBufferPg(const string &key, const string &profile="NULL") + { + bufferPgTable.set(key, + { + {"profile", profile} + }); + m_dynamicBuffer->addExistingData(&bufferPgTable); + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + } + + void ClearBufferObject(const string &key, const string &tableName) + { + std::deque<KeyOpFieldsValuesTuple> entries; + entries.push_back({key, "DEL", {}}); + + auto consumer = dynamic_cast<Consumer *>(m_dynamicBuffer->getExecutor(tableName)); + consumer->addToSync(entries); + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + + Table tableObject(m_config_db.get(), tableName); + tableObject.del(key); + } + + void InitBufferQueue(const string &key, const string &profile) + { + bufferQueueTable.set(key, + { + {"profile", profile} + }); + m_dynamicBuffer->addExistingData(&bufferQueueTable); + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + } + + void InitBufferProfileList(const string &ports, const string &profileList, Table &appDb) + { + appDb.set(ports, + { + {"profile_list", profileList} + }); + m_dynamicBuffer->addExistingData(&appDb); + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + } + + void InitCableLength(const string &port, const string &length) + { + cableLengthTable.set("AZURE", + { + {port, length} + }); + m_dynamicBuffer->addExistingData(&cableLengthTable); + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + } + + void HandleTable(Table &table) + { + m_dynamicBuffer->addExistingData(&table); + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + } + + void CheckPool(buffer_pool_t &pool, const vector<FieldValueTuple> &tuples) + { + for (auto i : tuples) + { + if (fvField(i) == buffer_pool_type_field_name) + { + if (fvValue(i) == buffer_value_ingress) + ASSERT_EQ(pool.direction, BUFFER_INGRESS); + else + ASSERT_EQ(pool.direction, BUFFER_EGRESS); + } + else if (fvField(i) == buffer_pool_mode_field_name) + { + ASSERT_EQ(pool.mode, fvValue(i)); + } + else if (fvField(i) == buffer_size_field_name) + { + ASSERT_TRUE(!pool.dynamic_size); + ASSERT_EQ("1024000", fvValue(i)); + } + } + } + + void CheckProfile(buffer_profile_t &profile, const vector<FieldValueTuple> &tuples) + { + for (auto i : tuples) + { + if (fvField(i) == buffer_pool_field_name) + { + ASSERT_EQ(profile.pool_name, fvValue(i)); + if (strstr(profile.pool_name.c_str(), "ingress") != nullptr) + ASSERT_EQ(profile.direction, BUFFER_INGRESS); + else + ASSERT_EQ(profile.direction, BUFFER_EGRESS); + } + else if (fvField(i) == buffer_dynamic_th_field_name) + { + ASSERT_EQ(profile.threshold_mode, buffer_dynamic_th_field_name);
+ ASSERT_EQ(profile.threshold, fvValue(i)); + } + else if (fvField(i) == buffer_size_field_name) + { + ASSERT_EQ(profile.size, fvValue(i)); + } + } + } + + void CheckPg(const string &port, const string &key, const string &expectedProfile="") + { + vector<FieldValueTuple> fieldValues; + + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port][key].dynamic_calculated); + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port][key].lossless); + + auto existInDb = (!expectedProfile.empty()); + ASSERT_EQ(appBufferPgTable.get(key, fieldValues), existInDb); + if (existInDb) + { + ASSERT_EQ(m_dynamicBuffer->m_portPgLookup[port][key].running_profile_name, expectedProfile); + ASSERT_EQ(fvField(fieldValues[0]), "profile"); + ASSERT_EQ(fvValue(fieldValues[0]), expectedProfile); + } + } + + void CheckQueue(const string &port, const string &key, const string &expectedProfile, bool existInDb) + { + vector<FieldValueTuple> fieldValues; + + ASSERT_EQ(m_dynamicBuffer->m_portQueueLookup[port][key].running_profile_name, expectedProfile); + ASSERT_EQ(appBufferQueueTable.get(key, fieldValues), existInDb); + if (existInDb) + { + ASSERT_EQ(fvField(fieldValues[0]), "profile"); + ASSERT_EQ(fvValue(fieldValues[0]), expectedProfile); + } + } + + void CheckProfileList(const string &port, bool ingress, const string &profileList, bool existInDb=true) + { + vector<FieldValueTuple> fieldValues; + + auto direction = ingress ? BUFFER_INGRESS : BUFFER_EGRESS; + ASSERT_EQ(m_dynamicBuffer->m_portProfileListLookups[direction][port], profileList); + + auto &appDb = ingress ? appBufferIngProfileListTable : appBufferEgrProfileListTable; + + ASSERT_EQ(appDb.get(port, fieldValues), existInDb); + if (existInDb) + { + ASSERT_EQ(fieldValues.size(), 1); + ASSERT_EQ(fvField(fieldValues[0]), "profile_list"); + ASSERT_EQ(fvValue(fieldValues[0]), profileList); + } + } + + void CheckIfVectorsMatch(const vector<FieldValueTuple> &vec1, const vector<FieldValueTuple> &vec2) + { + ASSERT_EQ(vec1.size(), vec2.size()); + for (auto &i : vec1) + { + bool found = false; + for (auto &j : vec2) + { + if (i == j) + { + found = true; + break; + } + } + ASSERT_TRUE(found); + } + } + + void TearDown() override + { + delete m_dynamicBuffer; + m_dynamicBuffer = nullptr; + + unsetenv("ASIC_VENDOR"); + } + }; + + /* + * Dependencies + * 1. Buffer manager reads default lossless parameter and maximum mmu size at the beginning + * 2. Maximum mmu size will be pushed ahead of PortInitDone + * 3. Buffer pools can be ready at any time after PortInitDone + * 4. Buffer tables can be applied in any order + * 5. Port and buffer PG can be applied in any order + * 6. Sequence after config qos clear + */ + + /* + * Normal starting flow + * 1. Start buffer manager with default lossless parameter and maximum mmu size + * 2. PortInitDone + * 3. Cable length and port configuration + * 4. Buffer tables: BUFFER_POOL/BUFFER_PROFILE/BUFFER_PG + * 5. Queue and buffer profile lists with/without port created + */
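Condensed into the fixture's own helpers, the ordering that the first test below walks through is:

    InitDefaultLosslessParameter();     // read at startup (dependency 1)
    InitMmuSize();                      // pushed ahead of PortInitDone (dependency 2)
    StartBufferManager();
    InitPort();                         // port enters PORT_INITIALIZING
    SetPortInitDone();
    InitBufferPool();                   // pools any time after PortInitDone (dependency 3)
    InitDefaultBufferProfile();         // buffer tables in any order (dependency 4)
    InitCableLength("Ethernet0", "5m"); // port becomes PORT_READY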
+ TEST_F(BufferMgrDynTest, BufferMgrTestNormalFlows) + { + vector<FieldValueTuple> fieldValues; + vector<string> keys; + + // Prepare information that will be read at the beginning + InitDefaultLosslessParameter(); + InitMmuSize(); + + StartBufferManager(); + + InitPort(); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_INITIALIZING); + + SetPortInitDone(); + // Timer will be called + m_dynamicBuffer->doTask(m_selectableTable); + + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 0); + InitBufferPool(); + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 3); + appBufferPoolTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + for (auto i : testBufferPool) + { + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], testBufferPool[i.first]); + fieldValues.clear(); + appBufferPoolTable.get(i.first, fieldValues); + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], fieldValues); + } + + InitDefaultBufferProfile(); + appBufferProfileTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + for (auto i : testBufferProfile) + { + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], testBufferProfile[i.first]); + fieldValues.clear(); + appBufferProfileTable.get(i.first, fieldValues); + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], fieldValues); + } + + InitCableLength("Ethernet0", "5m"); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_READY); + + InitBufferPg("Ethernet0|3-4"); + + auto expectedProfile = "pg_lossless_100000_5m_profile"; + CheckPg("Ethernet0", "Ethernet0:3-4", expectedProfile); + auto &portPgMap = m_dynamicBuffer->m_bufferProfileLookup[expectedProfile].port_pgs; + ASSERT_EQ(portPgMap.size(), 1); + ASSERT_TRUE(portPgMap.find("Ethernet0:3-4") != portPgMap.end()); + + // Multiple port key + InitBufferPg("Ethernet2,Ethernet4|3-4"); + + CheckPg("Ethernet2", "Ethernet2:3-4"); + CheckPg("Ethernet4", "Ethernet4:3-4"); + + // Buffer queue, ingress and egress profile list table + InitPort("Ethernet2"); + InitPort("Ethernet4"); + + InitBufferQueue("Ethernet2,Ethernet4,Ethernet6|3-4", "egress_lossless_profile"); + CheckQueue("Ethernet2", "Ethernet2:3-4", "egress_lossless_profile", true); + CheckQueue("Ethernet4", "Ethernet4:3-4", "egress_lossless_profile", true); + + InitBufferProfileList("Ethernet2,Ethernet4,Ethernet6", "ingress_lossless_profile", bufferIngProfileListTable); + CheckProfileList("Ethernet2", true, "ingress_lossless_profile"); + CheckProfileList("Ethernet4", true, "ingress_lossless_profile"); + + InitBufferProfileList("Ethernet2,Ethernet4,Ethernet6", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + CheckProfileList("Ethernet2", false, "egress_lossless_profile,egress_lossy_profile"); + CheckProfileList("Ethernet4", false, "egress_lossless_profile,egress_lossy_profile"); + + // Check whether queue, profile lists have been applied after port created + InitPort("Ethernet6"); + CheckQueue("Ethernet6", "Ethernet6:3-4", "egress_lossless_profile", true); + CheckProfileList("Ethernet6", true, "ingress_lossless_profile"); + CheckProfileList("Ethernet6", false, "egress_lossless_profile,egress_lossy_profile"); + } + + /* + * Verify a buffer pool will not be created without corresponding item in BUFFER_POOL + * otherwise it interferes starting flow + * 1. Configure oversubscribe ratio + * 2.
Check whether ingress_lossless_pool is created + */ + TEST_F(BufferMgrDynTest, BufferMgrTestNoPoolCreatedWithoutDb) + { + StartBufferManager(); + + InitMmuSize(); + InitDefaultLosslessParameter("0"); + InitPort("Ethernet0"); + + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + m_dynamicBuffer->doTask(m_selectableTable); + + ASSERT_TRUE(m_dynamicBuffer->m_bufferPoolLookup.empty()); + + InitBufferPool(); + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + + ASSERT_FALSE(m_dynamicBuffer->m_bufferPoolLookup.empty()); + } + + /* + * Sad flows test. Order is reversed in the following cases: + * - The buffer table creating. The tables referencing other tables are created first + * - Buffer manager starts with neither default lossless parameter nor maximum mmu size available + * + * 1. Start buffer manager without default lossless parameter and maximum mmu size + * 2. Buffer tables are applied in order: + * - Port configuration + * - BUFFER_QUEUE/buffer profile list + * - BUFFER_PG/BUFFER_PROFILE/BUFFER_POOL + * - PortInitDone + * 3. Cable length + * 4. Create a buffer profile with wrong threshold mode or direction + * and verify it will not be propagated to SAI + */ + TEST_F(BufferMgrDynTest, BufferMgrTestSadFlows) + { + vector<string> ts; + vector<FieldValueTuple> fieldValues; + vector<string> keys; + + StartBufferManager(); + + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + + InitPort(); + + InitBufferPg("Ethernet0|3-4"); + // No item generated in BUFFER_PG_TABLE + CheckPg("Ethernet0", "Ethernet0:3-4"); + + InitBufferQueue("Ethernet0|3-4", "egress_lossless_profile"); + ASSERT_TRUE(m_dynamicBuffer->m_portQueueLookup["Ethernet0"]["Ethernet0:3-4"].running_profile_name.empty()); + + InitBufferProfileList("Ethernet0", "ingress_lossless_profile", bufferIngProfileListTable); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_INGRESS]["Ethernet0"].empty()); + + InitBufferProfileList("Ethernet0", "egress_lossless_profile,egress_lossy_profile", bufferEgrProfileListTable); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_EGRESS]["Ethernet0"].empty()); + + InitDefaultBufferProfile(); + appBufferProfileTable.getKeys(keys); + ASSERT_EQ(keys.size(), 0); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 0); + + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 0); + InitBufferPool(); + appBufferPoolTable.getKeys(keys); + ASSERT_EQ(keys.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferPoolLookup.size(), 3); + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.size(), 3); + for (auto i : testBufferProfile) + { + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], testBufferProfile[i.first]); + fieldValues.clear(); + appBufferProfileTable.get(i.first, fieldValues); + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[i.first], fieldValues); + } + for (auto i : testBufferPool) + { + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], testBufferPool[i.first]); + fieldValues.clear(); + appBufferPoolTable.get(i.first, fieldValues); + CheckPool(m_dynamicBuffer->m_bufferPoolLookup[i.first], fieldValues); + } + + ASSERT_EQ(m_dynamicBuffer->m_portPgLookup.size(), 1); + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + CheckProfileList("Ethernet0", true, "ingress_lossless_profile", false); + CheckProfileList("Ethernet0", false, "egress_lossless_profile,egress_lossy_profile", false); + + // All default buffer profiles should be generated and pushed into BUFFER_PROFILE_TABLE + static_cast<Orch *>(m_dynamicBuffer)->doTask(); + + InitMmuSize(); + SetPortInitDone(); + m_dynamicBuffer->doTask(m_selectableTable); +
InitDefaultLosslessParameter(); + m_dynamicBuffer->doTask(m_selectableTable); + + CheckPg("Ethernet0", "Ethernet0:3-4"); + InitCableLength("Ethernet0", "5m"); + auto expectedProfile = "pg_lossless_100000_5m_profile"; + CheckPg("Ethernet0", "Ethernet0:3-4", expectedProfile); + CheckQueue("Ethernet0", "Ethernet0:3-4", "egress_lossless_profile", true); + + CheckProfileList("Ethernet0", true, "ingress_lossless_profile", true); + CheckProfileList("Ethernet0", false, "egress_lossless_profile,egress_lossy_profile", true); + + InitPort("Ethernet4"); + InitPort("Ethernet6"); + InitBufferQueue("Ethernet6|0-2", "egress_lossy_profile"); + InitBufferProfileList("Ethernet6", "ingress_lossless_profile", bufferIngProfileListTable); + + // Buffer queue/PG/profile lists with wrong direction should not overwrite the existing ones + vector<string> ingressProfiles = {"egress_lossy_profile", "ingress_profile", ""}; + vector<string> portsToTest = {"Ethernet0", "Ethernet4"}; + for (auto port : portsToTest) + { + for (auto ingressProfile : ingressProfiles) + { + InitBufferPg(port + "|3-4", ingressProfile); + if (port == "Ethernet0") + { + ASSERT_EQ(m_dynamicBuffer->m_portPgLookup["Ethernet0"]["Ethernet0:3-4"].running_profile_name, expectedProfile); + ASSERT_TRUE(appBufferPgTable.get("Ethernet0:3-4", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", expectedProfile}}); + } + else + { + ASSERT_TRUE(m_dynamicBuffer->m_portPgLookup[port].find(port + ":3-4") == m_dynamicBuffer->m_portPgLookup[port].end()); + ASSERT_FALSE(appBufferPgTable.get(port + ":3-4", fieldValues)); + } + } + } + + InitBufferQueue("Ethernet4|0-2", "ingress_lossless_profile"); + ASSERT_TRUE(m_dynamicBuffer->m_portQueueLookup["Ethernet4"]["Ethernet4:0-2"].running_profile_name.empty()); + ASSERT_FALSE(appBufferQueueTable.get("Ethernet4:0-2", fieldValues)); + // No pending notifications + ts.clear(); + m_dynamicBuffer->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + InitBufferQueue("Ethernet6|0-2", "ingress_lossless_profile"); + ASSERT_EQ(m_dynamicBuffer->m_portQueueLookup["Ethernet6"]["Ethernet6:0-2"].running_profile_name, "egress_lossy_profile"); + ASSERT_TRUE(appBufferQueueTable.get("Ethernet6:0-2", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile", "egress_lossy_profile"}}); + // No pending notifications + m_dynamicBuffer->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + // Wrong direction + InitBufferProfileList("Ethernet4", "egress_lossless_profile", bufferIngProfileListTable); + ASSERT_TRUE(m_dynamicBuffer->m_portProfileListLookups[BUFFER_INGRESS]["Ethernet4"].empty()); + ASSERT_FALSE(appBufferIngProfileListTable.get("Ethernet4", fieldValues)); + // No pending notifications + m_dynamicBuffer->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + InitBufferProfileList("Ethernet6", "egress_lossless_profile", bufferIngProfileListTable); + ASSERT_EQ(m_dynamicBuffer->m_portProfileListLookups[BUFFER_INGRESS]["Ethernet6"], "ingress_lossless_profile"); + ASSERT_TRUE(appBufferIngProfileListTable.get("Ethernet6", fieldValues)); + CheckIfVectorsMatch(fieldValues, {{"profile_list", "ingress_lossless_profile"}}); + // No pending notifications + m_dynamicBuffer->dumpPendingTasks(ts); + ASSERT_EQ(ts.size(), 0); + + // Profile with wrong mode should not override the existing entries + vector<string> wrong_profile_names = {"ingress_lossless_profile", "wrong_param_profile"}; + vector<vector<FieldValueTuple>> wrong_profile_patterns = { + // wrong threshold mode + { + {"pool", "ingress_lossless_pool"}, + {"static_th", "100"}, + {"size", "0"} + }, + // unconfigured pool + {
{"pool", "ingress_pool"}, + {"dynamic_th", "0"}, + {"size", "0"} + } + }; + auto expected_pending_tasks = 0; + for (auto wrong_profile_name : wrong_profile_names) + { + bool exist = (testBufferProfile.find(wrong_profile_name) != testBufferProfile.end()); + for (auto wrong_profile_pattern : wrong_profile_patterns) + { + bufferProfileTable.set(wrong_profile_name, wrong_profile_pattern); + m_dynamicBuffer->addExistingData(&bufferProfileTable); + static_cast(m_dynamicBuffer)->doTask(); + if (exist) + CheckProfile(m_dynamicBuffer->m_bufferProfileLookup[wrong_profile_name], testBufferProfile[wrong_profile_name]); + else + ASSERT_EQ(m_dynamicBuffer->m_bufferProfileLookup.find(wrong_profile_name), m_dynamicBuffer->m_bufferProfileLookup.end()); + ASSERT_EQ(appBufferProfileTable.get(wrong_profile_name, fieldValues), exist); + // No pending notifications + ts.clear(); + m_dynamicBuffer->dumpPendingTasks(ts); + if (get<1>(wrong_profile_pattern[0]) == "ingress_pool") + expected_pending_tasks++; + ASSERT_EQ(ts.size(), expected_pending_tasks); + } + } + } + + /* + * Port configuration flow + * Port table items are received in different order + */ + TEST_F(BufferMgrDynTest, BufferMgrTestPortConfigFlow) + { + // Prepare information that will be read at the beginning + StartBufferManager(); + + /* + * Speed, admin up, cable length + */ + portTable.set("Ethernet0", + { + {"speed", "100000"} + }); + HandleTable(portTable); + ASSERT_TRUE(m_dynamicBuffer->m_portInfoLookup.find("Ethernet0") != m_dynamicBuffer->m_portInfoLookup.end()); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_ADMIN_DOWN); + + portTable.set("Ethernet0", + { + {"speed", "100000"}, + {"admin_status", "up"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_INITIALIZING); + + cableLengthTable.set("AZURE", + { + {"Ethernet0", "5m"} + }); + HandleTable(cableLengthTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet0"].state, PORT_READY); + + /* + * Speed, admin down, cable length, admin up + */ + portTable.set("Ethernet4", + { + {"speed", "100000"}, + {"admin_status", "down"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet4"].state, PORT_ADMIN_DOWN); + cableLengthTable.set("AZURE", + { + {"Ethernet4", "5m"} + }); + HandleTable(cableLengthTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet4"].state, PORT_ADMIN_DOWN); + portTable.set("Ethernet4", + { + {"speed", "100000"}, + {"admin_status", "up"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet4"].state, PORT_READY); + + /* + * Auto-negotiation: supported speeds received after port table + */ + portTable.set("Ethernet8", + { + {"speed", "100000"}, + {"admin_status", "up"}, + {"autoneg", "on"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].state, PORT_INITIALIZING); + ASSERT_TRUE(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].effective_speed.empty()); + + cableLengthTable.set("AZURE", + { + {"Ethernet8", "5m"} + }); + HandleTable(cableLengthTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].state, PORT_INITIALIZING); + + statePortTable.set("Ethernet8", + { + {"supported_speeds", "100000,50000,40000,25000,10000,1000"} + }); + HandleTable(statePortTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].effective_speed, "100000"); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet8"].state, PORT_READY); + + /* + * Auto-negotiation: supported speeds 
received before port table + */ + statePortTable.set("Ethernet12", + { + {"supported_speeds", "100000,50000,40000,25000,10000,1000"} + }); + HandleTable(statePortTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet12"].supported_speeds, "100000,50000,40000,25000,10000,1000"); + + portTable.set("Ethernet12", + { + {"speed", "100000"}, + {"admin_status", "up"}, + {"autoneg", "on"} + }); + HandleTable(portTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet12"].state, PORT_INITIALIZING); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet12"].effective_speed, "100000"); + + cableLengthTable.set("AZURE", + { + {"Ethernet12", "5m"} + }); + HandleTable(cableLengthTable); + ASSERT_EQ(m_dynamicBuffer->m_portInfoLookup["Ethernet12"].state, PORT_READY); + } +} diff --git a/tests/mock_tests/database_config.json b/tests/mock_tests/database_config.json index 8301848683..68f850481d 100644 --- a/tests/mock_tests/database_config.json +++ b/tests/mock_tests/database_config.json @@ -57,6 +57,21 @@ "separator": "|", "instance" : "redis" }, + "GB_ASIC_DB" : { + "id" : 9, + "separator": ":", + "instance" : "redis" + }, + "GB_COUNTERS_DB" : { + "id" : 10, + "separator": ":", + "instance" : "redis" + }, + "GB_FLEX_COUNTER_DB" : { + "id" : 11, + "separator": ":", + "instance" : "redis" + }, "CHASSIS_APP_DB" : { "id" : 12, "separator": "|",
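With these entries in place, the gearbox databases resolve by name through the shared connector lookup; a minimal sketch of what the mock tests (and orchagent) can then open, assuming the usual schema constant for the counters table:

    swss::DBConnector gbCountersDb("GB_COUNTERS_DB", 0);
    swss::Table gbCounters(&gbCountersDb, COUNTERS_TABLE);  // "COUNTERS"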
diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index 28df6610fd..7d867396d2 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -163,13 +163,14 @@ namespace portsorch_test ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + vector<string> flex_counter_tables = { CFG_FLEX_COUNTER_TABLE_NAME }; auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); gDirectory.set(flexCounterOrch); - gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); vector<string> buffer_tables = { APP_BUFFER_POOL_TABLE_NAME, APP_BUFFER_PROFILE_TABLE_NAME, APP_BUFFER_QUEUE_TABLE_NAME, @@ -862,7 +863,7 @@ namespace portsorch_test * updated to DB. */ TEST_F(PortsOrchTest, PortOperStatusIsUpAndOperSpeedIsZero) - { + { Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); // Get SAI default ports to populate DB @@ -887,7 +888,7 @@ namespace portsorch_test Port port; gPortsOrch->getPort("Ethernet0", port); ASSERT_TRUE(port.m_oper_status != SAI_PORT_OPER_STATUS_UP); - + // save original api since we will spy auto orig_port_api = sai_port_api; sai_port_api = new sai_port_api_t(); @@ -905,14 +906,14 @@ namespace portsorch_test // Return 0 for port operational speed attrs[0].value.u32 = 0; } - + return (sai_status_t)SAI_STATUS_SUCCESS; } ); auto exec = static_cast<Notifier *>(gPortsOrch->getExecutor("PORT_STATUS_NOTIFICATIONS")); auto consumer = exec->getNotificationConsumer(); - + // mock a redis reply for notification, it notifies that Ethernet0 is going up mockReply = (redisReply *)calloc(sizeof(redisReply), 1); mockReply->type = REDIS_REPLY_ARRAY; @@ -934,7 +935,7 @@ namespace portsorch_test // trigger the notification consumer->readData(); gPortsOrch->doTask(*consumer); - mockReply = nullptr; + mockReply = nullptr; gPortsOrch->getPort("Ethernet0", port); ASSERT_TRUE(port.m_oper_status == SAI_PORT_OPER_STATUS_UP); diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp index 66df4bfbcc..2c1c4b8535 100644 --- a/tests/mock_tests/routeorch_ut.cpp +++ b/tests/mock_tests/routeorch_ut.cpp @@ -176,15 +176,15 @@ { APP_LAG_MEMBER_TABLE_NAME, portsorch_base_pri } }; + ASSERT_EQ(gPortsOrch, nullptr); + gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); + vector<string> flex_counter_tables = { CFG_FLEX_COUNTER_TABLE_NAME }; auto* flexCounterOrch = new FlexCounterOrch(m_config_db.get(), flex_counter_tables); gDirectory.set(flexCounterOrch); - ASSERT_EQ(gPortsOrch, nullptr); - gPortsOrch = new PortsOrch(m_app_db.get(), m_state_db.get(), ports_tables, m_chassis_app_db.get()); - static const vector<string> route_pattern_tables = { CFG_FLOW_COUNTER_ROUTE_PATTERN_TABLE_NAME, }; diff --git a/tests/test_buffer_dynamic.py b/tests/test_buffer_dynamic.py index 69c577bd26..f0a57899e0 100644 --- a/tests/test_buffer_dynamic.py +++ b/tests/test_buffer_dynamic.py @@ -11,7 +11,6 @@ def dynamic_buffer(dvs): yield buffer_model.disable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) - @pytest.mark.usefixtures("dynamic_buffer") class TestBufferMgrDyn(object): DEFAULT_POLLING_CONFIG = PollingConfig(polling_interval=0.01, timeout=60, strict=True) @@ -129,16 +128,18 @@ def check_new_profile_in_asic_db(self, dvs, profile): if fvs.get('dynamic_th'): sai_threshold_value = fvs['dynamic_th'] sai_threshold_mode = 'SAI_BUFFER_PROFILE_THRESHOLD_MODE_DYNAMIC' + sai_threshold_name = 'SAI_BUFFER_PROFILE_ATTR_SHARED_DYNAMIC_TH' else: sai_threshold_value = fvs['static_th'] sai_threshold_mode = 'SAI_BUFFER_PROFILE_THRESHOLD_MODE_STATIC' + sai_threshold_name = 'SAI_BUFFER_PROFILE_ATTR_SHARED_STATIC_TH' self.asic_db.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_PROFILE", self.newProfileInAsicDb, {'SAI_BUFFER_PROFILE_ATTR_XON_TH': fvs['xon'], 'SAI_BUFFER_PROFILE_ATTR_XOFF_TH': fvs['xoff'], 'SAI_BUFFER_PROFILE_ATTR_RESERVED_BUFFER_SIZE': fvs['size'], 'SAI_BUFFER_PROFILE_ATTR_POOL_ID': self.ingress_lossless_pool_oid, 'SAI_BUFFER_PROFILE_ATTR_THRESHOLD_MODE': sai_threshold_mode, - 'SAI_BUFFER_PROFILE_ATTR_SHARED_DYNAMIC_TH': sai_threshold_value}, + sai_threshold_name: sai_threshold_value}, self.DEFAULT_POLLING_CONFIG) def make_lossless_profile_name(self, speed, cable_length, mtu = None, dynamic_th =
None): diff --git a/tests/test_buffer_traditional.py b/tests/test_buffer_traditional.py index 3d2285fd7b..071217b4e3 100644 --- a/tests/test_buffer_traditional.py +++ b/tests/test_buffer_traditional.py @@ -77,16 +77,15 @@ def get_pg_name_map(self): @pytest.fixture def setup_teardown_test(self, dvs): - try: - self.setup_db(dvs) - self.set_port_qos_table(self.INTF, '2,3,4,6') - pg_name_map = self.get_pg_name_map() - yield pg_name_map - finally: - self.teardown() + self.setup_db(dvs) + self.set_port_qos_table(self.INTF, '2,3,4,6') + time.sleep(2) + + yield + + self.teardown() def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): - self.pg_name_map = setup_teardown_test orig_cable_len = None orig_speed = None try: @@ -112,6 +111,7 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): # Make sure the buffer PG has been created orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_before_test) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", orig_lossless_profile) + self.pg_name_map = self.get_pg_name_map() self.orig_profiles = self.get_asic_buf_profile() # check if the lossless profile for the test speed is already present @@ -174,7 +174,6 @@ def test_zero_cable_len_profile_update(self, dvs, setup_teardown_test): # To verify the BUFFER_PG is not hardcoded to 3,4 # buffermgrd will read 'pfc_enable' entry and apply lossless profile to that queue def test_buffer_pg_update(self, dvs, setup_teardown_test): - self.pg_name_map = setup_teardown_test orig_cable_len = None orig_speed = None test_speed = None @@ -203,6 +202,7 @@ def test_buffer_pg_update(self, dvs, setup_teardown_test): # Make sure the buffer PG has been created orig_lossless_profile = "pg_lossless_{}_{}_profile".format(orig_speed, cable_len_for_test) self.app_db.wait_for_entry("BUFFER_PROFILE_TABLE", orig_lossless_profile) + self.pg_name_map = self.get_pg_name_map() self.orig_profiles = self.get_asic_buf_profile() # get the orig buf profiles attached to the pgs diff --git a/tests/test_flex_counters.py b/tests/test_flex_counters.py index 76a1a535f9..f5a0b146b2 100644 --- a/tests/test_flex_counters.py +++ b/tests/test_flex_counters.py @@ -7,8 +7,6 @@ ROUTE_TO_PATTERN_MAP = "COUNTERS_ROUTE_TO_PATTERN_MAP" NUMBER_OF_RETRIES = 10 CPU_PORT_OID = "0x0" -PORT = "Ethernet0" -PORT_MAP = "COUNTERS_PORT_NAME_MAP" counter_group_meta = { 'port_counter': { @@ -73,7 +71,6 @@ } } -@pytest.mark.usefixtures('dvs_port_manager') class TestFlexCounters(object): def setup_dbs(self, dvs): @@ -133,6 +130,18 @@ def wait_for_interval_set(self, group, interval): assert False, "Polling interval is not applied to FLEX_COUNTER_GROUP_TABLE for group {}, expect={}, actual={}".format(group, interval, interval_value) + def wait_for_buffer_pg_queue_counter(self, map, port, index, isSet): + for retry in range(NUMBER_OF_RETRIES): + counter_oid = self.counters_db.db_connection.hget(map, port + ':' + index) + if (isSet and counter_oid): + return counter_oid + elif (not isSet and not counter_oid): + return None + else: + time.sleep(1) + + assert False, "Counter not {} for port: {}, type: {}, index: {}".format("created" if isSet else "removed", port, map, index) + def verify_no_flex_counters_tables(self, counter_stat): counters_stat_keys = self.flex_db.get_keys("FLEX_COUNTER_TABLE:" + counter_stat) assert len(counters_stat_keys) == 0, "FLEX_COUNTER_TABLE:" + str(counter_stat) + " tables exist before enabling the flex counter group" @@ -692,64 +701,53 @@ def remove_ip_address(self, interface, ip): def 
set_admin_status(self, interface, status): self.config_db.update_entry("PORT", interface, {"admin_status": status}) - - def test_add_remove_ports(self, dvs): + + def test_create_remove_buffer_pg_counter(self, dvs): + """ + Test steps: + 1. Enable PG flex counters. + 2. Configure new buffer priority group for a port + 3. Verify counter is automatically created + 4. Remove the new buffer priority group for the port + 5. Verify counter is automatically removed + + Args: + dvs (object): virtual switch object + """ self.setup_dbs(dvs) - - # set flex counter - counter_key = counter_group_meta['queue_counter']['key'] - counter_stat = counter_group_meta['queue_counter']['group_name'] - counter_map = counter_group_meta['queue_counter']['name_map'] - self.set_flex_counter_group_status(counter_key, counter_map) + meta_data = counter_group_meta['pg_watermark_counter'] + + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + + self.config_db.update_entry('BUFFER_PG', 'Ethernet0|1', {'profile': 'ingress_lossy_profile'}) + counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', True) + self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) + + self.config_db.delete_entry('BUFFER_PG', 'Ethernet0|1') + self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '1', False) + self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) + + def test_create_remove_buffer_queue_counter(self, dvs): + """ + Test steps: + 1. Enable Queue flex counters. + 2. Configure new buffer queue for a port + 3. Verify counter is automatically created + 4. Remove the new buffer queue for the port + 5. Verify counter is automatically removed + + Args: + dvs (object): virtual switch object + """ + self.setup_dbs(dvs) + meta_data = counter_group_meta['queue_counter'] + + self.set_flex_counter_group_status(meta_data['key'], meta_data['name_map']) + + self.config_db.update_entry('BUFFER_QUEUE', 'Ethernet0|7', {'profile': 'egress_lossless_profile'}) + counter_oid = self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', True) + self.wait_for_id_list(meta_data['group_name'], "Ethernet0", counter_oid) -
self.config_db.create_entry("PORT", PORT, fvs) - - dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_of_keys_without_port + 1) - dvs.get_counters_db().wait_for_fields("COUNTERS_QUEUE_NAME_MAP", "", ["%s:0"%(PORT)]) - - # verify queue counters were added - oid_list = [] - counters_queue_map = self.counters_db.get_entry("COUNTERS_QUEUE_NAME_MAP", "") - - for key, oid in counters_queue_map.items(): - if PORT in key: - oid_list.append(oid) - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", counter_stat + ":%s" % oid) - assert len(fields) == 1 - # the number of the oids needs to be the same as the original number of oids (before removing a port and adding) - assert oid_list_len == len(oid_list) + self.config_db.delete_entry('BUFFER_QUEUE', 'Ethernet0|7') + self.wait_for_buffer_pg_queue_counter(meta_data['name_map'], 'Ethernet0', '7', False) + self.wait_for_id_list_remove(meta_data['group_name'], "Ethernet0", counter_oid) diff --git a/tests/test_gearbox.py b/tests/test_gearbox.py index 00a87c2f96..7d5b568661 100644 --- a/tests/test_gearbox.py +++ b/tests/test_gearbox.py @@ -49,20 +49,20 @@ def __init__(self, dvs): for i in [x for x in intf_table.getKeys() if sr not in x]: (status, fvs) = intf_table.get(i) assert status == True - self.interfaces[i] = {"attrs" : dict(fvs)} + self.interfaces[i] = dict(fvs) - def SanityCheck(self, dvs, testlog): + def SanityCheck(self, testlog): """ Verify data integrity of Gearbox objects in APPL_DB """ for i in self.interfaces: - phy_id = self.interfaces[i]["attrs"]["phy_id"] + phy_id = self.interfaces[i]["phy_id"] assert phy_id in self.phys - assert self.interfaces[i]["attrs"]["index"] in self.phys[phy_id]["ports"] + assert self.interfaces[i]["index"] in self.phys[phy_id]["ports"] - for lane in self.interfaces[i]["attrs"]["system_lanes"].split(','): + for lane in self.interfaces[i]["system_lanes"].split(','): assert lane in self.phys[phy_id]["lanes"] - for lane in self.interfaces[i]["attrs"]["line_lanes"].split(','): + for lane in self.interfaces[i]["line_lanes"].split(','): assert lane in self.phys[phy_id]["lanes"] class GBAsic(DVSDatabase): @@ -85,9 +85,9 @@ def __init__(self, db_id: int, connector: str, gearbox: Gearbox): for i in self.gearbox.interfaces: intf = self.gearbox.interfaces[i] - if intf["attrs"]["system_lanes"] == system_lanes: - assert intf["attrs"]["line_lanes"] == line_lanes - self.ports[intf["attrs"]["index"]] = (system_port_oid, line_port_oid) + if intf["system_lanes"] == system_lanes: + assert intf["line_lanes"] == line_lanes + self.ports[intf["index"]] = (system_port_oid, line_port_oid) assert len(self.ports) == len(self.gearbox.interfaces) @@ -112,13 +112,50 @@ def _verify_db_contents(): init_polling_config = PollingConfig(2, 30, strict=True) wait_for_result(_verify_db_contents, init_polling_config) +@pytest.fixture(scope="module") +def gearbox(dvs): + return Gearbox(dvs) + +@pytest.fixture(scope="module") +def gbasic(dvs, gearbox): + return GBAsic(swsscommon.GB_ASIC_DB, dvs.redis_sock, gearbox) + +@pytest.fixture(scope="module") +def enable_port_counter(dvs): + flex_counter_table = swsscommon.Table(dvs.get_config_db().db_connection, + "FLEX_COUNTER_TABLE") + + # Enable port counter + flex_counter_table.hset("PORT", "FLEX_COUNTER_STATUS", "enable") + yield + # Disable port counter + flex_counter_table.hdel("PORT", "FLEX_COUNTER_STATUS") class TestGearbox(object): - def test_GearboxSanity(self, dvs, testlog): - Gearbox(dvs).SanityCheck(dvs, testlog) + def test_GearboxSanity(self, gearbox, testlog): + 
gearbox.SanityCheck(testlog) + + def test_GearboxCounter(self, dvs, gbasic, enable_port_counter, testlog): + counters_db = DVSDatabase(swsscommon.COUNTERS_DB, dvs.redis_sock) + gb_counters_db = DVSDatabase(swsscommon.GB_COUNTERS_DB, dvs.redis_sock) + + intf = gbasic.gearbox.interfaces["0"] + port_oid = counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "")[intf["name"]] + system_port_oid, line_port_oid = gbasic.ports["0"] + + fvs = gb_counters_db.wait_for_entry("COUNTERS", system_port_oid) + assert fvs.get("SAI_PORT_STAT_IF_OUT_ERRORS") + + fvs = gb_counters_db.wait_for_entry("COUNTERS", line_port_oid) + assert fvs.get("SAI_PORT_STAT_IF_IN_ERRORS") + + fvs = counters_db.wait_for_entry("COUNTERS", port_oid) + assert fvs.get("SAI_PORT_STAT_IF_IN_ERRORS") - def test_GbAsicFEC(self, dvs, testlog): - gbasic = GBAsic(swsscommon.GB_ASIC_DB, dvs.redis_sock, Gearbox(dvs)) + def test_GbAsicFEC(self, gbasic, testlog): # set fec rs on port 0 of phy 1 fvs = swsscommon.FieldValuePairs([("system_fec","rs")]) diff --git a/tests/test_pg_drop_counter.py b/tests/test_pg_drop_counter.py index b3682881de..6d97af5f5c 100644 --- a/tests/test_pg_drop_counter.py +++ b/tests/test_pg_drop_counter.py @@ -2,16 +2,12 @@ import re import time import json -import pytest import redis from swsscommon import swsscommon pg_drop_attr = "SAI_INGRESS_PRIORITY_GROUP_STAT_DROPPED_PACKETS" -PORT = "Ethernet0" - -@pytest.mark.usefixtures('dvs_port_manager') class TestPGDropCounter(object): DEFAULT_POLL_INTERVAL = 10 pgs = {} @@ -61,14 +57,11 @@ def verify_value(self, dvs, obj_ids, entry_name, expected_value): assert found, "entry name %s not found" % (entry_name) def set_up_flex_counter(self): - pg_stats_entry = {"PG_COUNTER_ID_LIST": "{}".format(pg_drop_attr)} - for pg in self.pgs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", "PG_DROP_STAT_COUNTER:{}".format(pg), pg_stats_entry) - fc_status_enable = {"FLEX_COUNTER_STATUS": "enable"} - self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_DROP", fc_status_enable) self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK", fc_status_enable) + # Wait for orchagent to populate the DBs + time.sleep(2) def clear_flex_counter(self): for pg in self.pgs: @@ -79,10 +72,12 @@ def test_pg_drop_counters(self, dvs): self.setup_dbs(dvs) - self.pgs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP") - try: - self.set_up_flex_counter() + self.set_up_flex_counter() + # Get all configured counters OID's + self.pgs = self.counters_db.db_connection.hgetall("COUNTERS_PG_NAME_MAP").values() + assert self.pgs is not None and len(self.pgs) > 0 + try: + self.populate_asic(dvs, "0") time.sleep(self.DEFAULT_POLL_INTERVAL) self.verify_value(dvs, self.pgs, pg_drop_attr, "0") @@ -97,48 +92,3 @@ finally: self.clear_flex_counter() - def test_pg_drop_counter_port_add_remove(self, dvs): - self.setup_dbs(dvs) - - try: - # configure pg drop flex counter - self.set_up_flex_counter() - - # receive port info - fvs = self.config_db.get_entry("PORT", PORT) - assert len(fvs) > 0 - - # save all the oids of the pg drop counters - oid_list = [] - for priority in range(0,7): - oid_list.append(dvs.get_counters_db().get_entry("COUNTERS_PG_NAME_MAP", "")["%s:%d"%(PORT, priority)]) - # verify that counters exists on flex counter - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid_list[-1]) -
assert len(fields) == 1 - - # remove port - port_oid = self.counters_db.get_entry("COUNTERS_PORT_NAME_MAP", "")[PORT] - self.dvs_port.remove_port(PORT) - dvs.get_asic_db().wait_for_deleted_entry("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid) - - # verify counters were removed from flex counter table - for oid in oid_list: - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid) - assert len(fields) == 0 - - # add port and wait until the port is added on asic db - num_of_keys_without_port = len(dvs.get_asic_db().get_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT")) - self.config_db.create_entry("PORT", PORT, fvs) - dvs.get_asic_db().wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", num_of_keys_without_port + 1) - dvs.get_counters_db().wait_for_fields("COUNTERS_PG_NAME_MAP", "", ["%s:0"%(PORT)]) - - # verify counter was added - for priority in range(0,7): - oid = dvs.get_counters_db().get_entry("COUNTERS_PG_NAME_MAP", "")["%s:%d"%(PORT, priority)] - - # verify that counters exists on flex counter - fields = self.flex_db.get_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK_STAT_COUNTER:%s"%oid) - assert len(fields) == 1 - - finally: - self.clear_flex_counter() diff --git a/tests/test_watermark.py b/tests/test_watermark.py index 23efedcb42..a8cee70aa1 100644 --- a/tests/test_watermark.py +++ b/tests/test_watermark.py @@ -104,22 +104,8 @@ def verify_value(self, dvs, obj_ids, table_name, watermark_name, expected_value) assert found, "no such watermark found" def set_up_flex_counter(self, dvs): - for q in self.qs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - "QUEUE_WATERMARK_STAT_COUNTER:{}".format(q), - WmFCEntry.queue_stats_entry) - - for pg in self.pgs: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - "PG_WATERMARK_STAT_COUNTER:{}".format(pg), - WmFCEntry.pg_stats_entry) - - for buffer in self.buffers: - self.flex_db.create_entry("FLEX_COUNTER_TABLE", - "BUFFER_POOL_WATERMARK_STAT_COUNTER:{}".format(buffer), - WmFCEntry.buffer_stats_entry) - fc_status_enable = {"FLEX_COUNTER_STATUS": "enable"} + self.config_db.create_entry("FLEX_COUNTER_TABLE", "PG_WATERMARK", fc_status_enable) @@ -130,7 +116,8 @@ def set_up_flex_counter(self, dvs): "BUFFER_POOL_WATERMARK", fc_status_enable) - self.populate_asic_all(dvs, "0") + # Wait for orchagent to populate the DBs + time.sleep(2) def clear_flex_counter(self, dvs): for q in self.qs: @@ -150,10 +137,14 @@ self.config_db.delete_entry("FLEX_COUNTER_TABLE", "BUFFER_POOL_WATERMARK") def set_up(self, dvs): - self.qs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_QUEUE") - self.pgs = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_INGRESS_PRIORITY_GROUP") + self.pgs = self.counters_db.db_connection.hgetall("COUNTERS_PG_NAME_MAP").values() + assert self.pgs is not None and len(self.pgs) > 0 + self.qs = self.counters_db.db_connection.hgetall("COUNTERS_QUEUE_NAME_MAP").values() + assert self.qs is not None and len(self.qs) > 0 self.buffers = self.asic_db.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_POOL") + self.populate_asic_all(dvs, "0") + db = swsscommon.DBConnector(swsscommon.COUNTERS_DB, dvs.redis_sock, 0) tbl = swsscommon.Table(db, "COUNTERS_QUEUE_TYPE_MAP") @@ -180,9 +171,9 @@ def clear_watermark(self, dvs, data): def test_telemetry_period(self, dvs): self.setup_dbs(dvs) + self.set_up_flex_counter(dvs) self.set_up(dvs) try: - self.set_up_flex_counter(dvs) self.enable_unittests(dvs, "true") self.populate_asic_all(dvs, "100")