From 9bcb9b6eb128c0398d331a1e7a42705631e1f981 Mon Sep 17 00:00:00 2001 From: jfeng-arista <98421150+jfeng-arista@users.noreply.github.com> Date: Tue, 4 Jun 2024 19:26:50 -0700 Subject: [PATCH] Fixing appl_db FABRIC_MONITOR notification issue. (#3176) Why I did it: In the current code, the appl_db FABRIC_MONITOR notification could not be processed when the feature was disabled. This change fixes that issue. --- cfgmgr/fabricmgr.h | 2 +- orchagent/fabricportsorch.cpp | 42 +++++++++++++---------------------- orchagent/fabricportsorch.h | 1 + tests/test_fabric_capacity.py | 28 ++++++++++++++++++++--- tests/test_fabric_rate.py | 8 +++++-- 5 files changed, 49 insertions(+), 32 deletions(-) diff --git a/cfgmgr/fabricmgr.h b/cfgmgr/fabricmgr.h index afadd26d57..1fd399fef9 100644 --- a/cfgmgr/fabricmgr.h +++ b/cfgmgr/fabricmgr.h @@ -20,7 +20,7 @@ class FabricMgr : public Orch private: Table m_cfgFabricMonitorTable; Table m_cfgFabricPortTable; - ProducerStateTable m_appFabricMonitorTable; + Table m_appFabricMonitorTable; ProducerStateTable m_appFabricPortTable; void doTask(Consumer &consumer); diff --git a/orchagent/fabricportsorch.cpp b/orchagent/fabricportsorch.cpp index 80a938e38e..e9e7ae972d 100644 --- a/orchagent/fabricportsorch.cpp +++ b/orchagent/fabricportsorch.cpp @@ -1444,32 +1444,6 @@ void FabricPortsOrch::doTask(Consumer &consumer) { doFabricPortTask(consumer); } - if (table_name == APP_FABRIC_MONITOR_DATA_TABLE_NAME) - { - SWSS_LOG_INFO("doTask for APP_FABRIC_MONITOR_DATA_TABLE_NAME"); - auto it = consumer.m_toSync.begin(); - while (it != consumer.m_toSync.end()) - { - KeyOpFieldsValuesTuple t = it->second; - for (auto i : kfvFieldsValues(t)) - { - if (fvField(i) == "monState") - { - if (fvValue(i) == "enable") - { - m_debugTimer->start(); - SWSS_LOG_INFO("debugTimer started"); - } - else - { - m_debugTimer->stop(); - SWSS_LOG_INFO("debugTimer stopped"); - } - } - } - it = consumer.m_toSync.erase(it); - } - } } void 
FabricPortsOrch::doTask(swss::SelectableTimer &timer) @@ -1487,6 +1461,16 @@ void FabricPortsOrch::doTask(swss::SelectableTimer &timer) { updateFabricPortState(); } + + if (checkFabricPortMonState() && !m_debugTimerEnabled) + { + m_debugTimer->start(); + m_debugTimerEnabled = true; + } + else if (!checkFabricPortMonState()) + { + m_debugTimerEnabled = false; + } } else if (timer.getFd() == m_debugTimer->getFd()) { @@ -1497,6 +1481,12 @@ void FabricPortsOrch::doTask(swss::SelectableTimer &timer) return; } + if (!m_debugTimerEnabled) + { + m_debugTimer->stop(); + return; + } + if (m_getFabricPortListDone) { SWSS_LOG_INFO("Fabric monitor enabled"); diff --git a/orchagent/fabricportsorch.h b/orchagent/fabricportsorch.h index d94ece698e..e9c8b9fe67 100644 --- a/orchagent/fabricportsorch.h +++ b/orchagent/fabricportsorch.h @@ -51,6 +51,7 @@ class FabricPortsOrch : public Orch, public Subject bool m_getFabricPortListDone = false; bool m_isQueueStatsGenerated = false; + bool m_debugTimerEnabled = false; string m_defaultPollWithErrors = "0"; string m_defaultPollWithNoErrors = "8"; diff --git a/tests/test_fabric_capacity.py b/tests/test_fabric_capacity.py index a796e9f6bf..cb10e09af2 100644 --- a/tests/test_fabric_capacity.py +++ b/tests/test_fabric_capacity.py @@ -17,14 +17,17 @@ def test_voq_switch_fabric_capacity(self, vst): dvs = dvss[name] # Get the config information and choose a linecard or fabric card to test. 
config_db = dvs.get_config_db() + adb = dvs.get_app_db() metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") cfg_switch_type = metatbl.get("switch_type") if cfg_switch_type == "fabric": max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True) + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'disable'}) + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'disable'}, polling_config=max_poll) + # enable monitoring config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'}) - adb = dvs.get_app_db() adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}, polling_config=max_poll) # get state_db infor @@ -39,8 +42,12 @@ def test_voq_switch_fabric_capacity(self, vst): sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST": "TEST"}) # get current fabric capacity - capacity = sdb.get_entry("FABRIC_CAPACITY_TABLE", "FABRIC_CAPACITY_DATA")['operating_links'] - if sdb.get_entry("FABRIC_PORT_TABLE", sdb_port)['STATUS'] == 'up': + fvs = sdb.wait_for_fields("FABRIC_CAPACITY_TABLE", "FABRIC_CAPACITY_DATA",['operating_links'], polling_config=max_poll) + capacity = fvs['operating_links'] + + fvs = sdb.wait_for_fields("FABRIC_PORT_TABLE", sdb_port, ['STATUS'], polling_config=max_poll) + link_status = fvs['STATUS'] + if link_status == 'up': try: # clean up the testing port. 
# set TEST_CRC_ERRORS to 0 @@ -57,6 +64,21 @@ def test_voq_switch_fabric_capacity(self, vst): config_db.update_entry("FABRIC_PORT", cdb_port, {"isolateStatus": "False"}) sdb.wait_for_field_match("FABRIC_PORT_TABLE", sdb_port, {"ISOLATED": "0"}, polling_config=max_poll) sdb.wait_for_field_match("FABRIC_CAPACITY_TABLE", "FABRIC_CAPACITY_DATA", {'operating_links': capacity}, polling_config=max_poll) + + # now disable fabric link monitor + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'disable'}) + adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'disable'}, polling_config=max_poll) + # isolate the link from config_db + config_db.update_entry("FABRIC_PORT", cdb_port, {"isolateStatus": "True"}) + try: + max_poll = PollingConfig(polling_interval=30, timeout=90, strict=True) + sdb.wait_for_field_match("FABRIC_PORT_TABLE", sdb_port, {"ISOLATED": "1"}, polling_config=max_poll) + # check if capacity reduced + sdb.wait_for_field_negative_match("FABRIC_CAPACITY_TABLE", "FABRIC_CAPACITY_DATA", {'operating_links': capacity}, polling_config=max_poll) + assert False, "Expecting no change here" + except Exception as e: + # Expect field not change here + pass finally: # cleanup sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST_CRC_ERRORS": "0"}) diff --git a/tests/test_fabric_rate.py b/tests/test_fabric_rate.py index 1885aca2a9..4a135b6dd6 100644 --- a/tests/test_fabric_rate.py +++ b/tests/test_fabric_rate.py @@ -17,14 +17,16 @@ def test_voq_switch_fabric_rate(self, vst): dvs = dvss[name] # Get the config info config_db = dvs.get_config_db() + adb = dvs.get_app_db() metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") cfg_switch_type = metatbl.get("switch_type") if cfg_switch_type == "fabric": max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True) + config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'disable'}) + 
adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'disable'}, polling_config=max_poll) config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'}) - adb = dvs.get_app_db() adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}, polling_config=max_poll) # get state_db infor sdb = dvs.get_state_db() @@ -35,7 +37,9 @@ def test_voq_switch_fabric_rate(self, vst): portNum = random.randint(1, 16) sdb_port = "PORT"+str(portNum) - tx_rate = sdb.get_entry("FABRIC_PORT_TABLE", sdb_port)['OLD_TX_DATA'] + fvs = sdb.wait_for_fields("FABRIC_PORT_TABLE", sdb_port, ['OLD_TX_DATA'], polling_config=max_poll) + tx_rate = fvs['OLD_TX_DATA'] + sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST": "TEST"}) sdb.wait_for_field_negative_match("FABRIC_PORT_TABLE", sdb_port, {'OLD_TX_DATA': tx_rate}, polling_config=max_poll) finally: