Skip to content

Commit

Permalink
Fixing appl_db FABRIC_MONITOR notification issue. (#3176)
Browse files Browse the repository at this point in the history
Why I did it
In the current code, the appl_db FABRIC_MONITOR notification cannot be processed when the feature is disabled. This change fixes that issue.
  • Loading branch information
jfeng-arista authored Jun 5, 2024
1 parent fff544e commit 9bcb9b6
Show file tree
Hide file tree
Showing 5 changed files with 49 additions and 32 deletions.
2 changes: 1 addition & 1 deletion cfgmgr/fabricmgr.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ class FabricMgr : public Orch
private:
Table m_cfgFabricMonitorTable;
Table m_cfgFabricPortTable;
ProducerStateTable m_appFabricMonitorTable;
Table m_appFabricMonitorTable;
ProducerStateTable m_appFabricPortTable;

void doTask(Consumer &consumer);
Expand Down
42 changes: 16 additions & 26 deletions orchagent/fabricportsorch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1444,32 +1444,6 @@ void FabricPortsOrch::doTask(Consumer &consumer)
{
doFabricPortTask(consumer);
}
if (table_name == APP_FABRIC_MONITOR_DATA_TABLE_NAME)
{
SWSS_LOG_INFO("doTask for APP_FABRIC_MONITOR_DATA_TABLE_NAME");
auto it = consumer.m_toSync.begin();
while (it != consumer.m_toSync.end())
{
KeyOpFieldsValuesTuple t = it->second;
for (auto i : kfvFieldsValues(t))
{
if (fvField(i) == "monState")
{
if (fvValue(i) == "enable")
{
m_debugTimer->start();
SWSS_LOG_INFO("debugTimer started");
}
else
{
m_debugTimer->stop();
SWSS_LOG_INFO("debugTimer stopped");
}
}
}
it = consumer.m_toSync.erase(it);
}
}
}

void FabricPortsOrch::doTask(swss::SelectableTimer &timer)
Expand All @@ -1487,6 +1461,16 @@ void FabricPortsOrch::doTask(swss::SelectableTimer &timer)
{
updateFabricPortState();
}

if (checkFabricPortMonState() && !m_debugTimerEnabled)
{
m_debugTimer->start();
m_debugTimerEnabled = true;
}
else if (!checkFabricPortMonState())
{
m_debugTimerEnabled = false;
}
}
else if (timer.getFd() == m_debugTimer->getFd())
{
Expand All @@ -1497,6 +1481,12 @@ void FabricPortsOrch::doTask(swss::SelectableTimer &timer)
return;
}

if (!m_debugTimerEnabled)
{
m_debugTimer->stop();
return;
}

if (m_getFabricPortListDone)
{
SWSS_LOG_INFO("Fabric monitor enabled");
Expand Down
1 change: 1 addition & 0 deletions orchagent/fabricportsorch.h
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ class FabricPortsOrch : public Orch, public Subject

bool m_getFabricPortListDone = false;
bool m_isQueueStatsGenerated = false;
bool m_debugTimerEnabled = false;

string m_defaultPollWithErrors = "0";
string m_defaultPollWithNoErrors = "8";
Expand Down
28 changes: 25 additions & 3 deletions tests/test_fabric_capacity.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,14 +17,17 @@ def test_voq_switch_fabric_capacity(self, vst):
dvs = dvss[name]
# Get the config information and choose a linecard or fabric card to test.
config_db = dvs.get_config_db()
adb = dvs.get_app_db()
metatbl = config_db.get_entry("DEVICE_METADATA", "localhost")

cfg_switch_type = metatbl.get("switch_type")
if cfg_switch_type == "fabric":

max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True)
config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'disable'})
adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'disable'}, polling_config=max_poll)
# enable monitoring
config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'})
adb = dvs.get_app_db()
adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}, polling_config=max_poll)

# get state_db infor
Expand All @@ -39,8 +42,12 @@ def test_voq_switch_fabric_capacity(self, vst):
sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST": "TEST"})

# get current fabric capacity
capacity = sdb.get_entry("FABRIC_CAPACITY_TABLE", "FABRIC_CAPACITY_DATA")['operating_links']
if sdb.get_entry("FABRIC_PORT_TABLE", sdb_port)['STATUS'] == 'up':
fvs = sdb.wait_for_fields("FABRIC_CAPACITY_TABLE", "FABRIC_CAPACITY_DATA",['operating_links'], polling_config=max_poll)
capacity = fvs['operating_links']

fvs = sdb.wait_for_fields("FABRIC_PORT_TABLE", sdb_port, ['STATUS'], polling_config=max_poll)
link_status = fvs['STATUS']
if link_status == 'up':
try:
# clean up the testing port.
# set TEST_CRC_ERRORS to 0
Expand All @@ -57,6 +64,21 @@ def test_voq_switch_fabric_capacity(self, vst):
config_db.update_entry("FABRIC_PORT", cdb_port, {"isolateStatus": "False"})
sdb.wait_for_field_match("FABRIC_PORT_TABLE", sdb_port, {"ISOLATED": "0"}, polling_config=max_poll)
sdb.wait_for_field_match("FABRIC_CAPACITY_TABLE", "FABRIC_CAPACITY_DATA", {'operating_links': capacity}, polling_config=max_poll)

# now disable fabric link monitor
config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'disable'})
adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'disable'}, polling_config=max_poll)
# isolate the link from config_db
config_db.update_entry("FABRIC_PORT", cdb_port, {"isolateStatus": "True"})
try:
max_poll = PollingConfig(polling_interval=30, timeout=90, strict=True)
sdb.wait_for_field_match("FABRIC_PORT_TABLE", sdb_port, {"ISOLATED": "1"}, polling_config=max_poll)
# check if capacity reduced
sdb.wait_for_field_negative_match("FABRIC_CAPACITY_TABLE", "FABRIC_CAPACITY_DATA", {'operating_links': capacity}, polling_config=max_poll)
assert False, "Expecting no change here"
except Exception as e:
# Expect field not change here
pass
finally:
# cleanup
sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST_CRC_ERRORS": "0"})
Expand Down
8 changes: 6 additions & 2 deletions tests/test_fabric_rate.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,14 +17,16 @@ def test_voq_switch_fabric_rate(self, vst):
dvs = dvss[name]
# Get the config info
config_db = dvs.get_config_db()
adb = dvs.get_app_db()
metatbl = config_db.get_entry("DEVICE_METADATA", "localhost")

cfg_switch_type = metatbl.get("switch_type")
if cfg_switch_type == "fabric":

max_poll = PollingConfig(polling_interval=60, timeout=600, strict=True)
config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'disable'})
adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'disable'}, polling_config=max_poll)
config_db.update_entry("FABRIC_MONITOR", "FABRIC_MONITOR_DATA",{'monState': 'enable'})
adb = dvs.get_app_db()
adb.wait_for_field_match("FABRIC_MONITOR_TABLE","FABRIC_MONITOR_DATA", {'monState': 'enable'}, polling_config=max_poll)
# get state_db infor
sdb = dvs.get_state_db()
Expand All @@ -35,7 +37,9 @@ def test_voq_switch_fabric_rate(self, vst):
portNum = random.randint(1, 16)
sdb_port = "PORT"+str(portNum)

tx_rate = sdb.get_entry("FABRIC_PORT_TABLE", sdb_port)['OLD_TX_DATA']
fvs = sdb.wait_for_fields("FABRIC_PORT_TABLE", sdb_port, ['OLD_TX_DATA'], polling_config=max_poll)
tx_rate = fvs['OLD_TX_DATA']

sdb.update_entry("FABRIC_PORT_TABLE", sdb_port, {"TEST": "TEST"})
sdb.wait_for_field_negative_match("FABRIC_PORT_TABLE", sdb_port, {'OLD_TX_DATA': tx_rate}, polling_config=max_poll)
finally:
Expand Down

0 comments on commit 9bcb9b6

Please sign in to comment.