From cf7bfa29bef5e84850afb798d14aadd7ade4570f Mon Sep 17 00:00:00 2001 From: Yutong Zhang <90831468+yutongzhang-microsoft@users.noreply.github.com> Date: Fri, 21 Jun 2024 15:52:23 +0800 Subject: [PATCH 01/67] Add the definition of `log` in `script decode-syseeprom` (#3383) #### What I did If there is something wrong getting eeprom while exectuing script `decode-syseeprom`, it will raise an exception and log the error. There was no definition of `log` in script `decode-syseeprom`, which will raise such error ``` Traceback (most recent call last): File "/usr/local/bin/decode-syseeprom", line 264, in sys.exit(main()) ^^^^^^ File "/usr/local/bin/decode-syseeprom", line 246, in main print_serial(use_db) File "/usr/local/bin/decode-syseeprom", line 171, in print_serial eeprom = instantiate_eeprom_object() ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/usr/local/bin/decode-syseeprom", line 36, in instantiate_eeprom_object log.log_error('Failed to obtain EEPROM object due to {}'.format(repr(e))) ^^^ NameError: name 'log' is not defined ``` In this PR, I add the definition of log to avoid such error. #### How I did it Add the definition of log. 
#### How to verify it ``` admin@vlab-01:~$ sudo decode-syseeprom -s Failed to read system EEPROM info ``` --- scripts/decode-syseeprom | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/decode-syseeprom b/scripts/decode-syseeprom index 3d0b8d1db9..5812f38190 100755 --- a/scripts/decode-syseeprom +++ b/scripts/decode-syseeprom @@ -17,13 +17,15 @@ import sys import sonic_platform from sonic_platform_base.sonic_eeprom.eeprom_tlvinfo import TlvInfoDecoder -from sonic_py_common import device_info +from sonic_py_common import device_info, logger from swsscommon.swsscommon import SonicV2Connector from tabulate import tabulate EEPROM_INFO_TABLE = 'EEPROM_INFO' +SYSLOG_IDENTIFIER = 'decode-syseeprom' +log = logger.Logger(SYSLOG_IDENTIFIER) def instantiate_eeprom_object(): eeprom = None From c51758df2ae7a51d3ebd65169f7c0282cbbdf2b4 Mon Sep 17 00:00:00 2001 From: Chenyang Wang <49756587+cyw233@users.noreply.github.com> Date: Mon, 24 Jun 2024 12:03:15 +1000 Subject: [PATCH 02/67] fix: fix show bgp summary output typo (#3375) * fix: fix show bgp summary output typo * fix: remove extra dash * fix: remove extra space --- tests/bgp_commands_test.py | 284 +++++++++++++++++------------------ utilities_common/bgp_util.py | 2 +- 2 files changed, 143 insertions(+), 143 deletions(-) diff --git a/tests/bgp_commands_test.py b/tests/bgp_commands_test.py index a60ba8c81f..2a2179815f 100644 --- a/tests/bgp_commands_test.py +++ b/tests/bgp_commands_test.py @@ -25,32 +25,32 @@ Peer groups 4, using 256 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -10.0.0.1 4 65200 5919 2717 0 0 0 1d21h11m 6402 ARISTA01T2 -10.0.0.5 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA03T2 -10.0.0.9 4 65200 5915 2713 0 0 0 1d21h09m 6402 ARISTA05T2 -10.0.0.13 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA07T2 -10.0.0.17 4 65200 5916 2713 0 0 0 
1d21h09m 6402 ARISTA09T2 -10.0.0.21 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA11T2 -10.0.0.25 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA13T2 -10.0.0.29 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA15T2 -10.0.0.33 4 64001 0 0 0 0 0 never Active ARISTA01T0 -10.0.0.35 4 64002 0 0 0 0 0 never Active ARISTA02T0 -10.0.0.37 4 64003 0 0 0 0 0 never Active ARISTA03T0 -10.0.0.39 4 64004 0 0 0 0 0 never Active ARISTA04T0 -10.0.0.41 4 64005 0 0 0 0 0 never Active ARISTA05T0 -10.0.0.43 4 64006 0 0 0 0 0 never Active ARISTA06T0 -10.0.0.45 4 64007 0 0 0 0 0 never Active ARISTA07T0 -10.0.0.47 4 64008 0 0 0 0 0 never Active ARISTA08T0 -10.0.0.49 4 64009 0 0 0 0 0 never Active ARISTA09T0 -10.0.0.51 4 64010 0 0 0 0 0 never Active ARISTA10T0 -10.0.0.53 4 64011 0 0 0 0 0 never Active ARISTA11T0 -10.0.0.55 4 64012 0 0 0 0 0 never Active ARISTA12T0 -10.0.0.57 4 64013 0 0 0 0 0 never Active ARISTA13T0 -10.0.0.59 4 64014 0 0 0 0 0 never Active ARISTA14T0 -10.0.0.61 4 64015 0 0 0 0 0 never Active INT_NEIGH0 -10.0.0.63 4 64016 0 0 0 0 0 never Active INT_NEIGH1 +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +10.0.0.1 4 65200 5919 2717 0 0 0 1d21h11m 6402 ARISTA01T2 +10.0.0.5 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA03T2 +10.0.0.9 4 65200 5915 2713 0 0 0 1d21h09m 6402 ARISTA05T2 +10.0.0.13 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA07T2 +10.0.0.17 4 65200 5916 2713 0 0 0 1d21h09m 6402 ARISTA09T2 +10.0.0.21 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA11T2 +10.0.0.25 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA13T2 +10.0.0.29 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA15T2 +10.0.0.33 4 64001 0 0 0 0 0 never Active ARISTA01T0 +10.0.0.35 4 64002 0 0 0 0 0 never Active ARISTA02T0 +10.0.0.37 4 64003 0 0 0 0 0 never Active ARISTA03T0 +10.0.0.39 4 64004 0 0 0 0 0 never Active ARISTA04T0 +10.0.0.41 4 64005 0 0 0 0 0 never Active ARISTA05T0 +10.0.0.43 4 64006 
0 0 0 0 0 never Active ARISTA06T0 +10.0.0.45 4 64007 0 0 0 0 0 never Active ARISTA07T0 +10.0.0.47 4 64008 0 0 0 0 0 never Active ARISTA08T0 +10.0.0.49 4 64009 0 0 0 0 0 never Active ARISTA09T0 +10.0.0.51 4 64010 0 0 0 0 0 never Active ARISTA10T0 +10.0.0.53 4 64011 0 0 0 0 0 never Active ARISTA11T0 +10.0.0.55 4 64012 0 0 0 0 0 never Active ARISTA12T0 +10.0.0.57 4 64013 0 0 0 0 0 never Active ARISTA13T0 +10.0.0.59 4 64014 0 0 0 0 0 never Active ARISTA14T0 +10.0.0.61 4 64015 0 0 0 0 0 never Active INT_NEIGH0 +10.0.0.63 4 64016 0 0 0 0 0 never Active INT_NEIGH1 Total number of neighbors 24 """ @@ -65,32 +65,32 @@ Peer groups 4, using 256 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -fc00::1a 4 65200 6665 6672 0 0 0 2d09h39m 6402 ARISTA07T2 -fc00::2 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA01T2 -fc00::2a 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA11T2 -fc00::3a 4 65200 6666 7912 0 0 0 2d09h39m 6402 ARISTA15T2 -fc00::4a 4 64003 0 0 0 0 0 never Active ARISTA03T0 -fc00::4e 4 64004 0 0 0 0 0 never Active ARISTA04T0 -fc00::5a 4 64007 0 0 0 0 0 never Active ARISTA07T0 -fc00::5e 4 64008 0 0 0 0 0 never Active ARISTA08T0 -fc00::6a 4 64011 0 0 0 0 0 never Connect ARISTA11T0 -fc00::6e 4 64012 0 0 0 0 0 never Active ARISTA12T0 -fc00::7a 4 64015 0 0 0 0 0 never Active ARISTA15T0 -fc00::7e 4 64016 0 0 0 0 0 never Active ARISTA16T0 -fc00::12 4 65200 6666 7915 0 0 0 2d09h39m 6402 ARISTA05T2 -fc00::22 4 65200 6667 7915 0 0 0 2d09h39m 6402 ARISTA09T2 -fc00::32 4 65200 6663 6669 0 0 0 2d09h36m 6402 ARISTA13T2 -fc00::42 4 64001 0 0 0 0 0 never Active ARISTA01T0 -fc00::46 4 64002 0 0 0 0 0 never Active ARISTA02T0 -fc00::52 4 64005 0 0 0 0 0 never Active ARISTA05T0 -fc00::56 4 64006 0 0 0 0 0 never Active ARISTA06T0 -fc00::62 4 64009 0 0 0 0 0 never Active ARISTA09T0 -fc00::66 4 64010 0 0 0 0 0 never Active ARISTA10T0 -fc00::72 4 
64013 0 0 0 0 0 never Active ARISTA13T0 -fc00::76 4 64014 0 0 0 0 0 never Active INT_NEIGH0 -fc00::a 4 65200 6665 6671 0 0 0 2d09h38m 6402 INT_NEIGH1 +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +fc00::1a 4 65200 6665 6672 0 0 0 2d09h39m 6402 ARISTA07T2 +fc00::2 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA01T2 +fc00::2a 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA11T2 +fc00::3a 4 65200 6666 7912 0 0 0 2d09h39m 6402 ARISTA15T2 +fc00::4a 4 64003 0 0 0 0 0 never Active ARISTA03T0 +fc00::4e 4 64004 0 0 0 0 0 never Active ARISTA04T0 +fc00::5a 4 64007 0 0 0 0 0 never Active ARISTA07T0 +fc00::5e 4 64008 0 0 0 0 0 never Active ARISTA08T0 +fc00::6a 4 64011 0 0 0 0 0 never Connect ARISTA11T0 +fc00::6e 4 64012 0 0 0 0 0 never Active ARISTA12T0 +fc00::7a 4 64015 0 0 0 0 0 never Active ARISTA15T0 +fc00::7e 4 64016 0 0 0 0 0 never Active ARISTA16T0 +fc00::12 4 65200 6666 7915 0 0 0 2d09h39m 6402 ARISTA05T2 +fc00::22 4 65200 6667 7915 0 0 0 2d09h39m 6402 ARISTA09T2 +fc00::32 4 65200 6663 6669 0 0 0 2d09h36m 6402 ARISTA13T2 +fc00::42 4 64001 0 0 0 0 0 never Active ARISTA01T0 +fc00::46 4 64002 0 0 0 0 0 never Active ARISTA02T0 +fc00::52 4 64005 0 0 0 0 0 never Active ARISTA05T0 +fc00::56 4 64006 0 0 0 0 0 never Active ARISTA06T0 +fc00::62 4 64009 0 0 0 0 0 never Active ARISTA09T0 +fc00::66 4 64010 0 0 0 0 0 never Active ARISTA10T0 +fc00::72 4 64013 0 0 0 0 0 never Active ARISTA13T0 +fc00::76 4 64014 0 0 0 0 0 never Active INT_NEIGH0 +fc00::a 4 65200 6665 6671 0 0 0 2d09h38m 6402 INT_NEIGH1 Total number of neighbors 24 """ @@ -112,8 +112,8 @@ Peer groups 0, using 0 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName 
+---------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -128,8 +128,8 @@ Peer groups 0, using 0 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -146,8 +146,8 @@ Peer groups 0, using 0 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -164,8 +164,8 @@ Peer groups 0, using 0 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -180,28 +180,28 @@ Peer groups 3, using 192 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 -10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 -10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 -10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 
ARISTA09T2 -10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 -10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 -10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 -10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 -10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 -10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 -10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 -10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 -10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 -10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 -10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 -10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 -10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 -10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 -10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 -10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 +10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 +10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 +10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 +10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 +10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 +10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 +10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 +10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 +10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 +10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 +10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 +10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 +10.0.0.47 4 65215 4629 6767 0 0 0 
00:18:23 8514 ARISTA08T0 +10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 +10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 +10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 +10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 +10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 +10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 Total number of neighbors 20 """ @@ -216,28 +216,28 @@ Peer groups 3, using 192 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -fc00::1a 4 65203 4438 6578 0 0 0 00:08:57 8514 ARISTA07T2 -fc00::2 4 65200 4439 6578 0 0 0 00:08:56 8513 ARISTA01T2 -fc00::2a 4 65205 4439 6578 0 0 0 00:08:57 8514 ARISTA11T2 -fc00::3a 4 65207 4439 6578 0 0 0 00:08:57 8514 ARISTA15T2 -fc00::4a 4 65210 4439 6579 0 0 0 00:08:59 8514 ARISTA03T0 -fc00::4e 4 65211 4440 6579 0 0 0 00:09:00 8514 ARISTA04T0 -fc00::5a 4 65214 4440 6579 0 0 0 00:09:00 8514 ARISTA07T0 -fc00::5e 4 65215 4438 6576 0 0 0 00:08:50 8514 ARISTA08T0 -fc00::6a 4 65218 4441 6580 0 0 0 00:09:01 8514 ARISTA11T0 -fc00::6e 4 65219 4442 6580 0 0 0 00:09:01 8514 ARISTA12T0 -fc00::7a 4 65222 4441 6580 0 0 0 00:09:01 8514 ARISTA15T0 -fc00::12 4 65202 4438 6578 0 0 0 00:08:57 8514 ARISTA05T2 -fc00::22 4 65204 4438 6578 0 0 0 00:08:57 8514 ARISTA09T2 -fc00::32 4 65206 4438 6578 0 0 0 00:08:57 8514 ARISTA13T2 -fc00::42 4 65208 4442 6580 0 0 0 00:09:01 8514 ARISTA01T0 -fc00::52 4 65212 4439 6579 0 0 0 00:08:59 8514 ARISTA05T0 -fc00::56 4 65213 4439 6579 0 0 0 00:08:59 8514 ARISTA06T0 -fc00::62 4 65216 4438 6576 0 0 0 00:08:50 8514 ARISTA09T0 -fc00::66 4 65217 4442 6580 0 0 0 00:09:01 8514 ARISTA10T0 -fc00::72 4 65220 4441 6580 0 0 0 00:09:01 8514 ARISTA13T0 +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ----- --------- --------- -------- ----- ------ 
--------- -------------- -------------- +fc00::1a 4 65203 4438 6578 0 0 0 00:08:57 8514 ARISTA07T2 +fc00::2 4 65200 4439 6578 0 0 0 00:08:56 8513 ARISTA01T2 +fc00::2a 4 65205 4439 6578 0 0 0 00:08:57 8514 ARISTA11T2 +fc00::3a 4 65207 4439 6578 0 0 0 00:08:57 8514 ARISTA15T2 +fc00::4a 4 65210 4439 6579 0 0 0 00:08:59 8514 ARISTA03T0 +fc00::4e 4 65211 4440 6579 0 0 0 00:09:00 8514 ARISTA04T0 +fc00::5a 4 65214 4440 6579 0 0 0 00:09:00 8514 ARISTA07T0 +fc00::5e 4 65215 4438 6576 0 0 0 00:08:50 8514 ARISTA08T0 +fc00::6a 4 65218 4441 6580 0 0 0 00:09:01 8514 ARISTA11T0 +fc00::6e 4 65219 4442 6580 0 0 0 00:09:01 8514 ARISTA12T0 +fc00::7a 4 65222 4441 6580 0 0 0 00:09:01 8514 ARISTA15T0 +fc00::12 4 65202 4438 6578 0 0 0 00:08:57 8514 ARISTA05T2 +fc00::22 4 65204 4438 6578 0 0 0 00:08:57 8514 ARISTA09T2 +fc00::32 4 65206 4438 6578 0 0 0 00:08:57 8514 ARISTA13T2 +fc00::42 4 65208 4442 6580 0 0 0 00:09:01 8514 ARISTA01T0 +fc00::52 4 65212 4439 6579 0 0 0 00:08:59 8514 ARISTA05T0 +fc00::56 4 65213 4439 6579 0 0 0 00:08:59 8514 ARISTA06T0 +fc00::62 4 65216 4438 6576 0 0 0 00:08:50 8514 ARISTA09T0 +fc00::66 4 65217 4442 6580 0 0 0 00:09:01 8514 ARISTA10T0 +fc00::72 4 65220 4441 6580 0 0 0 00:09:01 8514 ARISTA13T0 Total number of neighbors 20 """ @@ -252,31 +252,31 @@ Peer groups 3, using 192 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ----- --------- --------- -------- ----- ------ --------- -------------- ------------------ -3.3.3.6 4 65100 0 0 0 0 0 never Connect str2-chassis-lc6-1 -3.3.3.7 4 65100 808 178891 0 0 0 00:17:47 1458 str2-chassis-lc7-1 -10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 -10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 -10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 -10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 -10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 -10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 
-10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 -10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 -10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 -10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 -10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 -10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 -10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 -10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 -10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 -10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 -10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 -10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 -10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 -10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 -10.0.0.61 4 65222 4633 11029 0 0 0 00:18:33 8514 INT_NEIGH0 +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ----- --------- --------- -------- ----- ------ --------- -------------- ------------------ +3.3.3.6 4 65100 0 0 0 0 0 never Connect str2-chassis-lc6-1 +3.3.3.7 4 65100 808 178891 0 0 0 00:17:47 1458 str2-chassis-lc7-1 +10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 +10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 +10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 +10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 +10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 +10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 +10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 +10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 +10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 +10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 +10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 +10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 +10.0.0.45 4 65214 4633 11029 0 0 0 
00:18:33 8514 ARISTA07T0 +10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 +10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 +10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 +10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 +10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 +10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 +10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 +10.0.0.61 4 65222 4633 11029 0 0 0 00:18:33 8514 INT_NEIGH0 Total number of neighbors 23 """ @@ -291,8 +291,8 @@ Peer groups 0, using 0 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -308,9 +308,9 @@ Peer groups 3, using 3 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -10.0.0.1 4 65222 4633 11029 0 0 0 00:18:33 8514 ARISTA01T2 +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +10.0.0.1 4 65222 4633 11029 0 0 0 00:18:33 8514 ARISTA01T2 Total number of neighbors 1 """ @@ -326,14 +326,14 @@ Peer groups 4, using 256 bytes of memory -Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ------------ --- ----- --------- --------- -------- ----- ------ --------- -------------- ---------------------- -3.3.3.1 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc1-1-ASIC0 -3.3.3.1 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc1-1-ASIC1 -3.3.3.2 4 65100 
277 9 0 0 0 00:00:14 33798 str2-sonic-lc2-1-ASIC0 -3.3.3.2 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc3-1-ASIC0 -3.3.3.6 4 65100 14 14 0 0 0 00:00:23 4 str2-sonic-lc3-1-ASIC1 -3.3.3.8 4 65100 12 10 0 0 0 00:00:15 4 str2-sonic-lc1-1-ASIC1 +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +---------- --- ----- --------- --------- -------- ----- ------ --------- -------------- ---------------------- +3.3.3.1 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc1-1-ASIC0 +3.3.3.1 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc1-1-ASIC1 +3.3.3.2 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc2-1-ASIC0 +3.3.3.2 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc3-1-ASIC0 +3.3.3.6 4 65100 14 14 0 0 0 00:00:23 4 str2-sonic-lc3-1-ASIC1 +3.3.3.8 4 65100 12 10 0 0 0 00:00:15 4 str2-sonic-lc1-1-ASIC1 Total number of neighbors 6 """ diff --git a/utilities_common/bgp_util.py b/utilities_common/bgp_util.py index 668ef344d5..df2e4963b6 100644 --- a/utilities_common/bgp_util.py +++ b/utilities_common/bgp_util.py @@ -299,7 +299,7 @@ def display_bgp_summary(bgp_summary, af): af: IPV4 or IPV6 ''' - headers = ["Neighbhor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer", + headers = ["Neighbor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer", "InQ", "OutQ", "Up/Down", "State/PfxRcd", "NeighborName"] try: From 0e6a55ef5eac306ef61d6f0241625a6baee42ab8 Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Mon, 24 Jun 2024 09:48:14 +0300 Subject: [PATCH 03/67] [fast-reboot] Backup database after syncd/swss stopped (#3342) - What I did Backup DB after syncd and swss are stopped. I observed an issue with fast-reboot that in a rare circumstances a queued FDB event might be written to ASIC_DB by a thread inside syncd after a call to FLUSHDB ASIC_DB was made. 
That left ASIC_DB only with one record about that FDB entry and caused syncd to crash at start: Mar 15 13:28:42.765108 sonic NOTICE syncd#SAI: :- Syncd: syncd started Mar 15 13:28:42.765268 sonic NOTICE syncd#SAI: :- onSyncdStart: performing hard reinit since COLD start was performed Mar 15 13:28:42.765451 sonic NOTICE syncd#SAI: :- readAsicState: loaded 1 switches Mar 15 13:28:42.765465 sonic NOTICE syncd#SAI: :- readAsicState: switch VID: oid:0x21000000000000 Mar 15 13:28:42.765465 sonic NOTICE syncd#SAI: :- readAsicState: read asic state took 0.000205 sec Mar 15 13:28:42.766364 sonic NOTICE syncd#SAI: :- onSyncdStart: on syncd start took 0.001097 sec Mar 15 13:28:42.766376 sonic ERR syncd#SAI: :- run: Runtime error during syncd init: map::at Mar 15 13:28:42.766376 sonic NOTICE syncd#SAI: :- sendShutdownRequest: sending switch_shutdown_request notification to OA for switch: oid:0x0 Mar 15 13:28:42.766518 sonic NOTICE syncd#SAI: :- sendShutdownRequestAfterException: notification send successfully - How I did it Backup DB after syncd/swss have stopped. - How to verify it Run fast-reboot. Signed-off-by: Stepan Blyschak --- scripts/fast-reboot | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 53dcffd7d2..2eeca11112 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -244,6 +244,19 @@ function wait_for_pre_shutdown_complete_or_fail() function backup_database() { debug "Backing up database ..." 
+ + if [[ "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then + # Advanced reboot: dump state to host disk + sonic-db-cli ASIC_DB FLUSHDB > /dev/null + sonic-db-cli COUNTERS_DB FLUSHDB > /dev/null + sonic-db-cli FLEX_COUNTER_DB FLUSHDB > /dev/null + fi + + if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then + # Flush RESTAP_DB in fast-reboot to avoid stale status + sonic-db-cli RESTAPI_DB FLUSHDB > /dev/null + fi + # Dump redis content to a file 'dump.rdb' in warmboot directory mkdir -p $WARM_DIR # Delete keys in stateDB except FDB_TABLE|*, MIRROR_SESSION_TABLE|*, WARM_RESTART_ENABLE_TABLE|*, FG_ROUTE_TABLE|* @@ -806,23 +819,11 @@ for service in ${SERVICES_TO_STOP}; do wait_for_pre_shutdown_complete_or_fail fi - if [[ "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then - # Advanced reboot: dump state to host disk - sonic-db-cli ASIC_DB FLUSHDB > /dev/null - sonic-db-cli COUNTERS_DB FLUSHDB > /dev/null - sonic-db-cli FLEX_COUNTER_DB FLUSHDB > /dev/null - fi - - if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then - # Flush RESTAP_DB in fast-reboot to avoid stale status - sonic-db-cli RESTAPI_DB FLUSHDB > /dev/null - fi - - backup_database - fi done +backup_database + # Stop the docker container engine. Otherwise we will have a broken docker storage systemctl stop docker.service || debug "Ignore stopping docker service error $?" From 667a1509c21aa42c268ecd6bff3cdb9b8d7b66c8 Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Thu, 27 Jun 2024 19:37:59 +0300 Subject: [PATCH 04/67] [pbh]: Fix show PBH counters when cache is partial (#3356) * [pbh]: Fix show PBH counters when cache is partial. 
Signed-off-by: Nazarii Hnydyn --- show/plugins/pbh.py | 2 +- tests/pbh_input/assert_show_output.py | 8 +++++++ tests/pbh_input/counters_db_partial.json | 11 ++++++++++ tests/pbh_test.py | 28 ++++++++++++++++++++++++ 4 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 tests/pbh_input/counters_db_partial.json diff --git a/show/plugins/pbh.py b/show/plugins/pbh.py index 407c596163..f47b43fbdc 100644 --- a/show/plugins/pbh.py +++ b/show/plugins/pbh.py @@ -395,7 +395,7 @@ def get_counter_value(pbh_counters, saved_pbh_counters, key, type): if not pbh_counters[key]: return '0' - if key in saved_pbh_counters: + if key in saved_pbh_counters and saved_pbh_counters[key]: new_value = int(pbh_counters[key][type]) - int(saved_pbh_counters[key][type]) if new_value >= 0: return str(new_value) diff --git a/tests/pbh_input/assert_show_output.py b/tests/pbh_input/assert_show_output.py index 7a701ba4bc..5538f3aada 100644 --- a/tests/pbh_input/assert_show_output.py +++ b/tests/pbh_input/assert_show_output.py @@ -78,6 +78,14 @@ """ +show_pbh_statistics_partial = """\ +TABLE RULE RX PACKETS COUNT RX BYTES COUNT +---------- ------ ------------------ ---------------- +pbh_table1 nvgre 100 200 +pbh_table2 vxlan 0 0 +""" + + show_pbh_statistics_updated="""\ TABLE RULE RX PACKETS COUNT RX BYTES COUNT ---------- ------ ------------------ ---------------- diff --git a/tests/pbh_input/counters_db_partial.json b/tests/pbh_input/counters_db_partial.json new file mode 100644 index 0000000000..aa140188c8 --- /dev/null +++ b/tests/pbh_input/counters_db_partial.json @@ -0,0 +1,11 @@ +{ + "COUNTERS:oid:0x9000000000000": { }, + "COUNTERS:oid:0x9000000000001": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "300", + "SAI_ACL_COUNTER_ATTR_BYTES": "400" + }, + "ACL_COUNTER_RULE_MAP": { + "pbh_table1:nvgre": "oid:0x9000000000000", + "pbh_table2:vxlan": "oid:0x9000000000001" + } +} diff --git a/tests/pbh_test.py b/tests/pbh_test.py index 7dddfea9ca..0d68f458ee 100644 --- a/tests/pbh_test.py +++ 
b/tests/pbh_test.py @@ -946,6 +946,34 @@ def test_show_pbh_statistics_after_clear(self): assert result.exit_code == SUCCESS assert result.output == assert_show_output.show_pbh_statistics_zero + def test_show_pbh_statistics_after_clear_and_counters_partial(self): + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db_partial') + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + + self.remove_pbh_counters_file() + + db = Db() + runner = CliRunner() + + result = runner.invoke( + clear.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') + + result = runner.invoke( + show.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_statistics_partial def test_show_pbh_statistics_after_clear_and_counters_updated(self): dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') From 3a8f0be0b0e5c25e843510cd1f53a91475dbebd2 Mon Sep 17 00:00:00 2001 From: Vivek Date: Thu, 27 Jun 2024 23:17:26 -0700 Subject: [PATCH 05/67] [Mellanox] Add support for ACS-4280 (#3368) - What I did Add support for ACS-4280 SKU in GCU - How I did it - How to verify it Verified GCU tests in sonic-mgmt --- .../gcu_field_operation_validators.conf.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index c49fe08f37..77b504b313 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -20,8 +20,8 @@ "spc1": [ "ACS-MSN2700", "ACS-MSN2740", 
"ACS-MSN2100", "ACS-MSN2410", "ACS-MSN2010", "Mellanox-SN2700", "Mellanox-SN2700-C28D8", "Mellanox-SN2700-D40C8S8", "Mellanox-SN2700-D44C10", "Mellanox-SN2700-D48C8", "ACS-MSN2700-A1", "Mellanox-SN2700-A1", "Mellanox-SN2700-A1-C28D8", "Mellanox-SN2700-A1-D40C8S8", "Mellanox-SN2700-A1-D44C10", "Mellanox-SN2700-A1-D48C8" ], "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], - "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40", - "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32"], + "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40", + "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "ACS-SN5400" ] }, "broadcom_asics": { From 06965df2c431ec63e0706499f90ea4bf0a5a1b4a Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Mon, 1 Jul 2024 09:50:03 +0300 Subject: [PATCH 06/67] Remove suppress-fib-pending CLI and make route_check.py check suppress-fib in BGP configuration (#3331) What I did Revert suppress FIB pending feature Why I did it Some unresolved FRR issues in current version How I verified it Build and run [route_check] check if suppress fib is enabled in bgp Signed-off-by: Stepan Blyschak --- config/main.py | 12 ---------- doc/Command-Reference.md | 38 ------------------------------ 
scripts/route_check.py | 32 +++++++++++++++++-------- show/main.py | 19 ++++----------- tests/route_check_test.py | 7 ++++-- tests/suppress_pending_fib_test.py | 34 -------------------------- 6 files changed, 31 insertions(+), 111 deletions(-) delete mode 100644 tests/suppress_pending_fib_test.py diff --git a/config/main.py b/config/main.py index 89572bd788..167c9c45bd 100644 --- a/config/main.py +++ b/config/main.py @@ -2336,18 +2336,6 @@ def synchronous_mode(sync_mode): config reload -y \n Option 2. systemctl restart swss""" % sync_mode) -# -# 'suppress-fib-pending' command ('config suppress-fib-pending ...') -# -@config.command('suppress-fib-pending') -@click.argument('state', metavar='', required=True, type=click.Choice(['enabled', 'disabled'])) -@clicommon.pass_db -def suppress_pending_fib(db, state): - ''' Enable or disable pending FIB suppression. Once enabled, BGP will not advertise routes that are not yet installed in the hardware ''' - - config_db = db.cfgdb - config_db.mod_entry('DEVICE_METADATA' , 'localhost', {"suppress-fib-pending" : state}) - # # 'yang_config_validation' command ('config yang_config_validation ...') # diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 757438dad0..78474d5948 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -2610,26 +2610,6 @@ This command displays the routing policy that takes precedence over the other ro Exit routemap ``` -**show suppress-fib-pending** - -This command is used to show the status of suppress pending FIB feature. -When enabled, BGP will not advertise routes which aren't yet offloaded. - -- Usage: - ``` - show suppress-fib-pending - ``` - -- Examples: - ``` - admin@sonic:~$ show suppress-fib-pending - Enabled - ``` - ``` - admin@sonic:~$ show suppress-fib-pending - Disabled - ``` - **show bgp device-global** This command displays BGP device global configuration. 
@@ -2742,24 +2722,6 @@ This command is used to remove particular IPv4 or IPv6 BGP neighbor configuratio admin@sonic:~$ sudo config bgp remove neighbor SONIC02SPINE ``` -**config suppress-fib-pending** - -This command is used to enable or disable announcements of routes not yet installed in the HW. -Once enabled, BGP will not advertise routes which aren't yet offloaded. - -- Usage: - ``` - config suppress-fib-pending - ``` - -- Examples: - ``` - admin@sonic:~$ sudo config suppress-fib-pending enabled - ``` - ``` - admin@sonic:~$ sudo config suppress-fib-pending disabled - ``` - **config bgp device-global tsa/w-ecmp** This command is used to manage BGP device global configuration. diff --git a/scripts/route_check.py b/scripts/route_check.py index ee417dc49c..2fbe041547 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -328,6 +328,16 @@ def get_asicdb_routes(namespace): return (selector, subs, sorted(rt)) +def is_bgp_suppress_fib_pending_enabled(namespace): + """ + Retruns True if FIB suppression is enabled in BGP config, False otherwise + """ + show_run_cmd = ['show', 'runningconfiguration', 'bgp', '-n', namespace] + + output = subprocess.check_output(show_run_cmd, text=True) + return 'bgp suppress-fib-pending' in output + + def is_suppress_fib_pending_enabled(namespace): """ Returns True if FIB suppression is enabled, False otherwise @@ -781,18 +791,20 @@ def check_routes(namespace): results[namespace] = {} results[namespace]["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss - rt_frr_miss = check_frr_pending_routes(namespace) + if is_bgp_suppress_fib_pending_enabled(namespace): + rt_frr_miss = check_frr_pending_routes(namespace) - if rt_frr_miss: - if namespace not in results: - results[namespace] = {} - results[namespace]["missed_FRR_routes"] = rt_frr_miss + if rt_frr_miss: + if namespace not in results: + results[namespace] = {} + results[namespace]["missed_FRR_routes"] = rt_frr_miss - if results: - if rt_frr_miss and not rt_appl_miss and 
not rt_asic_miss: - print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} but all routes in APPL_DB and ASIC_DB are in sync".format(namespace)) - if is_suppress_fib_pending_enabled(namespace): - mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl) + if results: + if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: + print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} but all " + "routes in APPL_DB and ASIC_DB are in sync".format(namespace)) + if is_suppress_fib_pending_enabled(namespace): + mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl) if results: print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}") diff --git a/show/main.py b/show/main.py index c4d99b8eab..a3a72c70e7 100755 --- a/show/main.py +++ b/show/main.py @@ -165,7 +165,7 @@ def get_config_json_by_namespace(namespace): iface_alias_converter = lazy_object_proxy.Proxy(lambda: clicommon.InterfaceAliasConverter()) # -# Display all storm-control data +# Display all storm-control data # def display_storm_all(): """ Show storm-control """ @@ -465,7 +465,7 @@ def is_mgmt_vrf_enabled(ctx): return False # -# 'storm-control' group +# 'storm-control' group # "show storm-control [interface ]" # @cli.group('storm-control', invoke_without_command=True) @@ -2111,7 +2111,7 @@ def summary(db): key_values = key.split('|') values = db.db.get_all(db.db.STATE_DB, key) if "local_discriminator" not in values.keys(): - values["local_discriminator"] = "NA" + values["local_discriminator"] = "NA" bfd_body.append([key_values[3], key_values[2], key_values[1], values["state"], values["type"], values["local_addr"], values["tx_interval"], values["rx_interval"], values["multiplier"], values["multihop"], values["local_discriminator"]]) @@ -2142,24 +2142,13 @@ def peer(db, peer_ip): key_values = key.split(delimiter) values = db.db.get_all(db.db.STATE_DB, key) if "local_discriminator" not in 
values.keys(): - values["local_discriminator"] = "NA" + values["local_discriminator"] = "NA" bfd_body.append([key_values[3], key_values[2], key_values[1], values.get("state"), values.get("type"), values.get("local_addr"), values.get("tx_interval"), values.get("rx_interval"), values.get("multiplier"), values.get("multihop"), values.get("local_discriminator")]) click.echo(tabulate(bfd_body, bfd_headers)) -# 'suppress-fib-pending' subcommand ("show suppress-fib-pending") -@cli.command('suppress-fib-pending') -@clicommon.pass_db -def suppress_pending_fib(db): - """ Show the status of suppress pending FIB feature """ - - field_values = db.cfgdb.get_entry('DEVICE_METADATA', 'localhost') - state = field_values.get('suppress-fib-pending', 'disabled').title() - click.echo(state) - - # asic-sdk-health-event subcommand ("show asic-sdk-health-event") @cli.group(cls=clicommon.AliasedGroup) def asic_sdk_health_event(): diff --git a/tests/route_check_test.py b/tests/route_check_test.py index 1f92b3d19a..26c632d742 100644 --- a/tests/route_check_test.py +++ b/tests/route_check_test.py @@ -252,8 +252,11 @@ def run_test(self, ct_data): def mock_check_output(self, ct_data, *args, **kwargs): ns = self.extract_namespace_from_args(args[0]) - routes = ct_data.get(FRR_ROUTES, {}).get(ns, {}) - return json.dumps(routes) + if 'show runningconfiguration bgp' in ' '.join(args[0]): + return 'bgp suppress-fib-pending' + else: + routes = ct_data.get(FRR_ROUTES, {}).get(ns, {}) + return json.dumps(routes) def assert_results(self, ct_data, ret, res): expect_ret = ct_data.get(RET, 0) diff --git a/tests/suppress_pending_fib_test.py b/tests/suppress_pending_fib_test.py deleted file mode 100644 index 04064d306e..0000000000 --- a/tests/suppress_pending_fib_test.py +++ /dev/null @@ -1,34 +0,0 @@ -from click.testing import CliRunner - -import config.main as config -import show.main as show -from utilities_common.db import Db - - -class TestSuppressFibPending: - def test_synchronous_mode(self): - runner = 
CliRunner() - - db = Db() - - result = runner.invoke(config.config.commands['suppress-fib-pending'], ['enabled'], obj=db) - print(result.output) - assert result.exit_code == 0 - assert db.cfgdb.get_entry('DEVICE_METADATA' , 'localhost')['suppress-fib-pending'] == 'enabled' - - result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db) - assert result.exit_code == 0 - assert result.output == 'Enabled\n' - - result = runner.invoke(config.config.commands['suppress-fib-pending'], ['disabled'], obj=db) - print(result.output) - assert result.exit_code == 0 - assert db.cfgdb.get_entry('DEVICE_METADATA' , 'localhost')['suppress-fib-pending'] == 'disabled' - - result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db) - assert result.exit_code == 0 - assert result.output == 'Disabled\n' - - result = runner.invoke(config.config.commands['suppress-fib-pending'], ['invalid-input'], obj=db) - print(result.output) - assert result.exit_code != 0 From 414cf3bbce13d45c59bde3a74dd341a37b4e0d99 Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Tue, 2 Jul 2024 02:14:10 +0530 Subject: [PATCH 07/67] [DPB]Fix return code in case of failure (#3389) * [DPB]Fix return code in case of failure * Updating UT --- config/main.py | 2 +- tests/config_dpb_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/config/main.py b/config/main.py index 167c9c45bd..83637c1421 100644 --- a/config/main.py +++ b/config/main.py @@ -4697,7 +4697,7 @@ def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load except Exception as e: click.secho("Failed to break out Port. 
Error: {}".format(str(e)), fg='magenta') - sys.exit(0) + sys.exit(1) def _get_all_mgmtinterface_keys(): """Returns list of strings containing mgmt interface keys diff --git a/tests/config_dpb_test.py b/tests/config_dpb_test.py index 5dcf814911..0a3d99cbcd 100644 --- a/tests/config_dpb_test.py +++ b/tests/config_dpb_test.py @@ -350,7 +350,7 @@ def test_config_breakout_extra_table_warning(self, breakout_cfg_file, sonic_db): commands["breakout"], ['{}'.format(interface), '{}'.format(newMode), '-v', '-y'], obj=obj) print(result.exit_code, result.output) - assert result.exit_code == 0 + assert result.exit_code == 1 assert 'Below Config can not be verified' in result.output assert 'UNKNOWN_TABLE' in result.output assert 'Do you wish to Continue?' in result.output From fb2e5cda90ced88249e06b18d8c5717a89ff62b9 Mon Sep 17 00:00:00 2001 From: Hua Liu <58683130+liuh-80@users.noreply.github.com> Date: Thu, 4 Jul 2024 09:36:55 +0800 Subject: [PATCH 08/67] Remove secret from golden_config_db.json and old_config files (#3390) --- scripts/generate_dump | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/scripts/generate_dump b/scripts/generate_dump index 06d163a45e..b163366bb0 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -2155,7 +2155,7 @@ finalize() { ############################################################################### -# Remove secret from pipeline inout and output result to pipeline. +# Remove secret from pipeline input and output result to pipeline. # Globals: # None # Arguments: @@ -2168,6 +2168,18 @@ remove_secret_from_config_db_dump() { sed -E 's/\"passkey\"\s*:\s*\"([^\"]*)\"/\"passkey\":\"****\"/g; /SNMP_COMMUNITY/,/\s{2,4}\},/d' } + +############################################################################### +# Remove secret from file. 
+############################################################################### +remove_secret_from_config_db_dump_file() { + local dumpfile=$1 + if [ -e ${dumpfile} ]; then + cat $dumpfile | remove_secret_from_config_db_dump > $dumpfile.temp + mv $dumpfile.temp $dumpfile + fi +} + ############################################################################### # Remove secret from dump files. # Globals: @@ -2201,8 +2213,24 @@ remove_secret_from_etc_files() { sed -i -E 's/(\s*snmp_\S*community\s*:\s*)(\S*)/\1****/g' $dumppath/etc/sonic/snmp.yml # Remove secret from /etc/sonic/config_db.json - cat $dumppath/etc/sonic/config_db.json | remove_secret_from_config_db_dump > $dumppath/etc/sonic/config_db.json.temp - mv $dumppath/etc/sonic/config_db.json.temp $dumppath/etc/sonic/config_db.json + remove_secret_from_config_db_dump_file $dumppath/etc/sonic/config_db.json + + # Remove secret from /etc/sonic/golden_config_db.json + remove_secret_from_config_db_dump_file $dumppath/etc/sonic/golden_config_db.json + + # Remove secret from /etc/sonic/old_config/ + + # Remove snmp community string from old_config/snmp.yml + local oldsnmp=${dumppath}/etc/sonic/old_config/snmp.yml + if [ -e ${oldsnmp} ]; then + sed -i -E 's/(\s*snmp_\S*community\s*:\s*)(\S*)/\1****/g' $oldsnmp + fi + + # Remove secret from /etc/sonic/config_db.json + remove_secret_from_config_db_dump_file ${dumppath}/etc/sonic/old_config/config_db.json + + # Remove secret from /etc/sonic/golden_config_db.json + remove_secret_from_config_db_dump_file ${dumppath}/etc/sonic/old_config/golden_config_db.json } ############################################################################### From 789ef634022f39ee2126fcf541eaf99edfd806b7 Mon Sep 17 00:00:00 2001 From: Xincun Li <147451452+xincunli-sonic@users.noreply.github.com> Date: Tue, 9 Jul 2024 13:03:04 -0700 Subject: [PATCH 09/67] Add Parallel option for apply-patch (#3373) * Add Parallel option for apply-patch * fix format * fix format * Add UT to check if parallel 
option ran as expected. * fix format. * Remove Dry Run. * add parallel run checker * Modify to number of asics * Modify UT. --- config/main.py | 47 +++++++-- tests/config_test.py | 244 +++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 276 insertions(+), 15 deletions(-) diff --git a/config/main.py b/config/main.py index 83637c1421..709c96402a 100644 --- a/config/main.py +++ b/config/main.py @@ -1,6 +1,8 @@ #!/usr/sbin/env python +import threading import click +import concurrent.futures import datetime import ipaddress import json @@ -1212,6 +1214,11 @@ def multiasic_save_to_singlefile(db, filename): with open(filename, 'w') as file: json.dump(all_current_config, file, indent=4) + +def apply_patch_wrapper(args): + return apply_patch_for_scope(*args) + + # Function to apply patch for a single ASIC. def apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path): scope, changes = scope_changes @@ -1220,16 +1227,19 @@ def apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_ru scope = multi_asic.DEFAULT_NAMESPACE scope_for_log = scope if scope else HOST_NAMESPACE + thread_id = threading.get_ident() + log.log_notice(f"apply_patch_for_scope started for {scope_for_log} by {changes} in thread:{thread_id}") + try: # Call apply_patch with the ASIC-specific changes and predefined parameters GenericUpdater(scope=scope).apply_patch(jsonpatch.JsonPatch(changes), - config_format, - verbose, - dry_run, - ignore_non_yang_tables, - ignore_path) + config_format, + verbose, + dry_run, + ignore_non_yang_tables, + ignore_path) results[scope_for_log] = {"success": True, "message": "Success"} - log.log_notice(f"'apply-patch' executed successfully for {scope_for_log} by {changes}") + log.log_notice(f"'apply-patch' executed successfully for {scope_for_log} by {changes} in thread:{thread_id}") except Exception as e: results[scope_for_log] = {"success": False, "message": str(e)} 
log.log_error(f"'apply-patch' executed failed for {scope_for_log} by {changes} due to {str(e)}") @@ -1549,11 +1559,12 @@ def print_dry_run_message(dry_run): help='format of config of the patch is either ConfigDb(ABNF) or SonicYang', show_default=True) @click.option('-d', '--dry-run', is_flag=True, default=False, help='test out the command without affecting config state') +@click.option('-p', '--parallel', is_flag=True, default=False, help='applying the change to all ASICs parallelly') @click.option('-n', '--ignore-non-yang-tables', is_flag=True, default=False, help='ignore validation for tables without YANG models', hidden=True) @click.option('-i', '--ignore-path', multiple=True, help='ignore validation for config specified by given path which is a JsonPointer', hidden=True) @click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing') @click.pass_context -def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, ignore_path, verbose): +def apply_patch(ctx, patch_file_path, format, dry_run, parallel, ignore_non_yang_tables, ignore_path, verbose): """Apply given patch of updates to Config. A patch is a JsonPatch which follows rfc6902. This command can be used do partial updates to the config with minimum disruption to running processes. It allows addition as well as deletion of configs. 
The patch file represents a diff of ConfigDb(ABNF) @@ -1599,8 +1610,26 @@ def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, i changes_by_scope[asic] = [] # Apply changes for each scope - for scope_changes in changes_by_scope.items(): - apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path) + if parallel: + with concurrent.futures.ThreadPoolExecutor() as executor: + # Prepare the argument tuples + arguments = [(scope_changes, results, config_format, + verbose, dry_run, ignore_non_yang_tables, ignore_path) + for scope_changes in changes_by_scope.items()] + + # Submit all tasks and wait for them to complete + futures = [executor.submit(apply_patch_wrapper, args) for args in arguments] + + # Wait for all tasks to complete + concurrent.futures.wait(futures) + else: + for scope_changes in changes_by_scope.items(): + apply_patch_for_scope(scope_changes, + results, + config_format, + verbose, dry_run, + ignore_non_yang_tables, + ignore_path) # Check if any updates failed failures = [scope for scope, result in results.items() if not result['success']] diff --git a/tests/config_test.py b/tests/config_test.py index db62bf3249..748d434fc2 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -3229,6 +3229,199 @@ def test_apply_patch_multiasic(self): @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch_dryrun_multiasic(self): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: 
{}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--dry-run", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_dryrun_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--dry-run", + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + 
"--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_called_once() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_check_running_in_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # 
Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_called_once() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.apply_patch_wrapper') + def test_apply_patch_check_apply_call_parallel_multiasic(self, mock_apply_patch): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + self.assertEqual(mock_apply_patch.call_count, + multi_asic.get_num_asics() + 1, + "apply_patch_wrapper function should be called number of ASICs plus host times") + + # Verify mocked_open was called as expected + 
mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_check_running_in_not_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_not_called() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + def 
test_apply_patch_parallel_with_error_multiasic(self): # Mock open to simulate file reading with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: # Mock GenericUpdater to avoid actual patch application @@ -3243,12 +3436,13 @@ def test_apply_patch_dryrun_multiasic(self): result = self.runner.invoke(config.config.commands["apply-patch"], [self.patch_file_path, "--format", ConfigFormat.SONICYANG.name, - "--dry-run", - "--ignore-non-yang-tables", - "--ignore-path", "/ANY_TABLE", - "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", - "--ignore-path", "", - "--verbose"], + "--dry-run", + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], catch_exceptions=False) print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) @@ -3326,6 +3520,44 @@ def test_apply_patch_validate_patch_with_badpath_multiasic(self, mock_subprocess # Verify mocked_open was called as expected mocked_open.assert_called_with(self.patch_file_path, 'r') + @patch('config.main.subprocess.Popen') + @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True)) + def test_apply_patch_parallel_badpath_multiasic(self, mock_subprocess_popen): + mock_instance = MagicMock() + mock_instance.communicate.return_value = (json.dumps(self.all_config), 0) + mock_subprocess_popen.return_value = mock_instance + + bad_patch = copy.deepcopy(self.patch_content) + bad_patch.append({ + "value": { + "policy_desc": "New ACL Table", + "ports": ["Ethernet3", "Ethernet4"], + "stage": "ingress", + "type": "L3" + } + }) + + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(bad_patch)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + 
mock_generic_updater.return_value.apply_patch = MagicMock() + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--parallel"], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertNotEqual(result.exit_code, 0, "Command should failed.") + self.assertIn("Failed to apply patch", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + @patch('config.main.subprocess.Popen') @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True)) def test_apply_patch_validate_patch_with_wrong_fetch_config(self, mock_subprocess_popen): From 1f944447434033bd4262d1f961b7ec745e7d1f69 Mon Sep 17 00:00:00 2001 From: bktsim <144830673+bktsim-arista@users.noreply.github.com> Date: Wed, 10 Jul 2024 16:22:43 -0700 Subject: [PATCH 10/67] Fix multi-asic behaviour for pg-drop (#3058) * Fixes multi-asic behaviour for pg-drop script. show priority-group drop is not behaving correctly on multi-asic devices, as the namespace option '-n' is not available and correct namespaces were not traversed to retrieve drop counters. This change fixes the multi-asic behaviour of this command. 
* add additional test and simplify branching Co-authored-by: Kenneth Cheung --- scripts/pg-drop | 87 +++++++++++++------ show/main.py | 5 +- tests/mock_tables/asic1/counters_db.json | 102 +++++++++++++++++++++++ tests/multi_asic_pgdropstat_test.py | 95 +++++++++++++++++++++ 4 files changed, 261 insertions(+), 28 deletions(-) create mode 100644 tests/multi_asic_pgdropstat_test.py diff --git a/scripts/pg-drop b/scripts/pg-drop index 7741593081..9078d28ad6 100755 --- a/scripts/pg-drop +++ b/scripts/pg-drop @@ -5,6 +5,7 @@ # pg-drop is a tool for show/clear ingress pg dropped packet stats. # ##################################################################### +from importlib import reload import json import argparse import os @@ -13,6 +14,8 @@ from collections import OrderedDict from natsort import natsorted from tabulate import tabulate +from utilities_common.general import load_db_config +from sonic_py_common import multi_asic # mock the redis for unit test purposes # try: @@ -22,7 +25,9 @@ try: sys.path.insert(0, modules_path) sys.path.insert(0, tests_path) import mock_tables.dbconnector - + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import mock_tables.mock_multi_asic + mock_tables.dbconnector.load_namespace_config() except KeyError: pass @@ -43,13 +48,11 @@ def get_dropstat_dir(): class PgDropStat(object): - def __init__(self): - self.counters_db = SonicV2Connector(host='127.0.0.1') - self.counters_db.connect(self.counters_db.COUNTERS_DB) - - self.configdb = ConfigDBConnector() + def __init__(self, namespace): + self.namespace = namespace + self.ns_list = multi_asic.get_namespace_list(namespace) + self.configdb = ConfigDBConnector(namespace=namespace) self.configdb.connect() - dropstat_dir = get_dropstat_dir() self.port_drop_stats_file = os.path.join(dropstat_dir, 'pg_drop_stats') @@ -57,14 +60,14 @@ class PgDropStat(object): """ Get port ID using object ID """ - port_id = self.counters_db.get(self.counters_db.COUNTERS_DB, 
COUNTERS_PG_PORT_MAP, oid) + port_id = self.get_counters_mapdata(COUNTERS_PG_PORT_MAP, oid) if not port_id: print("Port is not available for oid '{}'".format(oid)) sys.exit(1) return port_id # Get all ports - self.counter_port_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + self.counter_port_name_map = self.get_counters_mapall(COUNTERS_PORT_NAME_MAP) if not self.counter_port_name_map: print("COUNTERS_PORT_NAME_MAP is empty!") sys.exit(1) @@ -77,7 +80,7 @@ class PgDropStat(object): self.port_name_map[self.counter_port_name_map[port]] = port # Get PGs for each port - counter_pg_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PG_NAME_MAP) + counter_pg_name_map = self.get_counters_mapall(COUNTERS_PG_NAME_MAP) if not counter_pg_name_map: print("COUNTERS_PG_NAME_MAP is empty!") sys.exit(1) @@ -94,13 +97,32 @@ class PgDropStat(object): "header_prefix": "PG"}, } + def get_counters_mapdata(self, tablemap, index): + for ns in self.ns_list: + counters_db = SonicV2Connector(namespace=ns) + counters_db.connect(counters_db.COUNTERS_DB) + data = counters_db.get(counters_db.COUNTERS_DB, tablemap, index) + if data: + return data + return None + + def get_counters_mapall(self, tablemap): + mapdata = {} + for ns in self.ns_list: + counters_db = SonicV2Connector(namespace=ns) + counters_db.connect(counters_db.COUNTERS_DB) + map_result = counters_db.get_all(counters_db.COUNTERS_DB, tablemap) + if map_result: + mapdata.update(map_result) + return mapdata + def get_pg_index(self, oid): """ return PG index (0-7) oid - object ID for entry in redis """ - pg_index = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_INDEX_MAP, oid) + pg_index = self.get_counters_mapdata(COUNTERS_PG_INDEX_MAP, oid) if not pg_index: print("Priority group index is not available for oid '{}'".format(oid)) sys.exit(1) @@ -154,7 +176,7 @@ class PgDropStat(object): old_collected_data = port_drop_ckpt.get(name,{})[full_table_id] if 
len(port_drop_ckpt) > 0 else 0 idx = int(idx_func(obj_id)) pos = self.header_idx_to_pos[idx] - counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, full_table_id, counter_name) + counter_data = self.get_counters_mapdata(full_table_id, counter_name) if counter_data is None: fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: @@ -180,18 +202,18 @@ class PgDropStat(object): print(tabulate(table, self.header_list, tablefmt='simple', stralign='right')) def get_counts(self, counters, oid): - """ - Get the PG drop counts for an individual counter. - """ - counts = {} - table_id = COUNTER_TABLE_PREFIX + oid - for counter in counters: - counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, table_id, counter) - if counter_data is None: - counts[table_id] = 0 - else: - counts[table_id] = int(counter_data) - return counts + """ + Get the PG drop counts for an individual counter. + """ + counts = {} + table_id = COUNTER_TABLE_PREFIX + oid + for counter in counters: + counter_data = self.get_counters_mapdata(table_id, counter) + if counter_data is None: + counts[table_id] = 0 + else: + counts[table_id] = int(counter_data) + return counts def get_counts_table(self, counters, object_table): """ @@ -199,10 +221,10 @@ class PgDropStat(object): to its PG drop counts. Counts are contained in a dictionary that maps counter oid to its counts. 
""" - counter_object_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, object_table) + counter_object_name_map = self.get_counters_mapall(object_table) current_stat_dict = OrderedDict() - if counter_object_name_map is None: + if not counter_object_name_map: return current_stat_dict for obj in natsorted(counter_object_name_map): @@ -239,10 +261,12 @@ def main(): epilog=""" Examples: pg-drop -c show +pg-drop -c show --namespace asic0 pg-drop -c clear """) parser.add_argument('-c', '--command', type=str, help='Desired action to perform') + parser.add_argument('-n', '--namespace', type=str, help='Namespace name or skip for all', default=None) args = parser.parse_args() command = args.command @@ -256,7 +280,16 @@ pg-drop -c clear print(e) sys.exit(e.errno) - pgdropstat = PgDropStat() + # Load database config files + load_db_config() + namespaces = multi_asic.get_namespace_list() + if args.namespace and args.namespace not in namespaces: + namespacelist = ', '.join(namespaces) + print(f"Input value for '--namespace' / '-n'. 
Choose from one of ({namespacelist})") + sys.exit(1) + + # For 'clear' command force applying to all namespaces + pgdropstat = PgDropStat(args.namespace if command != 'clear' else None) if command == 'clear': pgdropstat.clear_drop_counts() diff --git a/show/main.py b/show/main.py index a3a72c70e7..d20073fb01 100755 --- a/show/main.py +++ b/show/main.py @@ -857,9 +857,12 @@ def drop(): pass @drop.command('counters') -def pg_drop_counters(): +@multi_asic_util.multi_asic_click_option_namespace +def pg_drop_counters(namespace): """Show dropped packets for priority-group""" command = ['pg-drop', '-c', 'show'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @priority_group.group(name='persistent-watermark') diff --git a/tests/mock_tables/asic1/counters_db.json b/tests/mock_tables/asic1/counters_db.json index c364d8599e..f919742157 100644 --- a/tests/mock_tables/asic1/counters_db.json +++ b/tests/mock_tables/asic1/counters_db.json @@ -207,6 +207,108 @@ "Ethernet-BP256": "oid:0x1000000000b06", "Ethernet-BP260": "oid:0x1000000000b08" }, + "COUNTERS_PG_NAME_MAP": { + "Ethernet-BP256:0": "oid:100000000b0f0", + "Ethernet-BP256:1": "oid:100000000b0f1", + "Ethernet-BP256:2": "oid:100000000b0f2", + "Ethernet-BP256:3": "oid:100000000b0f3", + "Ethernet-BP256:4": "oid:100000000b0f4", + "Ethernet-BP256:5": "oid:100000000b0f5", + "Ethernet-BP256:6": "oid:100000000b0f6", + "Ethernet-BP256:7": "oid:100000000b0f7", + "Ethernet-BP256:8": "oid:100000000b0f8", + "Ethernet-BP256:9": "oid:100000000b0f9", + "Ethernet-BP256:10": "oid:100000000b0fa", + "Ethernet-BP256:11": "oid:100000000b0fb", + "Ethernet-BP256:12": "oid:100000000b0fc", + "Ethernet-BP256:13": "oid:100000000b0fd", + "Ethernet-BP256:14": "oid:100000000b0fe", + "Ethernet-BP256:15": "oid:100000000b0ff", + "Ethernet-BP260:0": "oid:0x100000000b1f0", + "Ethernet-BP260:1": "oid:0x100000000b1f1", + "Ethernet-BP260:2": "oid:0x100000000b1f2", + "Ethernet-BP260:3": "oid:0x100000000b1f3", + 
"Ethernet-BP260:4": "oid:0x100000000b1f4", + "Ethernet-BP260:5": "oid:0x100000000b1f5", + "Ethernet-BP260:6": "oid:0x100000000b1f6", + "Ethernet-BP260:7": "oid:0x100000000b1f7", + "Ethernet-BP260:8": "oid:0x100000000b1f8", + "Ethernet-BP260:9": "oid:0x100000000b1f9", + "Ethernet-BP260:10": "oid:0x100000000b1fa", + "Ethernet-BP260:11": "oid:0x100000000b1fb", + "Ethernet-BP260:12": "oid:0x100000000b1fc", + "Ethernet-BP260:13": "oid:0x100000000b1fd", + "Ethernet-BP260:14": "oid:0x100000000b1fe", + "Ethernet-BP260:15": "oid:0x100000000b1ff" + }, + "COUNTERS_PG_PORT_MAP": { + "oid:100000000b0f0": "oid:0x1000000000b06", + "oid:100000000b0f1": "oid:0x1000000000b06", + "oid:100000000b0f2": "oid:0x1000000000b06", + "oid:100000000b0f3": "oid:0x1000000000b06", + "oid:100000000b0f4": "oid:0x1000000000b06", + "oid:100000000b0f5": "oid:0x1000000000b06", + "oid:100000000b0f6": "oid:0x1000000000b06", + "oid:100000000b0f7": "oid:0x1000000000b06", + "oid:100000000b0f8": "oid:0x1000000000b06", + "oid:100000000b0f9": "oid:0x1000000000b06", + "oid:100000000b0fa": "oid:0x1000000000b06", + "oid:100000000b0fb": "oid:0x1000000000b06", + "oid:100000000b0fc": "oid:0x1000000000b06", + "oid:100000000b0fd": "oid:0x1000000000b06", + "oid:100000000b0fe": "oid:0x1000000000b06", + "oid:100000000b0ff": "oid:0x1000000000b06", + "oid:0x100000000b1f0": "oid:0x1000000000b08", + "oid:0x100000000b1f1": "oid:0x1000000000b08", + "oid:0x100000000b1f2": "oid:0x1000000000b08", + "oid:0x100000000b1f3": "oid:0x1000000000b08", + "oid:0x100000000b1f4": "oid:0x1000000000b08", + "oid:0x100000000b1f5": "oid:0x1000000000b08", + "oid:0x100000000b1f6": "oid:0x1000000000b08", + "oid:0x100000000b1f7": "oid:0x1000000000b08", + "oid:0x100000000b1f8": "oid:0x1000000000b08", + "oid:0x100000000b1f9": "oid:0x1000000000b08", + "oid:0x100000000b1fa": "oid:0x1000000000b08", + "oid:0x100000000b1fb": "oid:0x1000000000b08", + "oid:0x100000000b1fc": "oid:0x1000000000b08", + "oid:0x100000000b1fd": "oid:0x1000000000b08", + 
"oid:0x100000000b1fe": "oid:0x1000000000b08", + "oid:0x100000000b1ff" : "oid:0x1000000000b08" + }, + "COUNTERS_PG_INDEX_MAP": { + "oid:100000000b0f0": "0", + "oid:100000000b0f1": "1", + "oid:100000000b0f2": "2", + "oid:100000000b0f3": "3", + "oid:100000000b0f4": "4", + "oid:100000000b0f5": "5", + "oid:100000000b0f6": "6", + "oid:100000000b0f7": "7", + "oid:100000000b0f8": "8", + "oid:100000000b0f9": "9", + "oid:100000000b0fa": "10", + "oid:100000000b0fb": "11", + "oid:100000000b0fc": "12", + "oid:100000000b0fd": "13", + "oid:100000000b0fe": "14", + "oid:100000000b0ff": "15", + "oid:0x100000000b1f0": "0", + "oid:0x100000000b1f1": "1", + "oid:0x100000000b1f2": "2", + "oid:0x100000000b1f3": "3", + "oid:0x100000000b1f4": "4", + "oid:0x100000000b1f5": "5", + "oid:0x100000000b1f6": "6", + "oid:0x100000000b1f7": "7", + "oid:0x100000000b1f8": "8", + "oid:0x100000000b1f9": "9", + "oid:0x100000000b1fa": "10", + "oid:0x100000000b1fb": "11", + "oid:0x100000000b1fc": "12", + "oid:0x100000000b1fd": "13", + "oid:0x100000000b1fe": "14", + "oid:0x100000000b1ff" : "15" + }, "COUNTERS_LAG_NAME_MAP": { "PortChannel0001": "oid:0x60000000005a1", "PortChannel0002": "oid:0x60000000005a2", diff --git a/tests/multi_asic_pgdropstat_test.py b/tests/multi_asic_pgdropstat_test.py new file mode 100644 index 0000000000..94bb13011b --- /dev/null +++ b/tests/multi_asic_pgdropstat_test.py @@ -0,0 +1,95 @@ +import os +import sys +from utilities_common.cli import UserCache +from .utils import get_result_and_return_code + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +pg_drop_masic_one_result = """\ +Ingress PG dropped packets: + Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 PG8 PG9 PG10 PG11 PG12 PG13\ + PG14 PG15 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ------ ------ ------ ------\ + ------ ------ 
+Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +""" + +pg_drop_masic_all_result = """\ +Ingress PG dropped packets: + Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 PG8 PG9 PG10 PG11 PG12 PG13\ + PG14 PG15 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ------ ------ ------ ------\ + ------ ------ + Ethernet0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 + Ethernet4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 + Ethernet-BP0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 + Ethernet-BP4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +""" + + +class TestMultiAsicPgDropstat(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + print("SETUP") + + def test_show_pg_drop_masic_all(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'show' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == pg_drop_masic_all_result + + def test_show_pg_drop_masic(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'show', '-n', 'asic1' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == pg_drop_masic_one_result + + def test_show_pg_drop_masic_not_exist(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'show', '-n', 'asic5' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 1 + assert result == "Input value for '--namespace' / '-n'. 
Choose from one of (asic0, asic1)" + + def test_clear_pg_drop(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'clear' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == "Cleared PG drop counter\n" + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + UserCache('pg-drop').remove_all() + print("TEARDOWN") From b6f7c2b7d138299475d994d251721455deab668f Mon Sep 17 00:00:00 2001 From: Xinyu Lin Date: Fri, 12 Jul 2024 08:01:37 +0800 Subject: [PATCH 11/67] =?UTF-8?q?[sfputil]=20Add=20loopback=20sub-command?= =?UTF-8?q?=20for=20debugging=20and=20module=20diagnosti=E2=80=A6=20(#3369?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [sfputil] Add loopback sub-command for debugging and module diagnostic control Signed-off-by: xinyu * [sfputil] Correct and update the reference of sfputil debug loopback command Signed-off-by: xinyu --------- Signed-off-by: xinyu --- doc/Command-Reference.md | 27 ++++++++++++++++++++++++ sfputil/main.py | 45 ++++++++++++++++++++++++++++++++++++++++ tests/sfputil_test.py | 43 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 115 insertions(+) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 78474d5948..689ca23b73 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -47,6 +47,8 @@ * [CMIS firmware version show commands](#cmis-firmware-version-show-commands) * [CMIS firmware upgrade commands](#cmis-firmware-upgrade-commands) * [CMIS firmware target mode commands](#cmis-firmware-target-mode-commands) +* [CMIS debug](#cmis-debug) +* [CMIS debug loopback](#cmis-debug-loopback) * [DHCP Relay](#dhcp-relay) * [DHCP Relay show commands](#dhcp-relay-show-commands) * [DHCP 
Relay clear commands](#dhcp-relay-clear-commands) @@ -3094,6 +3096,31 @@ Example of the module supporting target mode Target Mode set to 1 ``` +## CMIS debug + +### CMIS debug loopback + +This command is the standard CMIS diagnostic control used for troubleshooting link and performance issues between the host switch and transceiver module. + +**sfputil debug loopback** + +- Usage: + ``` + sfputil debug loopback PORT_NAME LOOPBACK_MODE + + Set the loopback mode + host-side-input: host side input loopback mode + host-side-output: host side output loopback mode + media-side-input: media side input loopback mode + media-side-output: media side output loopback mode + none: disable loopback mode + ``` + +- Example: + ``` + admin@sonic:~$ sfputil debug loopback Ethernet88 host-side-input + ``` + ## DHCP Relay ### DHCP Relay show commands diff --git a/sfputil/main.py b/sfputil/main.py index 4bb9058d79..2674c51b10 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -1888,5 +1888,50 @@ def get_overall_offset_sff8472(api, page, offset, size, wire_addr): return page * PAGE_SIZE + offset + PAGE_SIZE_FOR_A0H +# 'debug' subgroup +@cli.group() +def debug(): + """Module debug and diagnostic control""" + pass + + +# 'loopback' subcommand +@debug.command() +@click.argument('port_name', required=True, default=None) +@click.argument('loopback_mode', required=True, default="none", + type=click.Choice(["none", "host-side-input", "host-side-output", + "media-side-input", "media-side-output"])) +def loopback(port_name, loopback_mode): + """Set module diagnostic loopback mode + """ + physical_port = logical_port_to_physical_port_index(port_name) + sfp = platform_chassis.get_sfp(physical_port) + + if is_port_type_rj45(port_name): + click.echo("{}: This functionality is not applicable for RJ45 port".format(port_name)) + sys.exit(EXIT_FAIL) + + if not is_sfp_present(port_name): + click.echo("{}: SFP EEPROM not detected".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + api = 
sfp.get_xcvr_api() + except NotImplementedError: + click.echo("{}: This functionality is not implemented".format(port_name)) + sys.exit(ERROR_NOT_IMPLEMENTED) + + try: + status = api.set_loopback_mode(loopback_mode) + except AttributeError: + click.echo("{}: Set loopback mode is not applicable for this module".format(port_name)) + sys.exit(ERROR_NOT_IMPLEMENTED) + + if status: + click.echo("{}: Set {} loopback".format(port_name, loopback_mode)) + else: + click.echo("{}: Set {} loopback failed".format(port_name, loopback_mode)) + sys.exit(EXIT_FAIL) + if __name__ == '__main__': cli() diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 523848ec45..537c329819 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -1510,3 +1510,46 @@ def test_load_port_config(self, mock_is_multi_asic): mock_is_multi_asic.return_value = False assert sfputil.load_port_config() == True + + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + def test_debug_loopback(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + mock_sfp.get_presence.return_value = True + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + + runner = CliRunner() + mock_sfp.get_presence.return_value = False + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input"]) + assert result.output == 'Ethernet0: SFP EEPROM not detected\n' + mock_sfp.get_presence.return_value = True + + mock_sfp.get_xcvr_api = MagicMock(side_effect=NotImplementedError) + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input"]) + assert result.output == 'Ethernet0: This functionality is not 
implemented\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED + + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input"]) + assert result.output == 'Ethernet0: Set host-side-input loopback\n' + assert result.exit_code != ERROR_NOT_IMPLEMENTED + + mock_api.set_loopback_mode.return_value = False + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "none"]) + assert result.output == 'Ethernet0: Set none loopback failed\n' + assert result.exit_code == EXIT_FAIL + + mock_api.set_loopback_mode.return_value = True + mock_api.set_loopback_mode.side_effect = AttributeError + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "none"]) + assert result.output == 'Ethernet0: Set loopback mode is not applicable for this module\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED From c03c9c84ae3a015f857bf80806e7ba576b39d4ed Mon Sep 17 00:00:00 2001 From: Jianquan Ye Date: Tue, 16 Jul 2024 11:53:20 +1000 Subject: [PATCH 12/67] Revert "fix: fix show bgp summary output typo" (#3423) Reverts #3375 It will impact many automation scripts that the community users may have developped for years... So if we change it now, all those scripts will be impacted... not something we want to do just to correct a miss spelled word at this stage... Revert this change What I did Reverts fix: fix show bgp summary output typo #3375 Add comments in case someone else fix the typo without notification. 
co-authorized by: jianquanye@microsoft.com --- tests/bgp_commands_test.py | 286 +++++++++++++++++------------------ utilities_common/bgp_util.py | 6 +- 2 files changed, 148 insertions(+), 144 deletions(-) diff --git a/tests/bgp_commands_test.py b/tests/bgp_commands_test.py index 2a2179815f..11415e8727 100644 --- a/tests/bgp_commands_test.py +++ b/tests/bgp_commands_test.py @@ -25,32 +25,32 @@ Peer groups 4, using 256 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -10.0.0.1 4 65200 5919 2717 0 0 0 1d21h11m 6402 ARISTA01T2 -10.0.0.5 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA03T2 -10.0.0.9 4 65200 5915 2713 0 0 0 1d21h09m 6402 ARISTA05T2 -10.0.0.13 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA07T2 -10.0.0.17 4 65200 5916 2713 0 0 0 1d21h09m 6402 ARISTA09T2 -10.0.0.21 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA11T2 -10.0.0.25 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA13T2 -10.0.0.29 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA15T2 -10.0.0.33 4 64001 0 0 0 0 0 never Active ARISTA01T0 -10.0.0.35 4 64002 0 0 0 0 0 never Active ARISTA02T0 -10.0.0.37 4 64003 0 0 0 0 0 never Active ARISTA03T0 -10.0.0.39 4 64004 0 0 0 0 0 never Active ARISTA04T0 -10.0.0.41 4 64005 0 0 0 0 0 never Active ARISTA05T0 -10.0.0.43 4 64006 0 0 0 0 0 never Active ARISTA06T0 -10.0.0.45 4 64007 0 0 0 0 0 never Active ARISTA07T0 -10.0.0.47 4 64008 0 0 0 0 0 never Active ARISTA08T0 -10.0.0.49 4 64009 0 0 0 0 0 never Active ARISTA09T0 -10.0.0.51 4 64010 0 0 0 0 0 never Active ARISTA10T0 -10.0.0.53 4 64011 0 0 0 0 0 never Active ARISTA11T0 -10.0.0.55 4 64012 0 0 0 0 0 never Active ARISTA12T0 -10.0.0.57 4 64013 0 0 0 0 0 never Active ARISTA13T0 -10.0.0.59 4 64014 0 0 0 0 0 never Active ARISTA14T0 -10.0.0.61 4 64015 0 0 0 0 0 never Active INT_NEIGH0 -10.0.0.63 4 64016 0 0 0 0 0 never Active INT_NEIGH1 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ 
Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +10.0.0.1 4 65200 5919 2717 0 0 0 1d21h11m 6402 ARISTA01T2 +10.0.0.5 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA03T2 +10.0.0.9 4 65200 5915 2713 0 0 0 1d21h09m 6402 ARISTA05T2 +10.0.0.13 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA07T2 +10.0.0.17 4 65200 5916 2713 0 0 0 1d21h09m 6402 ARISTA09T2 +10.0.0.21 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA11T2 +10.0.0.25 4 65200 5917 2716 0 0 0 1d21h11m 6402 ARISTA13T2 +10.0.0.29 4 65200 5916 2714 0 0 0 1d21h10m 6402 ARISTA15T2 +10.0.0.33 4 64001 0 0 0 0 0 never Active ARISTA01T0 +10.0.0.35 4 64002 0 0 0 0 0 never Active ARISTA02T0 +10.0.0.37 4 64003 0 0 0 0 0 never Active ARISTA03T0 +10.0.0.39 4 64004 0 0 0 0 0 never Active ARISTA04T0 +10.0.0.41 4 64005 0 0 0 0 0 never Active ARISTA05T0 +10.0.0.43 4 64006 0 0 0 0 0 never Active ARISTA06T0 +10.0.0.45 4 64007 0 0 0 0 0 never Active ARISTA07T0 +10.0.0.47 4 64008 0 0 0 0 0 never Active ARISTA08T0 +10.0.0.49 4 64009 0 0 0 0 0 never Active ARISTA09T0 +10.0.0.51 4 64010 0 0 0 0 0 never Active ARISTA10T0 +10.0.0.53 4 64011 0 0 0 0 0 never Active ARISTA11T0 +10.0.0.55 4 64012 0 0 0 0 0 never Active ARISTA12T0 +10.0.0.57 4 64013 0 0 0 0 0 never Active ARISTA13T0 +10.0.0.59 4 64014 0 0 0 0 0 never Active ARISTA14T0 +10.0.0.61 4 64015 0 0 0 0 0 never Active INT_NEIGH0 +10.0.0.63 4 64016 0 0 0 0 0 never Active INT_NEIGH1 Total number of neighbors 24 """ @@ -65,32 +65,32 @@ Peer groups 4, using 256 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -fc00::1a 4 65200 6665 6672 0 0 0 2d09h39m 6402 ARISTA07T2 -fc00::2 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA01T2 -fc00::2a 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA11T2 -fc00::3a 4 65200 6666 7912 0 0 0 2d09h39m 6402 ARISTA15T2 -fc00::4a 4 64003 0 0 
0 0 0 never Active ARISTA03T0 -fc00::4e 4 64004 0 0 0 0 0 never Active ARISTA04T0 -fc00::5a 4 64007 0 0 0 0 0 never Active ARISTA07T0 -fc00::5e 4 64008 0 0 0 0 0 never Active ARISTA08T0 -fc00::6a 4 64011 0 0 0 0 0 never Connect ARISTA11T0 -fc00::6e 4 64012 0 0 0 0 0 never Active ARISTA12T0 -fc00::7a 4 64015 0 0 0 0 0 never Active ARISTA15T0 -fc00::7e 4 64016 0 0 0 0 0 never Active ARISTA16T0 -fc00::12 4 65200 6666 7915 0 0 0 2d09h39m 6402 ARISTA05T2 -fc00::22 4 65200 6667 7915 0 0 0 2d09h39m 6402 ARISTA09T2 -fc00::32 4 65200 6663 6669 0 0 0 2d09h36m 6402 ARISTA13T2 -fc00::42 4 64001 0 0 0 0 0 never Active ARISTA01T0 -fc00::46 4 64002 0 0 0 0 0 never Active ARISTA02T0 -fc00::52 4 64005 0 0 0 0 0 never Active ARISTA05T0 -fc00::56 4 64006 0 0 0 0 0 never Active ARISTA06T0 -fc00::62 4 64009 0 0 0 0 0 never Active ARISTA09T0 -fc00::66 4 64010 0 0 0 0 0 never Active ARISTA10T0 -fc00::72 4 64013 0 0 0 0 0 never Active ARISTA13T0 -fc00::76 4 64014 0 0 0 0 0 never Active INT_NEIGH0 -fc00::a 4 65200 6665 6671 0 0 0 2d09h38m 6402 INT_NEIGH1 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +fc00::1a 4 65200 6665 6672 0 0 0 2d09h39m 6402 ARISTA07T2 +fc00::2 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA01T2 +fc00::2a 4 65200 6666 7913 0 0 0 2d09h39m 6402 ARISTA11T2 +fc00::3a 4 65200 6666 7912 0 0 0 2d09h39m 6402 ARISTA15T2 +fc00::4a 4 64003 0 0 0 0 0 never Active ARISTA03T0 +fc00::4e 4 64004 0 0 0 0 0 never Active ARISTA04T0 +fc00::5a 4 64007 0 0 0 0 0 never Active ARISTA07T0 +fc00::5e 4 64008 0 0 0 0 0 never Active ARISTA08T0 +fc00::6a 4 64011 0 0 0 0 0 never Connect ARISTA11T0 +fc00::6e 4 64012 0 0 0 0 0 never Active ARISTA12T0 +fc00::7a 4 64015 0 0 0 0 0 never Active ARISTA15T0 +fc00::7e 4 64016 0 0 0 0 0 never Active ARISTA16T0 +fc00::12 4 65200 6666 7915 0 0 0 2d09h39m 6402 ARISTA05T2 +fc00::22 4 65200 6667 7915 0 0 0 2d09h39m 6402 
ARISTA09T2 +fc00::32 4 65200 6663 6669 0 0 0 2d09h36m 6402 ARISTA13T2 +fc00::42 4 64001 0 0 0 0 0 never Active ARISTA01T0 +fc00::46 4 64002 0 0 0 0 0 never Active ARISTA02T0 +fc00::52 4 64005 0 0 0 0 0 never Active ARISTA05T0 +fc00::56 4 64006 0 0 0 0 0 never Active ARISTA06T0 +fc00::62 4 64009 0 0 0 0 0 never Active ARISTA09T0 +fc00::66 4 64010 0 0 0 0 0 never Active ARISTA10T0 +fc00::72 4 64013 0 0 0 0 0 never Active ARISTA13T0 +fc00::76 4 64014 0 0 0 0 0 never Active INT_NEIGH0 +fc00::a 4 65200 6665 6671 0 0 0 2d09h38m 6402 INT_NEIGH1 Total number of neighbors 24 """ @@ -112,8 +112,8 @@ Peer groups 0, using 0 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -128,8 +128,8 @@ Peer groups 0, using 0 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -146,8 +146,8 @@ Peer groups 0, using 0 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -164,8 +164,8 @@ Peer groups 0, 
using 0 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -180,28 +180,28 @@ Peer groups 3, using 192 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 -10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 -10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 -10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 -10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 -10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 -10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 -10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 -10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 -10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 -10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 -10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 -10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 -10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 -10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 -10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 -10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 -10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 -10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 -10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- 
--------- --------- -------- ----- ------ --------- -------------- -------------- +10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 +10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 +10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 +10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 +10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 +10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 +10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 +10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 +10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 +10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 +10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 +10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 +10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 +10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 +10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 +10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 +10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 +10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 +10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 +10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 Total number of neighbors 20 """ @@ -216,28 +216,28 @@ Peer groups 3, using 192 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -fc00::1a 4 65203 4438 6578 0 0 0 00:08:57 8514 ARISTA07T2 -fc00::2 4 65200 4439 6578 0 0 0 00:08:56 8513 ARISTA01T2 -fc00::2a 4 65205 4439 6578 0 0 0 00:08:57 8514 ARISTA11T2 -fc00::3a 4 65207 4439 6578 0 0 0 00:08:57 8514 ARISTA15T2 -fc00::4a 4 65210 4439 6579 0 0 0 00:08:59 8514 ARISTA03T0 -fc00::4e 4 65211 4440 6579 0 0 0 00:09:00 8514 ARISTA04T0 -fc00::5a 4 65214 4440 6579 0 0 0 00:09:00 8514 ARISTA07T0 -fc00::5e 
4 65215 4438 6576 0 0 0 00:08:50 8514 ARISTA08T0 -fc00::6a 4 65218 4441 6580 0 0 0 00:09:01 8514 ARISTA11T0 -fc00::6e 4 65219 4442 6580 0 0 0 00:09:01 8514 ARISTA12T0 -fc00::7a 4 65222 4441 6580 0 0 0 00:09:01 8514 ARISTA15T0 -fc00::12 4 65202 4438 6578 0 0 0 00:08:57 8514 ARISTA05T2 -fc00::22 4 65204 4438 6578 0 0 0 00:08:57 8514 ARISTA09T2 -fc00::32 4 65206 4438 6578 0 0 0 00:08:57 8514 ARISTA13T2 -fc00::42 4 65208 4442 6580 0 0 0 00:09:01 8514 ARISTA01T0 -fc00::52 4 65212 4439 6579 0 0 0 00:08:59 8514 ARISTA05T0 -fc00::56 4 65213 4439 6579 0 0 0 00:08:59 8514 ARISTA06T0 -fc00::62 4 65216 4438 6576 0 0 0 00:08:50 8514 ARISTA09T0 -fc00::66 4 65217 4442 6580 0 0 0 00:09:01 8514 ARISTA10T0 -fc00::72 4 65220 4441 6580 0 0 0 00:09:01 8514 ARISTA13T0 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +fc00::1a 4 65203 4438 6578 0 0 0 00:08:57 8514 ARISTA07T2 +fc00::2 4 65200 4439 6578 0 0 0 00:08:56 8513 ARISTA01T2 +fc00::2a 4 65205 4439 6578 0 0 0 00:08:57 8514 ARISTA11T2 +fc00::3a 4 65207 4439 6578 0 0 0 00:08:57 8514 ARISTA15T2 +fc00::4a 4 65210 4439 6579 0 0 0 00:08:59 8514 ARISTA03T0 +fc00::4e 4 65211 4440 6579 0 0 0 00:09:00 8514 ARISTA04T0 +fc00::5a 4 65214 4440 6579 0 0 0 00:09:00 8514 ARISTA07T0 +fc00::5e 4 65215 4438 6576 0 0 0 00:08:50 8514 ARISTA08T0 +fc00::6a 4 65218 4441 6580 0 0 0 00:09:01 8514 ARISTA11T0 +fc00::6e 4 65219 4442 6580 0 0 0 00:09:01 8514 ARISTA12T0 +fc00::7a 4 65222 4441 6580 0 0 0 00:09:01 8514 ARISTA15T0 +fc00::12 4 65202 4438 6578 0 0 0 00:08:57 8514 ARISTA05T2 +fc00::22 4 65204 4438 6578 0 0 0 00:08:57 8514 ARISTA09T2 +fc00::32 4 65206 4438 6578 0 0 0 00:08:57 8514 ARISTA13T2 +fc00::42 4 65208 4442 6580 0 0 0 00:09:01 8514 ARISTA01T0 +fc00::52 4 65212 4439 6579 0 0 0 00:08:59 8514 ARISTA05T0 +fc00::56 4 65213 4439 6579 0 0 0 00:08:59 8514 ARISTA06T0 +fc00::62 4 65216 4438 6576 0 0 0 00:08:50 8514 
ARISTA09T0 +fc00::66 4 65217 4442 6580 0 0 0 00:09:01 8514 ARISTA10T0 +fc00::72 4 65220 4441 6580 0 0 0 00:09:01 8514 ARISTA13T0 Total number of neighbors 20 """ @@ -252,31 +252,31 @@ Peer groups 3, using 192 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- ------------------ -3.3.3.6 4 65100 0 0 0 0 0 never Connect str2-chassis-lc6-1 -3.3.3.7 4 65100 808 178891 0 0 0 00:17:47 1458 str2-chassis-lc7-1 -10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 -10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 -10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 -10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 -10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 -10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 -10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 -10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 -10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 -10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 -10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 -10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 -10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 -10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 -10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 -10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 -10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 -10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 -10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 -10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 -10.0.0.61 4 65222 4633 11029 0 0 0 00:18:33 8514 INT_NEIGH0 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- 
------------------ +3.3.3.6 4 65100 0 0 0 0 0 never Connect str2-chassis-lc6-1 +3.3.3.7 4 65100 808 178891 0 0 0 00:17:47 1458 str2-chassis-lc7-1 +10.0.0.1 4 65200 4632 11028 0 0 0 00:18:31 8514 ARISTA01T2 +10.0.0.9 4 65202 4632 11029 0 0 0 00:18:33 8514 ARISTA05T2 +10.0.0.13 4 65203 4632 11028 0 0 0 00:18:33 8514 ARISTA07T2 +10.0.0.17 4 65204 4631 11028 0 0 0 00:18:31 8514 ARISTA09T2 +10.0.0.21 4 65205 4632 11031 0 0 0 00:18:33 8514 ARISTA11T2 +10.0.0.25 4 65206 4632 11031 0 0 0 00:18:33 8514 ARISTA13T2 +10.0.0.29 4 65207 4632 11028 0 0 0 00:18:31 8514 ARISTA15T2 +10.0.0.33 4 65208 4633 11029 0 0 0 00:18:33 8514 ARISTA01T0 +10.0.0.37 4 65210 4632 11028 0 0 0 00:18:32 8514 ARISTA03T0 +10.0.0.39 4 65211 4629 6767 0 0 0 00:18:22 8514 ARISTA04T0 +10.0.0.41 4 65212 4632 11028 0 0 0 00:18:32 8514 ARISTA05T0 +10.0.0.43 4 65213 4629 6767 0 0 0 00:18:23 8514 ARISTA06T0 +10.0.0.45 4 65214 4633 11029 0 0 0 00:18:33 8514 ARISTA07T0 +10.0.0.47 4 65215 4629 6767 0 0 0 00:18:23 8514 ARISTA08T0 +10.0.0.49 4 65216 4633 11029 0 0 0 00:18:35 8514 ARISTA09T0 +10.0.0.51 4 65217 4633 11029 0 0 0 00:18:33 8514 ARISTA10T0 +10.0.0.53 4 65218 4632 11029 0 0 0 00:18:35 8514 ARISTA11T0 +10.0.0.55 4 65219 4632 11029 0 0 0 00:18:33 8514 ARISTA12T0 +10.0.0.57 4 65220 4632 11029 0 0 0 00:18:35 8514 ARISTA13T0 +10.0.0.59 4 65221 4632 11029 0 0 0 00:18:33 8514 ARISTA14T0 +10.0.0.61 4 65222 4633 11029 0 0 0 00:18:33 8514 INT_NEIGH0 Total number of neighbors 23 """ @@ -291,8 +291,8 @@ Peer groups 0, using 0 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ---- --------- --------- -------- ----- ------ --------- -------------- -------------- Total number of neighbors 0 """ @@ -308,9 +308,9 @@ Peer groups 3, using 3 bytes of memory -Neighbor V AS MsgRcvd 
MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- -10.0.0.1 4 65222 4633 11029 0 0 0 00:18:33 8514 ARISTA01T2 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- -------------- +10.0.0.1 4 65222 4633 11029 0 0 0 00:18:33 8514 ARISTA01T2 Total number of neighbors 1 """ @@ -326,17 +326,17 @@ Peer groups 4, using 256 bytes of memory -Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName ----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- ---------------------- -3.3.3.1 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc1-1-ASIC0 -3.3.3.1 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc1-1-ASIC1 -3.3.3.2 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc2-1-ASIC0 -3.3.3.2 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc3-1-ASIC0 -3.3.3.6 4 65100 14 14 0 0 0 00:00:23 4 str2-sonic-lc3-1-ASIC1 -3.3.3.8 4 65100 12 10 0 0 0 00:00:15 4 str2-sonic-lc1-1-ASIC1 +Neighbhor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd NeighborName +----------- --- ----- --------- --------- -------- ----- ------ --------- -------------- ---------------------- +3.3.3.1 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc1-1-ASIC0 +3.3.3.1 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc1-1-ASIC1 +3.3.3.2 4 65100 277 9 0 0 0 00:00:14 33798 str2-sonic-lc2-1-ASIC0 +3.3.3.2 4 65100 280 14 0 0 0 00:00:22 33798 str2-sonic-lc3-1-ASIC0 +3.3.3.6 4 65100 14 14 0 0 0 00:00:23 4 str2-sonic-lc3-1-ASIC1 +3.3.3.8 4 65100 12 10 0 0 0 00:00:15 4 str2-sonic-lc1-1-ASIC1 Total number of neighbors 6 -""" +""" # noqa: E501 class TestBgpCommandsSingleAsic(object): diff --git a/utilities_common/bgp_util.py b/utilities_common/bgp_util.py index df2e4963b6..cb49123c4b 100644 --- a/utilities_common/bgp_util.py +++ 
b/utilities_common/bgp_util.py @@ -299,7 +299,11 @@ def display_bgp_summary(bgp_summary, af): af: IPV4 or IPV6 ''' - headers = ["Neighbor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer", + + # "Neighbhor" is a known typo, + # but fix it will impact lots of automation scripts that the community users may have developed for years + # for now, let's keep it as it is. + headers = ["Neighbhor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer", "InQ", "OutQ", "Up/Down", "State/PfxRcd", "NeighborName"] try: From b9a6049a954f6053b49de198bbacf550d5728de7 Mon Sep 17 00:00:00 2001 From: Changrong Wu Date: Tue, 16 Jul 2024 15:53:16 -0700 Subject: [PATCH 13/67] [Bug Fix] Fix disk check test and drops group test (#3424) * tests/disk_check_test.py: remove temp files during teardown - modify teardown_class() to remove /tmp/tmp* * tests/drops_group_test.py: add code to remove temporary files when setting up test class - add a remove_tmp_dropstat_file() function as a helper to clean the cache - add an invocation of remove_tmp_dropstat_file() in setup_class() of TestDropCounters class * tests/disk_check_test.py: fix the subprocess command in the teardown_class() function * tests/disk_check_test.py: fix formatting for pre-commit check * tests/drops_group_test.py: fix formatting for pre-commit check --- tests/disk_check_test.py | 5 ++++- tests/drops_group_test.py | 10 +++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/tests/disk_check_test.py b/tests/disk_check_test.py index 82b8b16ff6..ac541b05b9 100644 --- a/tests/disk_check_test.py +++ b/tests/disk_check_test.py @@ -1,7 +1,6 @@ import sys import syslog from unittest.mock import patch -import pytest import subprocess sys.path.append("scripts") @@ -178,3 +177,7 @@ def test_readonly(self, mock_proc, mock_log): assert max_log_lvl == syslog.LOG_ERR + @classmethod + def teardown_class(cls): + subprocess.run("rm -rf /tmp/tmp*", shell=True) # cleanup the temporary dirs + print("TEARDOWN") diff --git a/tests/drops_group_test.py 
b/tests/drops_group_test.py index ad8c8a4203..93f99e3f1b 100644 --- a/tests/drops_group_test.py +++ b/tests/drops_group_test.py @@ -3,6 +3,7 @@ import shutil from click.testing import CliRunner +from utilities_common.cli import UserCache test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -97,14 +98,17 @@ sonic_drops_test 0 0 """ -dropstat_path = "/tmp/dropstat-27" + +def remove_tmp_dropstat_file(): + # remove the tmp portstat + cache = UserCache("dropstat") + cache.remove_all() class TestDropCounters(object): @classmethod def setup_class(cls): print("SETUP") - if os.path.exists(dropstat_path): - shutil.rmtree(dropstat_path) + remove_tmp_dropstat_file() os.environ["PATH"] += os.pathsep + scripts_path os.environ["UTILITIES_UNIT_TESTING"] = "1" From fd3096c74db57a0ea98af5907ca659ec6da8fc23 Mon Sep 17 00:00:00 2001 From: Changrong Wu Date: Thu, 18 Jul 2024 11:03:02 -0700 Subject: [PATCH 14/67] Enable show ip bgp on sup and -n all for show ip bgp network (#3417) #### What I did 1. Enable "show ip bgp" on sup and "-n all" for show ip bgp network. 2. Modify README.doc to make the instructions of building and installing the wheel package more clear. 3. Improve the output format of rexec command #### How I did it Modify the code in show/main.py to enable "show ip bgp ..." on supervisors and modify show/bgp_frr_v4.py to add support for the new features. Update README.md. Modify the rexec implementation to improve the output format. Add unit tests for the above change. 
#### How to verify it Run on a SONiC chassis --- README.md | 7 + rcli/linecard.py | 14 +- rcli/rexec.py | 12 +- rcli/rshell.py | 4 +- rcli/utils.py | 15 ++ show/bgp_frr_v4.py | 38 +++++- show/main.py | 6 +- .../bgp_network_test_vector.py | 128 +++++++++++++++++- tests/conftest.py | 7 + tests/mock_tables/chassis_state_db.json | 3 + tests/remote_cli_test.py | 8 +- tests/remote_show_test.py | 57 ++++++++ tests/show_bgp_network_test.py | 7 +- 13 files changed, 282 insertions(+), 24 deletions(-) create mode 100644 tests/remote_show_test.py diff --git a/README.md b/README.md index f63b0832a2..91146bc9d0 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,7 @@ A convenient alternative is to let the SONiC build system configure a build envi ``` python3 setup.py bdist_wheel ``` +Note: This command by default will not update the wheel package in target/. To specify the destination location of wheel package, use "-d" option. #### To run unit tests @@ -73,6 +74,12 @@ python3 setup.py bdist_wheel python3 setup.py test ``` +#### To install the package on a SONiC machine +``` +sudo pip uninstall sonic-utilities +sudo pip install YOUR_WHEEL_PACKAGE +``` +Note: Don't use "--force-reinstall". 
### sonic-utilities-data diff --git a/rcli/linecard.py b/rcli/linecard.py index 73c13a73ef..f893428a42 100644 --- a/rcli/linecard.py +++ b/rcli/linecard.py @@ -8,7 +8,7 @@ import termios import tty -from .utils import get_linecard_ip +from .utils import get_linecard_ip, get_linecard_hostname_from_module_name, get_linecard_module_name_from_hostname from paramiko.py3compat import u from paramiko import Channel @@ -31,7 +31,17 @@ def __init__(self, linecard_name, username, password): if not self.ip: sys.exit(1) - self.linecard_name = linecard_name + # if the user passes linecard hostname, then try to get the module name for that linecard + module_name = get_linecard_module_name_from_hostname(linecard_name) + if module_name is None: + # if the module name cannot be found from host, assume the user has passed module name + self.module_name = linecard_name + self.hostname = get_linecard_hostname_from_module_name(linecard_name) + else: + # the user has passed linecard hostname + self.hostname = linecard_name + self.module_name = module_name + self.username = username self.password = password diff --git a/rcli/rexec.py b/rcli/rexec.py index 8831d5585f..21929c8012 100644 --- a/rcli/rexec.py +++ b/rcli/rexec.py @@ -30,20 +30,22 @@ def cli(linecard_names, command, username): if list(linecard_names) == ["all"]: # Get all linecard names using autocompletion helper - linecard_names = rcli_utils.get_all_linecards(None, None, "") + module_names = sorted(rcli_utils.get_all_linecards(None, None, "")) + else: + module_names = linecard_names linecards = [] # Iterate through each linecard, check if the login was successful - for linecard_name in linecard_names: - linecard = Linecard(linecard_name, username, password) + for module_name in module_names: + linecard = Linecard(module_name, username, password) if not linecard.connection: - click.echo(f"Failed to connect to {linecard_name} with username {username}") + click.echo(f"Failed to connect to {module_name} with username {username}") 
sys.exit(1) linecards.append(linecard) for linecard in linecards: if linecard.connection: - click.echo(f"======== {linecard.linecard_name} output: ========") + click.echo(f"======== {linecard.module_name}|{linecard.hostname} output: ========") click.echo(linecard.execute_cmd(command)) diff --git a/rcli/rshell.py b/rcli/rshell.py index bac02d42d8..b22187a0f3 100644 --- a/rcli/rshell.py +++ b/rcli/rshell.py @@ -28,14 +28,14 @@ def cli(linecard_name, username): try: linecard = Linecard(linecard_name, username, password) if linecard.connection: - click.echo(f"Connecting to {linecard.linecard_name}") + click.echo(f"Connecting to {linecard.module_name}") # If connection was created, connection exists. # Otherwise, user will see an error message. linecard.start_shell() click.echo("Connection Closed") except paramiko.ssh_exception.AuthenticationException: click.echo( - f"Login failed on '{linecard.linecard_name}' with username '{linecard.username}'") + f"Login failed on '{linecard.module_name}' with username '{linecard.username}'") if __name__=="__main__": diff --git a/rcli/utils.py b/rcli/utils.py index 510e360581..e2f48788ba 100644 --- a/rcli/utils.py +++ b/rcli/utils.py @@ -43,6 +43,20 @@ def get_linecard_module_name_from_hostname(linecard_name: str): return None + +def get_linecard_hostname_from_module_name(linecard_name: str): + + chassis_state_db = connect_to_chassis_state_db() + keys = chassis_state_db.keys(chassis_state_db.CHASSIS_STATE_DB, '{}|{}'.format(CHASSIS_MODULE_HOSTNAME_TABLE, '*')) + for key in keys: + module_name = key.split('|')[1] + if module_name.replace('-', '').lower() == linecard_name.replace('-', '').lower(): + hostname = chassis_state_db.get(chassis_state_db.CHASSIS_STATE_DB, key, CHASSIS_MODULE_HOSTNAME) + return hostname + + return None + + def get_linecard_ip(linecard_name: str): """ Given a linecard name, lookup its IP address in the midplane table @@ -69,6 +83,7 @@ def get_linecard_ip(linecard_name: str): return None return module_ip + def 
get_module_ip_and_access_from_state_db(module_name): state_db = connect_state_db() data_dict = state_db.get_all( diff --git a/show/bgp_frr_v4.py b/show/bgp_frr_v4.py index 6343e8b7b2..10e5d982cd 100644 --- a/show/bgp_frr_v4.py +++ b/show/bgp_frr_v4.py @@ -1,6 +1,8 @@ import click +import sys +import subprocess -from sonic_py_common import multi_asic +from sonic_py_common import multi_asic, device_info from show.main import ip import utilities_common.bgp_util as bgp_util import utilities_common.cli as clicommon @@ -17,6 +19,12 @@ @ip.group(cls=clicommon.AliasedGroup) def bgp(): """Show IPv4 BGP (Border Gateway Protocol) information""" + if device_info.is_supervisor(): + # if the device is a chassis, the command need to be executed by rexec + click.echo("Since the current device is a chassis supervisor, " + + "this command will be executed remotely on all linecards") + proc = subprocess.run(["rexec", "all"] + ["-c", " ".join(sys.argv)]) + sys.exit(proc.returncode) pass @@ -102,10 +110,16 @@ def neighbors(ipaddress, info_type, namespace): def network(ipaddress, info_type, namespace): """Show IP (IPv4) BGP network""" - if multi_asic.is_multi_asic() and namespace not in multi_asic.get_namespace_list(): - ctx = click.get_current_context() - ctx.fail('-n/--namespace option required. provide namespace from list {}'\ - .format(multi_asic.get_namespace_list())) + namespace = namespace.strip() + if multi_asic.is_multi_asic(): + if namespace == multi_asic.DEFAULT_NAMESPACE: + ctx = click.get_current_context() + ctx.fail('-n/--namespace option required. provide namespace from list {}' + .format(multi_asic.get_namespace_list())) + if namespace != "all" and namespace not in multi_asic.get_namespace_list(): + ctx = click.get_current_context() + ctx.fail('invalid namespace {}. 
provide namespace from list {}' + .format(namespace, multi_asic.get_namespace_list())) command = 'show ip bgp' if ipaddress is not None: @@ -125,5 +139,15 @@ def network(ipaddress, info_type, namespace): if info_type is not None: command += ' {}'.format(info_type) - output = bgp_util.run_bgp_show_command(command, namespace) - click.echo(output.rstrip('\n')) + if namespace == "all": + if multi_asic.is_multi_asic(): + for ns in multi_asic.get_namespace_list(): + click.echo("\n======== namespace {} ========".format(ns)) + output = bgp_util.run_bgp_show_command(command, ns) + click.echo(output.rstrip('\n')) + else: + output = bgp_util.run_bgp_show_command(command, "") + click.echo(output.rstrip('\n')) + else: + output = bgp_util.run_bgp_show_command(command, namespace) + click.echo(output.rstrip('\n')) diff --git a/show/main.py b/show/main.py index d20073fb01..06114eb79f 100755 --- a/show/main.py +++ b/show/main.py @@ -1190,7 +1190,11 @@ def protocol(verbose): ip.add_command(bgp) from .bgp_frr_v6 import bgp ipv6.add_command(bgp) - +elif device_info.is_supervisor(): + from .bgp_frr_v4 import bgp + ip.add_command(bgp) + from .bgp_frr_v6 import bgp + ipv6.add_command(bgp) # # 'link-local-mode' subcommand ("show ipv6 link-local-mode") # diff --git a/tests/bgp_commands_input/bgp_network_test_vector.py b/tests/bgp_commands_input/bgp_network_test_vector.py index da93e8e8e8..73ece16a66 100644 --- a/tests/bgp_commands_input/bgp_network_test_vector.py +++ b/tests/bgp_commands_input/bgp_network_test_vector.py @@ -227,6 +227,9 @@ multi_asic_bgp_network_err = \ """Error: -n/--namespace option required. provide namespace from list ['asic0', 'asic1']""" +multi_asic_bgp_network_asic_unknown_err = \ + """Error: invalid namespace asic_unknown. provide namespace from list ['asic0', 'asic1']""" + bgp_v4_network_asic0 = \ """ BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 @@ -276,7 +279,7 @@ *=i10.0.0.42/31 10.1.0.2 0 100 0 ? *>i 10.1.0.0 0 100 0 ? 
*=i10.0.0.44/31 10.1.0.2 0 100 0 ? -*>i 10.1.0.0 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? """ bgp_v4_network_ip_address_asic0 = \ @@ -311,6 +314,111 @@ Last update: Thu Apr 22 02:13:30 2021 """ +bgp_v4_network_all_asic = \ + """ +======== namespace asic0 ======== + +BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +* i0.0.0.0/0 10.1.0.2 100 0 65200 6666 6667 i +* i 10.1.0.0 100 0 65200 6666 6667 i +*= 10.0.0.5 0 65200 6666 6667 i +*> 10.0.0.1 0 65200 6666 6667 i +* i8.0.0.0/32 10.1.0.2 0 100 0 i +* i 10.1.0.0 0 100 0 i +* 0.0.0.0 0 32768 ? +*> 0.0.0.0 0 32768 i +*=i8.0.0.1/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.2/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.3/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*>i8.0.0.4/32 10.1.0.0 0 100 0 i +*>i8.0.0.5/32 10.1.0.2 0 100 0 i +* i10.0.0.0/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +* i10.0.0.4/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +*=i10.0.0.8/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.12/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.32/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.34/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.36/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.38/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.40/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.42/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.44/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? 
+ +======== namespace asic1 ======== + +BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +* i0.0.0.0/0 10.1.0.2 100 0 65200 6666 6667 i +* i 10.1.0.0 100 0 65200 6666 6667 i +*= 10.0.0.5 0 65200 6666 6667 i +*> 10.0.0.1 0 65200 6666 6667 i +* i8.0.0.0/32 10.1.0.2 0 100 0 i +* i 10.1.0.0 0 100 0 i +* 0.0.0.0 0 32768 ? +*> 0.0.0.0 0 32768 i +*=i8.0.0.1/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.2/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.3/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*>i8.0.0.4/32 10.1.0.0 0 100 0 i +*>i8.0.0.5/32 10.1.0.2 0 100 0 i +* i10.0.0.0/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +* i10.0.0.4/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +*=i10.0.0.8/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.12/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.32/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.34/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.36/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.38/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.40/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.42/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.44/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? 
+""" + bgp_v6_network_asic0 = \ """ BGP table version is 12849, local router ID is 10.1.0.32, vrf id 0 @@ -429,6 +537,9 @@ def mock_show_bgp_network_multi_asic(param): return bgp_v6_network_ip_address_asic0 elif param == 'bgp_v6_network_bestpath_asic0': return bgp_v6_network_ip_address_asic0_bestpath + elif param == "bgp_v4_network_all_asic": + # this is mocking the output of a single LC + return bgp_v4_network_asic0 else: return '' @@ -454,6 +565,11 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 1, 'rc_output': bgp_v4_network_longer_prefixes_error }, + 'bgp_v4_network_all_asic_on_single_asic': { + 'args': ['-nall'], + 'rc': 0, + 'rc_output': bgp_v4_network + }, 'bgp_v6_network': { 'args': [], 'rc': 0, @@ -499,6 +615,16 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 0, 'rc_output': bgp_v4_network_bestpath_asic0 }, + 'bgp_v4_network_all_asic': { + 'args': ['-nall'], + 'rc': 0, + 'rc_output': bgp_v4_network_all_asic + }, + 'bgp_v4_network_asic_unknown': { + 'args': ['-nasic_unknown'], + 'rc': 2, + 'rc_err_msg': multi_asic_bgp_network_asic_unknown_err + }, 'bgp_v6_network_multi_asic': { 'args': [], 'rc': 2, diff --git a/tests/conftest.py b/tests/conftest.py index 72b28515bb..5dd31d523a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -386,6 +386,13 @@ def mock_run_show_summ_bgp_command_no_ext_neigh_on_asic1( else: return "" + def mock_multi_asic_list(): + return ["asic0", "asic1"] + + # mock multi-asic list + if request.param == "bgp_v4_network_all_asic": + multi_asic.get_namespace_list = mock_multi_asic_list + _old_run_bgp_command = bgp_util.run_bgp_command if request.param == 'ip_route_for_int_ip': bgp_util.run_bgp_command = mock_run_bgp_command_for_static diff --git a/tests/mock_tables/chassis_state_db.json b/tests/mock_tables/chassis_state_db.json index 5178c49ca0..6af9e19da4 100644 --- a/tests/mock_tables/chassis_state_db.json +++ b/tests/mock_tables/chassis_state_db.json @@ -4,6 +4,9 @@ }, "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { 
"module_hostname": "sonic-lc2" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { + "module_hostname": "sonic-lc3" } } \ No newline at end of file diff --git a/tests/remote_cli_test.py b/tests/remote_cli_test.py index d9fd672102..9883dfa16b 100644 --- a/tests/remote_cli_test.py +++ b/tests/remote_cli_test.py @@ -12,9 +12,9 @@ import socket import termios -MULTI_LC_REXEC_OUTPUT = '''======== sonic-lc1 output: ======== +MULTI_LC_REXEC_OUTPUT = '''======== LINE-CARD0|sonic-lc1 output: ======== hello world -======== LINE-CARD2 output: ======== +======== LINE-CARD2|sonic-lc3 output: ======== hello world ''' REXEC_HELP = '''Usage: cli [OPTIONS] LINECARD_NAMES... @@ -152,12 +152,12 @@ def test_rexec_all(self): @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_invalid_lc(self): runner = CliRunner() - LINECARD_NAME = "sonic-lc-3" + LINECARD_NAME = "sonic-lc-100" result = runner.invoke( rexec.cli, [LINECARD_NAME, "-c", "show version"]) print(result.output) assert result.exit_code == 1, result.output - assert "Linecard sonic-lc-3 not found\n" == result.output + assert "Linecard sonic-lc-100 not found\n" == result.output @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) diff --git a/tests/remote_show_test.py b/tests/remote_show_test.py new file mode 100644 index 0000000000..6acbb8185f --- /dev/null +++ b/tests/remote_show_test.py @@ -0,0 +1,57 @@ +import mock +import subprocess +from io import BytesIO +from click.testing import CliRunner + + +def mock_rexec_command(*args): + mock_stdout = BytesIO(b"""hello world""") + print(mock_stdout.getvalue().decode()) + return subprocess.CompletedProcess(args=[], returncode=0, stdout=mock_stdout, stderr=BytesIO()) + + +def mock_rexec_error_cmd(*args): + mock_stderr = BytesIO(b"""Error""") + print(mock_stderr.getvalue().decode()) + return subprocess.CompletedProcess(args=[], 
returncode=1, stdout=BytesIO(), stderr=mock_stderr) + + +MULTI_LC_REXEC_OUTPUT = '''Since the current device is a chassis supervisor, this command will be executed remotely on all linecards +hello world +''' + +MULTI_LC_ERR_OUTPUT = '''Since the current device is a chassis supervisor, this command will be executed remotely on all linecards +Error +''' + + +class TestRexecBgp(object): + @classmethod + def setup_class(cls): + pass + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + def test_show_ip_bgp_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_command + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["summary"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 0 + assert MULTI_LC_REXEC_OUTPUT == result.output + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + def test_show_ip_bgp_error_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_error_cmd + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["summary"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 1 + assert MULTI_LC_ERR_OUTPUT == result.output diff --git a/tests/show_bgp_network_test.py b/tests/show_bgp_network_test.py index f610199538..d3f24c8571 100644 --- a/tests/show_bgp_network_test.py +++ b/tests/show_bgp_network_test.py @@ -57,7 +57,8 @@ def setup_class(cls): ('bgp_v4_network_bestpath', 'bgp_v4_network_bestpath'), ('bgp_v6_network_longer_prefixes', 'bgp_v6_network_longer_prefixes'), ('bgp_v4_network', 'bgp_v4_network_longer_prefixes_error'), - ('bgp_v4_network', 'bgp_v6_network_longer_prefixes_error')], + ('bgp_v4_network', 'bgp_v6_network_longer_prefixes_error'), + 
('bgp_v4_network', 'bgp_v4_network_all_asic_on_single_asic')], indirect=['setup_single_bgp_instance']) def test_bgp_network(self, setup_bgp_commands, test_vector, setup_single_bgp_instance): @@ -84,7 +85,9 @@ def setup_class(cls): ('bgp_v4_network_bestpath_asic0', 'bgp_v4_network_bestpath_asic0'), ('bgp_v6_network_asic0', 'bgp_v6_network_asic0'), ('bgp_v6_network_ip_address_asic0', 'bgp_v6_network_ip_address_asic0'), - ('bgp_v6_network_bestpath_asic0', 'bgp_v6_network_bestpath_asic0')], + ('bgp_v6_network_bestpath_asic0', 'bgp_v6_network_bestpath_asic0'), + ('bgp_v4_network_all_asic', 'bgp_v4_network_all_asic'), + ('bgp_v4_network', 'bgp_v4_network_asic_unknown')], indirect=['setup_multi_asic_bgp_instance']) def test_bgp_network(self, setup_bgp_commands, test_vector, setup_multi_asic_bgp_instance): From f2b762138c3236807bf1995e2e2130f7b8e5f386 Mon Sep 17 00:00:00 2001 From: mihirpat1 <112018033+mihirpat1@users.noreply.github.com> Date: Thu, 18 Jul 2024 15:58:23 -0700 Subject: [PATCH 15/67] [SfpUtil] sfp eeprom with option dom is not working on Xcvrs with flat memory (#3385) Signed-off-by: Mihir Patel --- sfputil/main.py | 14 +++++++++ tests/sfputil_test.py | 73 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+) diff --git a/sfputil/main.py b/sfputil/main.py index 2674c51b10..309d5c98dd 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -673,6 +673,20 @@ def eeprom(port, dump_dom, namespace): output += convert_sfp_info_to_output_string(xcvr_info) if dump_dom: + try: + api = platform_chassis.get_sfp(physical_port).get_xcvr_api() + except NotImplementedError: + output += "API is currently not implemented for this platform\n" + click.echo(output) + sys.exit(ERROR_NOT_IMPLEMENTED) + if api is None: + output += "API is none while getting DOM info!\n" + click.echo(output) + sys.exit(ERROR_NOT_IMPLEMENTED) + else: + if api.is_flat_memory(): + output += "DOM values not supported for flat memory module\n" + continue try: xcvr_dom_info = 
platform_chassis.get_sfp(physical_port).get_transceiver_bulk_status() except NotImplementedError: diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 537c329819..5854bb201b 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -20,6 +20,46 @@ ERROR_NOT_IMPLEMENTED = 5 ERROR_INVALID_PORT = 6 +FLAT_MEMORY_MODULE_EEPROM_SFP_INFO_DICT = { + 'type': 'QSFP28 or later', + 'type_abbrv_name': 'QSFP28', + 'manufacturer': 'Mellanox', + 'model': 'MCP1600-C003', + 'vendor_rev': 'A2', + 'serial': 'MT1636VS10561', + 'vendor_oui': '00-02-c9', + 'vendor_date': '2016-07-18', + 'connector': 'No separable connector', + 'encoding': '64B66B', + 'ext_identifier': 'Power Class 1(1.5W max)', + 'ext_rateselect_compliance': 'QSFP+ Rate Select Version 1', + 'cable_type': 'Length Cable Assembly(m)', + 'cable_length': '3', + 'application_advertisement': 'N/A', + 'specification_compliance': "{'10/40G Ethernet Compliance Code': '40GBASE-CR4'}", + 'dom_capability': "{'Tx_power_support': 'no', 'Rx_power_support': 'no',\ + 'Voltage_support': 'no', 'Temp_support': 'no'}", + 'nominal_bit_rate': '255' +} +FLAT_MEMORY_MODULE_EEPROM = """Ethernet16: SFP EEPROM detected + Application Advertisement: N/A + Connector: No separable connector + Encoding: 64B66B + Extended Identifier: Power Class 1(1.5W max) + Extended RateSelect Compliance: QSFP+ Rate Select Version 1 + Identifier: QSFP28 or later + Length Cable Assembly(m): 3 + Nominal Bit Rate(100Mbs): 255 + Specification compliance: + 10/40G Ethernet Compliance Code: 40GBASE-CR4 + Vendor Date Code(YYYY-MM-DD Lot): 2016-07-18 + Vendor Name: Mellanox + Vendor OUI: 00-02-c9 + Vendor PN: MCP1600-C003 + Vendor Rev: A2 + Vendor SN: MT1636VS10561 +""" + class TestSfputil(object): def test_format_dict_value_to_string(self): sorted_key_table = [ @@ -585,6 +625,39 @@ def test_show_eeprom_RJ45(self, mock_chassis): expected_output = "Ethernet16: SFP EEPROM is not applicable for RJ45 port\n\n\n" assert result.output == expected_output + 
@patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + @pytest.mark.parametrize("exception, xcvr_api_none, expected_output", [ + (None, False, '''DOM values not supported for flat memory module\n\n'''), + (NotImplementedError, False, '''API is currently not implemented for this platform\n\n'''), + (None, True, '''API is none while getting DOM info!\n\n''') + ]) + @patch('sfputil.main.platform_chassis') + def test_show_eeprom_dom_conditions(self, mock_chassis, exception, xcvr_api_none, expected_output): + mock_sfp = MagicMock() + mock_sfp.get_presence.return_value = True + mock_sfp.get_transceiver_info.return_value = FLAT_MEMORY_MODULE_EEPROM_SFP_INFO_DICT + mock_chassis.get_sfp.return_value = mock_sfp + + if exception: + mock_chassis.get_sfp().get_xcvr_api.side_effect = exception + elif xcvr_api_none: + mock_chassis.get_sfp().get_xcvr_api.return_value = None + else: + mock_api = MagicMock() + mock_chassis.get_sfp().get_xcvr_api.return_value = mock_api + + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['show'].commands['eeprom'], ["-p", "Ethernet16", "-d"]) + + if exception or xcvr_api_none: + assert result.exit_code == ERROR_NOT_IMPLEMENTED + else: + assert result.exit_code == 0 + assert result.output == FLAT_MEMORY_MODULE_EEPROM + expected_output + @patch('sfputil.main.platform_chassis') @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=0))) def test_show_eeprom_hexdump_invalid_port(self, mock_chassis): From d1ca905e3b0733170d19abeecad1cfbfd0180698 Mon Sep 17 00:00:00 2001 From: ryanzhu706 Date: Fri, 19 Jul 2024 13:41:01 -0700 Subject: [PATCH 16/67] Update DB version to 202411 on master branch. 
(#3414) * Update DB version to 202411 on master branch. --- scripts/db_migrator.py | 14 +++++++++--- tests/db_migrator_test.py | 45 +++++++++++++++++++++------------------ 2 files changed, 35 insertions(+), 24 deletions(-) diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index afd5e638de..9be3ce325b 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -58,7 +58,7 @@ def __init__(self, namespace, socket=None): none-zero values. build: sequentially increase within a minor version domain. """ - self.CURRENT_VERSION = 'version_202405_01' + self.CURRENT_VERSION = 'version_202411_01' self.TABLE_NAME = 'VERSIONS' self.TABLE_KEY = 'DATABASE' @@ -1228,10 +1228,18 @@ def version_202311_03(self): def version_202405_01(self): """ - Version 202405_01, this version should be the final version for - master branch until 202405 branch is created. + Version 202405_01. """ log.log_info('Handling version_202405_01') + self.set_version('version_202411_01') + return 'version_202411_01' + + def version_202411_01(self): + """ + Version 202411_01, this version should be the final version for + master branch until 202411 branch is created. 
+ """ + log.log_info('Handling version_202411_01') return None def get_version(self): diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index e21539766a..cdf4251bd7 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -74,24 +74,27 @@ class TestVersionComparison(object): def setup_class(cls): cls.version_comp_list = [ # Old format v.s old format - { 'v1' : 'version_1_0_1', 'v2' : 'version_1_0_2', 'result' : False }, - { 'v1' : 'version_1_0_2', 'v2' : 'version_1_0_1', 'result' : True }, - { 'v1' : 'version_1_0_1', 'v2' : 'version_2_0_1', 'result' : False }, - { 'v1' : 'version_2_0_1', 'v2' : 'version_1_0_1', 'result' : True }, + {'v1': 'version_1_0_1', 'v2': 'version_1_0_2', 'result': False}, + {'v1': 'version_1_0_2', 'v2': 'version_1_0_1', 'result': True}, + {'v1': 'version_1_0_1', 'v2': 'version_2_0_1', 'result': False}, + {'v1': 'version_2_0_1', 'v2': 'version_1_0_1', 'result': True}, # New format v.s old format - { 'v1' : 'version_1_0_1', 'v2' : 'version_202311_01', 'result' : False }, - { 'v1' : 'version_202311_01', 'v2' : 'version_1_0_1', 'result' : True }, - { 'v1' : 'version_1_0_1', 'v2' : 'version_master_01', 'result' : False }, - { 'v1' : 'version_master_01', 'v2' : 'version_1_0_1', 'result' : True }, + {'v1': 'version_1_0_1', 'v2': 'version_202311_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_1_0_1', 'result': True}, + {'v1': 'version_1_0_1', 'v2': 'version_master_01', 'result': False}, + {'v1': 'version_master_01', 'v2': 'version_1_0_1', 'result': True}, # New format v.s new format - { 'v1' : 'version_202311_01', 'v2' : 'version_202311_02', 'result' : False }, - { 'v1' : 'version_202311_02', 'v2' : 'version_202311_01', 'result' : True }, - { 'v1' : 'version_202305_01', 'v2' : 'version_202311_01', 'result' : False }, - { 'v1' : 'version_202311_01', 'v2' : 'version_202305_01', 'result' : True }, - { 'v1' : 'version_202311_01', 'v2' : 'version_master_01', 'result' : False }, - { 'v1' : 
'version_master_01', 'v2' : 'version_202311_01', 'result' : True }, - { 'v1' : 'version_master_01', 'v2' : 'version_master_02', 'result' : False }, - { 'v1' : 'version_master_02', 'v2' : 'version_master_01', 'result' : True }, + {'v1': 'version_202311_01', 'v2': 'version_202311_02', 'result': False}, + {'v1': 'version_202311_02', 'v2': 'version_202311_01', 'result': True}, + {'v1': 'version_202305_01', 'v2': 'version_202311_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_202305_01', 'result': True}, + {'v1': 'version_202405_01', 'v2': 'version_202411_01', 'result': False}, + {'v1': 'version_202411_01', 'v2': 'version_202405_01', 'result': True}, + {'v1': 'version_202411_01', 'v2': 'version_master_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_master_01', 'result': False}, + {'v1': 'version_master_01', 'v2': 'version_202311_01', 'result': True}, + {'v1': 'version_master_01', 'v2': 'version_master_02', 'result': False}, + {'v1': 'version_master_02', 'v2': 'version_master_01', 'result': True}, ] def test_version_comparison(self): @@ -383,7 +386,7 @@ def test_dns_nameserver_migrator(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'dns-nameserver-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_keys = dbmgtr.configDB.keys(dbmgtr.configDB.CONFIG_DB, 'DNS_NAMESERVER*') expected_keys = expected_db.cfgdb.keys(expected_db.cfgdb.CONFIG_DB, 'DNS_NAMESERVER*') @@ -895,7 +898,7 @@ def test_init(self, mock_args): @mock.patch('swsscommon.swsscommon.SonicDBConfig.isInit', mock.MagicMock(return_value=False)) @mock.patch('swsscommon.swsscommon.SonicDBConfig.initialize', mock.MagicMock()) def test_init_no_namespace(self, mock_args): - mock_args.return_value=argparse.Namespace(namespace=None, 
operation='version_202405_01', socket=None) + mock_args.return_value = argparse.Namespace(namespace=None, operation='version_202411_01', socket=None) import db_migrator db_migrator.main() @@ -903,7 +906,7 @@ def test_init_no_namespace(self, mock_args): @mock.patch('swsscommon.swsscommon.SonicDBConfig.isGlobalInit', mock.MagicMock(return_value=False)) @mock.patch('swsscommon.swsscommon.SonicDBConfig.initializeGlobalConfig', mock.MagicMock()) def test_init_namespace(self, mock_args): - mock_args.return_value=argparse.Namespace(namespace="asic0", operation='version_202405_01', socket=None) + mock_args.return_value = argparse.Namespace(namespace="asic0", operation='version_202411_01', socket=None) import db_migrator db_migrator.main() @@ -940,7 +943,7 @@ def test_dns_nameserver_migrator_minigraph(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'gnmi-minigraph-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_table = dbmgtr.configDB.get_table("GNMI") expected_table = expected_db.cfgdb.get_table("GNMI") @@ -956,7 +959,7 @@ def test_dns_nameserver_migrator_configdb(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'gnmi-configdb-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_table = dbmgtr.configDB.get_table("GNMI") expected_table = expected_db.cfgdb.get_table("GNMI") From f19662277acba963e34cf8ca45c0fd7ab234be65 Mon Sep 17 00:00:00 2001 From: ganglv <88995770+ganglyu@users.noreply.github.com> Date: Tue, 23 Jul 2024 15:43:24 +0800 Subject: [PATCH 17/67] fix show techsupport date issue (#3437) What 
I did Show techsupport is designed to collect logs and core files since given date. I find that some core files are missing when given date is relative, for example "5 minutes ago". Microsoft ADO: 28737486 How I did it Create the reference file at the start of the script, and don't update it in find_files. How to verify it Run end to end test: show_techsupport/test_auto_techsupport.py --- scripts/generate_dump | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/generate_dump b/scripts/generate_dump index b163366bb0..3d0ef3430d 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1120,7 +1120,6 @@ save_file() { find_files() { trap 'handle_error $? $LINENO' ERR local -r directory=$1 - $TOUCH --date="${SINCE_DATE}" "${REFERENCE_FILE}" local -r find_command="find -L $directory -type f -newer ${REFERENCE_FILE}" echo $($find_command) @@ -1914,6 +1913,8 @@ main() { ${CMD_PREFIX}renice +5 -p $$ >> /dev/null ${CMD_PREFIX}ionice -c 2 -n 5 -p $$ >> /dev/null + # Created file as a reference to compare modification time + $TOUCH --date="${SINCE_DATE}" "${REFERENCE_FILE}" $MKDIR $V -p $TARDIR # Start with this script so its obvious what code is responsible From 772ee793d067be40eeb8779d20b645aa7f97ea30 Mon Sep 17 00:00:00 2001 From: Rida Hanif Date: Tue, 23 Jul 2024 14:42:13 -0700 Subject: [PATCH 18/67] IP Assignment Issue (#3408) #### What I did Added Check for IP Assignment on Port when a Vlan is configured. This PR is created in response to [Issue](https://github.com/sonic-net/sonic-buildimage/issues/19505) #### How I did it Modified config/main.py to add check for IP Assignment when Port has vlan membership #### How to verify it After this, ip cannot be assigned on port which is configured to a VLAN. 
--- config/main.py | 8 ++++++++ tests/vlan_test.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/config/main.py b/config/main.py index 709c96402a..46bfc332b0 100644 --- a/config/main.py +++ b/config/main.py @@ -4853,6 +4853,14 @@ def add_interface_ip(ctx, interface_name, ip_addr, gw, secondary): interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") + # Add a validation to check this interface is not a member in vlan before + # changing it to a router port mode + vlan_member_table = config_db.get_table('VLAN_MEMBER') + + if (interface_is_in_vlan(vlan_member_table, interface_name)): + click.echo("Interface {} is a member of vlan\nAborting!".format(interface_name)) + return + portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER') diff --git a/tests/vlan_test.py b/tests/vlan_test.py index 2d3c1dcf1b..fc3569b87d 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -1426,7 +1426,7 @@ def test_config_set_router_port_on_member_interface(self): ["Ethernet4", "10.10.10.1/24"], obj=obj) print(result.exit_code, result.output) assert result.exit_code == 0 - assert 'Interface Ethernet4 is in trunk mode and needs to be in routed mode!' in result.output + assert 'Interface Ethernet4 is a member of vlan\nAborting!\n' in result.output def test_config_vlan_add_member_of_portchannel(self): runner = CliRunner() From a81321595b1f2cf34b26255fb6953f304ba2df14 Mon Sep 17 00:00:00 2001 From: bktsim <144830673+bktsim-arista@users.noreply.github.com> Date: Wed, 24 Jul 2024 14:21:47 -0700 Subject: [PATCH 19/67] Fix multi-asic behaviour for dropstat (#3059) * Fixes dropstat multi-asic behaviour by using multi-asic helpers and ensuring that dropstat iterates through correct namespaces when 'show' command is run. 
Co-authored-by: rdjeric Co-authored-by: Kenneth Cheung --- scripts/dropstat | 118 ++++++++++++++------------ show/dropcounters.py | 7 +- tests/mock_tables/asic1/asic_db.json | 6 ++ tests/multi_asic_dropstat_test.py | 122 +++++++++++++++++++++++++++ tests/single_asic_dropstat_test.py | 72 ++++++++++++++++ 5 files changed, 272 insertions(+), 53 deletions(-) create mode 100644 tests/mock_tables/asic1/asic_db.json create mode 100644 tests/multi_asic_dropstat_test.py create mode 100644 tests/single_asic_dropstat_test.py diff --git a/scripts/dropstat b/scripts/dropstat index 485ac65637..219ad2b494 100755 --- a/scripts/dropstat +++ b/scripts/dropstat @@ -11,8 +11,8 @@ # - Refactor calls to COUNTERS_DB to reduce redundancy # - Cache DB queries to reduce # of expensive queries +import click import json -import argparse import os import socket import sys @@ -20,6 +20,9 @@ import sys from collections import OrderedDict from natsort import natsorted from tabulate import tabulate +from sonic_py_common import multi_asic +from utilities_common.general import load_db_config +import utilities_common.multi_asic as multi_asic_util # mock the redis for unit test purposes # try: @@ -28,9 +31,14 @@ try: test_path = os.path.join(modules_path, "tests") sys.path.insert(0, modules_path) sys.path.insert(0, test_path) - import mock_tables.dbconnector + from tests.mock_tables import dbconnector socket.gethostname = lambda: 'sonic_drops_test' os.getuid = lambda: 27 + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import tests.mock_tables.mock_multi_asic + dbconnector.load_namespace_config() + else: + dbconnector.load_database_config() except KeyError: pass @@ -90,30 +98,32 @@ def get_dropstat_dir(): class DropStat(object): - def __init__(self): - self.config_db = ConfigDBConnector() - self.config_db.connect() - - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.COUNTERS_DB) - self.db.connect(self.db.ASIC_DB) - self.db.connect(self.db.APPL_DB) 
- self.db.connect(self.db.CONFIG_DB) + def __init__(self, namespace): + self.namespaces = multi_asic.get_namespace_list(namespace) + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.db = None + self.config_db = None + self.cached_namespace = None dropstat_dir = get_dropstat_dir() self.port_drop_stats_file = os.path.join(dropstat_dir, 'port-stats') - self.switch_drop_stats_file = os.path.join(dropstat_dir + 'switch-stats') - self.switch_std_drop_stats_file = os.path.join(dropstat_dir, 'switch-std-drop-stats') + self.switch_drop_stats_file = os.path.join(dropstat_dir, 'switch-stats') + self.switch_std_drop_stats_file = os.path.join(dropstat_dir, 'switch-std-drop-stats') self.stat_lookup = {} self.reverse_stat_lookup = {} + @multi_asic_util.run_on_multi_asic def show_drop_counts(self, group, counter_type): """ Prints out the current drop counts at the port-level and switch-level. """ + if os.environ.get("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE", "0") == "1": + # Temp cache needs to be cleard to avoid interference from previous test cases + UserCache().remove() + self.show_switch_std_drop_counts(group, counter_type) self.show_port_drop_counts(group, counter_type) print('') @@ -124,16 +134,36 @@ class DropStat(object): Clears the current drop counts. 
""" - try: - json.dump(self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP), - open(self.port_drop_stats_file, 'w+')) + counters_port_drop = {} + counters_switch_drop = {} + counters_switch_std_drop = {} + for ns in self.namespaces: + self.config_db = multi_asic.connect_config_db_for_ns(ns) + self.db = multi_asic.connect_to_all_dbs_for_ns(ns) + + counts = self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP) + if counts: + counters_port_drop.update(counts) + counters = self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP) if counters: - json.dump(self.get_counts(counters, self.get_switch_id()), open(self.switch_drop_stats_file, 'w+')) + counts = self.get_counts(counters, self.get_switch_id()) + counters_switch_drop.update(counts) counters = self.get_configured_counters(DEBUG_COUNTER_SWITCH_STAT_MAP, True) if counters: - json.dump(self.get_counts(counters, self.get_switch_id()), open(self.switch_std_drop_stats_file, 'w+')) + counts = self.get_counts(counters, self.get_switch_id()) + counters_switch_std_drop.update(counts) + + try: + if counters_port_drop: + json.dump(counters_port_drop, open(self.port_drop_stats_file, 'w+')) + + if counters_switch_drop: + json.dump(counters_switch_drop, open(self.switch_drop_stats_file, 'w+')) + + if counters_switch_std_drop: + json.dump(counters_switch_std_drop, open(self.switch_std_drop_stats_file, 'w+')) except IOError as e: print(e) sys.exit(e.errno) @@ -321,12 +351,13 @@ class DropStat(object): the given object type. 
""" + if self.cached_namespace != self.multi_asic.current_namespace: + self.stat_lookup = {} + self.cached_namespace = self.multi_asic.current_namespace + if not self.stat_lookup.get(object_stat_map, None): stats_map = self.db.get_all(self.db.COUNTERS_DB, object_stat_map) - if stats_map: - self.stat_lookup[object_stat_map] = stats_map - else: - self.stat_lookup[object_stat_map] = None + self.stat_lookup[object_stat_map] = stats_map if stats_map else None return self.stat_lookup[object_stat_map] @@ -457,39 +488,22 @@ class DropStat(object): else: return PORT_STATE_NA - -def main(): - parser = argparse.ArgumentParser(description='Display drop counters', - formatter_class=argparse.RawTextHelpFormatter, - epilog=""" -Examples: - dropstat -""") - - # Version - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - - # Actions - parser.add_argument('-c', '--command', type=str, help='Desired action to perform') - - # Variables - parser.add_argument('-g', '--group', type=str, help='The group of the target drop counter', default=None) - parser.add_argument('-t', '--type', type=str, help='The type of the target drop counter', default=None) - - args = parser.parse_args() - - command = args.command - - group = args.group - counter_type = args.type - - dcstat = DropStat() +@click.command(help='Display drop counters') +@click.option('-c', '--command', required=True, help='Desired action to perform', + type=click.Choice(['clear', 'show'], case_sensitive=False)) +@click.option('-g', '--group', default=None, help='The group of the target drop counter') +@click.option('-t', '--type', 'counter_type', default=None, help='The type of the target drop counter') +@click.option('-n', '--namespace', help='Namespace name', default=None, + type=click.Choice(multi_asic.get_namespace_list())) +@click.version_option(version='1.0') +def main(command, group, counter_type, namespace): + load_db_config() + + dcstat = DropStat(namespace) if command == 'clear': 
dcstat.clear_drop_counts() - elif command == 'show': - dcstat.show_drop_counts(group, counter_type) else: - print("Command not recognized") + dcstat.show_drop_counts(group, counter_type) if __name__ == '__main__': diff --git a/show/dropcounters.py b/show/dropcounters.py index 30779b9364..9bb988fc5b 100644 --- a/show/dropcounters.py +++ b/show/dropcounters.py @@ -1,5 +1,6 @@ import click import utilities_common.cli as clicommon +import utilities_common.multi_asic as multi_asic_util # @@ -41,7 +42,8 @@ def capabilities(verbose): @click.option('-g', '--group', required=False) @click.option('-t', '--counter_type', required=False) @click.option('--verbose', is_flag=True, help="Enable verbose output") -def counts(group, counter_type, verbose): +@multi_asic_util.multi_asic_click_option_namespace +def counts(group, counter_type, verbose, namespace): """Show drop counts""" cmd = ['dropstat', '-c', 'show'] @@ -51,4 +53,7 @@ def counts(group, counter_type, verbose): if counter_type: cmd += ['-t', str(counter_type)] + if namespace: + cmd += ['-n', str(namespace)] + clicommon.run_command(cmd, display_cmd=verbose) diff --git a/tests/mock_tables/asic1/asic_db.json b/tests/mock_tables/asic1/asic_db.json new file mode 100644 index 0000000000..1a769b82b5 --- /dev/null +++ b/tests/mock_tables/asic1/asic_db.json @@ -0,0 +1,6 @@ +{ + "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH:oid:0x21000000000000": { + "SAI_SWITCH_ATTR_INIT_SWITCH": "true", + "SAI_SWITCH_ATTR_SRC_MAC_ADDRESS": "DE:AD:BE:EF:CA:FE" + } +} diff --git a/tests/multi_asic_dropstat_test.py b/tests/multi_asic_dropstat_test.py new file mode 100644 index 0000000000..8b9dd72826 --- /dev/null +++ b/tests/multi_asic_dropstat_test.py @@ -0,0 +1,122 @@ +import os +import sys +from .utils import get_result_and_return_code + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + 
+dropstat_masic_result_asic0 = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +------------ ------- -------- ---------- -------- ---------- --------- --------- + Ethernet0 U 10 100 0 0 80 20 + Ethernet4 U 0 1000 0 0 800 100 +Ethernet-BP0 U 0 1000 0 0 800 100 +Ethernet-BP4 U 0 1000 0 0 800 100 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 1000 +""" + +dropstat_masic_result_asic1 = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +-------------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet-BP256 U 10 100 0 0 80 20 +Ethernet-BP260 U 0 1000 0 0 800 100 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 1000 +""" + +dropstat_masic_result_clear_all = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +------------ ------- -------- ---------- -------- ---------- --------- --------- + Ethernet0 U 0 0 0 0 0 0 + Ethernet4 U 0 0 0 0 0 0 +Ethernet-BP0 U 0 0 0 0 0 0 +Ethernet-BP4 U 0 0 0 0 0 0 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 0 + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +-------------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet-BP256 U 0 0 0 0 0 0 +Ethernet-BP260 U 0 0 0 0 0 0 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 0 +""" + + +class TestMultiAsicDropstat(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + print("SETUP") + + def test_show_dropcount_masic_asic0(self): + os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1" + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show', '-n', 'asic0' + ]) + os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE") + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == 
dropstat_masic_result_asic0 and return_code == 0 + + def test_show_dropcount_masic_all_and_clear(self): + os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1" + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE") + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_masic_result_asic0 + dropstat_masic_result_asic1 + assert return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'clear' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == 'Cleared drop counters\n' and return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_masic_result_clear_all and return_code == 0 + + def test_show_dropcount_masic_invalid_ns(self): + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show', '-n', 'asic5' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 2 + assert "invalid choice: asic5" in result + + def test_show_dropcount_version(self): + return_code, result = get_result_and_return_code([ + 'dropstat', '--version' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ.pop("UTILITIES_UNIT_TESTING") + os.environ.pop("UTILITIES_UNIT_TESTING_TOPOLOGY") + print("TEARDOWN") diff --git a/tests/single_asic_dropstat_test.py b/tests/single_asic_dropstat_test.py new file mode 100644 index 0000000000..c521bcfa60 --- /dev/null +++ b/tests/single_asic_dropstat_test.py @@ -0,0 +1,72 @@ +import os 
+import sys +from .utils import get_result_and_return_code + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +dropstat_result = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +--------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet0 D 10 100 0 0 80 20 +Ethernet4 N/A 0 1000 0 0 800 100 +Ethernet8 N/A 100 10 0 0 10 0 + + DEVICE SWITCH_DROPS lowercase_counter +---------------- -------------- ------------------- +sonic_drops_test 1000 0 +""" + +dropstat_result_clear_all = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +--------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet0 D 0 0 0 0 0 0 +Ethernet4 N/A 0 0 0 0 0 0 +Ethernet8 N/A 0 0 0 0 0 0 + + DEVICE SWITCH_DROPS lowercase_counter +---------------- -------------- ------------------- +sonic_drops_test 0 0 +""" + + +class TestMultiAsicDropstat(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + print("SETUP") + + def test_show_dropcount_and_clear(self): + os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1" + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE") + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_result + assert return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'clear' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == 'Cleared drop counters\n' and return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + print("return_code: 
{}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_result_clear_all and return_code == 0 + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ.pop("UTILITIES_UNIT_TESTING") + print("TEARDOWN") From 9b24421aacaaa496f65fbab39e918283886205e5 Mon Sep 17 00:00:00 2001 From: Anoop Kamath <115578705+AnoopKamath@users.noreply.github.com> Date: Thu, 25 Jul 2024 18:01:49 -0700 Subject: [PATCH 20/67] Add sfputil power enable/disable command (#3418) --- sfputil/main.py | 56 +++++++++++++++++++++++++++++++++++++++++++ tests/sfputil_test.py | 45 ++++++++++++++++++++++++++++++++++ 2 files changed, 101 insertions(+) diff --git a/sfputil/main.py b/sfputil/main.py index 309d5c98dd..2c8f85d016 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -1320,6 +1320,62 @@ def reset(port_name): i += 1 + +# 'power' subgroup +@cli.group() +def power(): + """Enable or disable power of SFP transceiver""" + pass + + +# Helper method for setting low-power mode +def set_power(port_name, enable): + physical_port = logical_port_to_physical_port_index(port_name) + sfp = platform_chassis.get_sfp(physical_port) + + if is_port_type_rj45(port_name): + click.echo("Power disable/enable is not available for RJ45 port {}.".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + presence = sfp.get_presence() + except NotImplementedError: + click.echo("sfp get_presence() NOT implemented!") + sys.exit(EXIT_FAIL) + + if not presence: + click.echo("{}: SFP EEPROM not detected\n".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + result = platform_chassis.get_sfp(physical_port).set_power(enable) + except (NotImplementedError, AttributeError): + click.echo("This functionality is currently not implemented for this platform") + sys.exit(ERROR_NOT_IMPLEMENTED) + + if result: + click.echo("OK") + else: + click.echo("Failed") + sys.exit(EXIT_FAIL) + + +# 'disable' subcommand +@power.command() 
+@click.argument('port_name', metavar='') +def disable(port_name): + """Disable power of SFP transceiver""" + set_power(port_name, False) + + +# 'enable' subcommand +@power.command() +@click.argument('port_name', metavar='') +def enable(port_name): + """Enable power of SFP transceiver""" + set_power(port_name, True) + + def update_firmware_info_to_state_db(port_name): physical_port = logical_port_to_physical_port_index(port_name) diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 5854bb201b..0e58daa18e 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -610,6 +610,51 @@ def test_show_lpmode(self, mock_chassis): """ assert result.output == expected_output + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=True)) + def test_power_RJ45(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + mock_sfp.get_presence.return_value = True + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Power disable/enable is not available for RJ45 port Ethernet0.\n' + assert result.exit_code == EXIT_FAIL + + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + def test_power(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + mock_sfp.get_presence.return_value = True + runner = CliRunner() + result = 
runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.exit_code == 0 + + mock_sfp.get_presence.return_value = False + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Ethernet0: SFP EEPROM not detected\n\n' + + mock_sfp.get_presence.return_value = True + mock_sfp.set_power = MagicMock(side_effect=NotImplementedError) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'This functionality is currently not implemented for this platform\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED + + mock_sfp.set_power = MagicMock(return_value=False) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Failed\n' + + @patch('sfputil.main.platform_chassis') @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) From 84cb00a4b2d7e8fb2bcab259367836fa11a17d0a Mon Sep 17 00:00:00 2001 From: Changrong Wu Date: Fri, 26 Jul 2024 08:51:51 -0700 Subject: [PATCH 21/67] Change the default behavior of show ip bgp network (#3447) * show/bgp_frr_v4.py: change the default behavior of "show ip bgp network" - after change, show ip bgp network will have "all" as the default value of namespace option - after change, ip-address/ip-prefix is a required argument when executing show ip bgp network on a chassis supervisor * tests/remote_show_test.py update unit tests to comply with the new behaviors * tests/show_bgp_network_test.py: update a test vector to make it comply with the new default behavior * tests/bgp_commands_input/bgp_network_test_vector.py: update a test vector to comply with the new default behavior --- show/bgp_frr_v4.py | 30 +++++++++++-------- .../bgp_network_test_vector.py | 6 ++-- 
tests/remote_show_test.py | 16 ++++++++++ tests/show_bgp_network_test.py | 2 +- 4 files changed, 37 insertions(+), 17 deletions(-) diff --git a/show/bgp_frr_v4.py b/show/bgp_frr_v4.py index 10e5d982cd..ddcd688581 100644 --- a/show/bgp_frr_v4.py +++ b/show/bgp_frr_v4.py @@ -20,12 +20,13 @@ def bgp(): """Show IPv4 BGP (Border Gateway Protocol) information""" if device_info.is_supervisor(): - # if the device is a chassis, the command need to be executed by rexec - click.echo("Since the current device is a chassis supervisor, " + - "this command will be executed remotely on all linecards") - proc = subprocess.run(["rexec", "all"] + ["-c", " ".join(sys.argv)]) - sys.exit(proc.returncode) - pass + subcommand = sys.argv[3] + if subcommand not in "network": + # the command will be executed directly by rexec if it is not "show ip bgp network" + click.echo("Since the current device is a chassis supervisor, " + + "this command will be executed remotely on all linecards") + proc = subprocess.run(["rexec", "all"] + ["-c", " ".join(sys.argv)]) + sys.exit(proc.returncode) # 'summary' subcommand ("show ip bgp summary") @@ -92,7 +93,7 @@ def neighbors(ipaddress, info_type, namespace): @bgp.command() @click.argument('ipaddress', metavar='[|]', - required=False) + required=True if device_info.is_supervisor() else False) @click.argument('info_type', metavar='[bestpath|json|longer-prefixes|multipath]', type=click.Choice( @@ -103,19 +104,22 @@ def neighbors(ipaddress, info_type, namespace): 'namespace', type=str, show_default=True, - required=True if multi_asic.is_multi_asic is True else False, + required=False, help='Namespace name or all', - default=multi_asic.DEFAULT_NAMESPACE, + default="all", callback=multi_asic_util.multi_asic_namespace_validation_callback) def network(ipaddress, info_type, namespace): """Show IP (IPv4) BGP network""" + if device_info.is_supervisor(): + # the command will be executed by rexec + click.echo("Since the current device is a chassis supervisor, " + + 
"this command will be executed remotely on all linecards") + proc = subprocess.run(["rexec", "all"] + ["-c", " ".join(sys.argv)]) + sys.exit(proc.returncode) + namespace = namespace.strip() if multi_asic.is_multi_asic(): - if namespace == multi_asic.DEFAULT_NAMESPACE: - ctx = click.get_current_context() - ctx.fail('-n/--namespace option required. provide namespace from list {}' - .format(multi_asic.get_namespace_list())) if namespace != "all" and namespace not in multi_asic.get_namespace_list(): ctx = click.get_current_context() ctx.fail('invalid namespace {}. provide namespace from list {}' diff --git a/tests/bgp_commands_input/bgp_network_test_vector.py b/tests/bgp_commands_input/bgp_network_test_vector.py index 73ece16a66..f9edd66fa2 100644 --- a/tests/bgp_commands_input/bgp_network_test_vector.py +++ b/tests/bgp_commands_input/bgp_network_test_vector.py @@ -595,10 +595,10 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 0, 'rc_output': bgp_v6_network_longer_prefixes }, - 'bgp_v4_network_multi_asic': { + 'bgp_v4_network_default_multi_asic': { 'args': [], - 'rc': 2, - 'rc_err_msg': multi_asic_bgp_network_err + 'rc': 0, + 'rc_output': bgp_v4_network_all_asic }, 'bgp_v4_network_asic0': { 'args': ['-nasic0'], diff --git a/tests/remote_show_test.py b/tests/remote_show_test.py index 6acbb8185f..e1be3d0302 100644 --- a/tests/remote_show_test.py +++ b/tests/remote_show_test.py @@ -31,6 +31,7 @@ def setup_class(cls): pass @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + @mock.patch("sys.argv", ["show", "ip", "bgp", "summary"]) def test_show_ip_bgp_rexec(self, setup_bgp_commands): show = setup_bgp_commands runner = CliRunner() @@ -44,6 +45,7 @@ def test_show_ip_bgp_rexec(self, setup_bgp_commands): assert MULTI_LC_REXEC_OUTPUT == result.output @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + @mock.patch("sys.argv", ["show", "ip", "bgp", "summary"]) def 
test_show_ip_bgp_error_rexec(self, setup_bgp_commands): show = setup_bgp_commands runner = CliRunner() @@ -55,3 +57,17 @@ def test_show_ip_bgp_error_rexec(self, setup_bgp_commands): subprocess.run = _old_subprocess_run assert result.exit_code == 1 assert MULTI_LC_ERR_OUTPUT == result.output + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + @mock.patch("sys.argv", ["show", "ip", "bgp", "network", "10.0.0.0/24"]) + def test_show_ip_bgp_network_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_command + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["network", "10.0.0.0/24"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 0 + assert MULTI_LC_REXEC_OUTPUT == result.output diff --git a/tests/show_bgp_network_test.py b/tests/show_bgp_network_test.py index d3f24c8571..bfc23d8912 100644 --- a/tests/show_bgp_network_test.py +++ b/tests/show_bgp_network_test.py @@ -78,7 +78,7 @@ def setup_class(cls): @pytest.mark.parametrize( 'setup_multi_asic_bgp_instance, test_vector', - [('bgp_v4_network', 'bgp_v4_network_multi_asic'), + [('bgp_v4_network_all_asic', 'bgp_v4_network_default_multi_asic'), ('bgp_v6_network', 'bgp_v6_network_multi_asic'), ('bgp_v4_network_asic0', 'bgp_v4_network_asic0'), ('bgp_v4_network_ip_address_asic0', 'bgp_v4_network_ip_address_asic0'), From ff2c73f85ca24dea2634dc5ec83956f27ab9e32b Mon Sep 17 00:00:00 2001 From: Xincun Li <147451452+xincunli-sonic@users.noreply.github.com> Date: Tue, 30 Jul 2024 16:02:59 -0700 Subject: [PATCH 22/67] Add namespace check for multiasic (#3458) * Add namespace check for multiasic * Fix format --- generic_config_updater/generic_updater.py | 3 + .../multiasic_change_applier_test.py | 137 +++++++++++++++--- 2 files changed, 121 insertions(+), 19 deletions(-) diff --git a/generic_config_updater/generic_updater.py 
b/generic_config_updater/generic_updater.py index b6d65e2ce6..8ce27455bb 100644 --- a/generic_config_updater/generic_updater.py +++ b/generic_config_updater/generic_updater.py @@ -31,6 +31,9 @@ def extract_scope(path): scope = HOST_NAMESPACE remainder = "/" + "/".join(parts[1:]) else: + if multi_asic.is_multi_asic(): + raise GenericConfigUpdaterError(f"Multi ASIC must have namespace prefix in path: '{path}'.") + scope = "" remainder = path return scope, remainder diff --git a/tests/generic_config_updater/multiasic_change_applier_test.py b/tests/generic_config_updater/multiasic_change_applier_test.py index d7f734d2ec..0102cfff00 100644 --- a/tests/generic_config_updater/multiasic_change_applier_test.py +++ b/tests/generic_config_updater/multiasic_change_applier_test.py @@ -9,25 +9,124 @@ class TestMultiAsicChangeApplier(unittest.TestCase): - def test_extract_scope(self): + @patch('sonic_py_common.multi_asic.is_multi_asic') + def test_extract_scope_multiasic(self, mock_is_multi_asic): + mock_is_multi_asic.return_value = True test_paths_expectedresults = { - "/asic0/PORTCHANNEL/PortChannel102/admin_status": (True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic01/PORTCHANNEL/PortChannel102/admin_status": (True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": (True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": (True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status"), - "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled"), - "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled"), - "/sometable/data": (True, "", "/sometable/data"), - "": (False, "", ""), - "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (False, "", ""), + "/asic0/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0", 
"/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic01/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/sometable/data": ( + False, "", "/sometable/data" + ), + "": ( + False, "", "" + ), + "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + False, "", "" + ), + "/asic77": ( + False, "", "" + ), + "/Asic0/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/Localhost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asci1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asicx/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asic-12/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + } + + for test_path, (result, expectedscope, expectedremainder) in test_paths_expectedresults.items(): + try: + scope, remainder = extract_scope(test_path) + assert(scope == expectedscope) + assert(remainder == expectedremainder) + except Exception: + assert(not result) + + @patch('sonic_py_common.multi_asic.is_multi_asic') + def test_extract_scope_singleasic(self, mock_is_multi_asic): + mock_is_multi_asic.return_value = False + test_paths_expectedresults = { + "/asic0/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status" + ), + 
"/asic01/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/sometable/data": ( + True, "", "/sometable/data" + ), + "": ( + False, "", "" + ), + "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + False, "", "" + ), "/asic77": (False, "", ""), - "/Asic0/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/Localhost/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/asci1/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/asicx/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/asic-12/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/Asic0/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/Localhost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asci1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asicx/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asic-12/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), } for test_path, (result, expectedscope, expectedremainder) in test_paths_expectedresults.items(): @@ -35,8 +134,8 @@ def test_extract_scope(self): scope, 
remainder = extract_scope(test_path) assert(scope == expectedscope) assert(remainder == expectedremainder) - except Exception as e: - assert(result == False) + except Exception: + assert(not result) @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) From f50587a1ff65bb489231bfe45cec805fb32dbf00 Mon Sep 17 00:00:00 2001 From: Changrong Wu Date: Fri, 2 Aug 2024 15:35:59 -0700 Subject: [PATCH 23/67] Update README.md (#3406) * Update README.md The new location of the sonic-utilities target wheel package is under bookworm instead of bullseye. Update the README to make it consistent with the current build behavior. * README.md: update build instrucrions --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 91146bc9d0..d6f9a5e25a 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ A convenient alternative is to let the SONiC build system configure a build envi 2. Build the sonic-utilities Python wheel package inside the Bullseye slave container, and tell the build system to keep the container alive when finished ``` - make NOSTRETCH=1 NOBUSTER=1 KEEP_SLAVE_ON=yes target/python-wheels/bullseye/sonic_utilities-1.2-py3-none-any.whl + make -f Makefile.work BLDENV=bookworm KEEP_SLAVE_ON=yes target/python-wheels/bookworm/sonic_utilities-1.2-py3-none-any.whl ``` 3. When the build finishes, your prompt will change to indicate you are inside the slave container. Change into the `src/sonic-utilities/` directory From 018eb737eef61fb1f1134d1c80d508a756973e05 Mon Sep 17 00:00:00 2001 From: SuvarnaMeenakshi <50386592+SuvarnaMeenakshi@users.noreply.github.com> Date: Mon, 5 Aug 2024 09:26:46 -0700 Subject: [PATCH 24/67] Fix to use IPv6 linklocal address as snmp agent address (#3215) What I did If link local IPv6 address is added as SNMP agent address, it will fail. 
This PR requires changes in snmpd.conf.j2 template here sonic-net/sonic-buildimage#18350 How I did it Append scope id to ipv6 link local IP address. How to verify it Able to configure link local ipv6 address as snmp agent address sudo config snmpagentaddress add fe80::a%eth0 --- config/main.py | 7 +++++-- tests/config_snmp_test.py | 28 ++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/config/main.py b/config/main.py index 46bfc332b0..f48c446adf 100644 --- a/config/main.py +++ b/config/main.py @@ -3498,7 +3498,10 @@ def add_snmp_agent_address(ctx, agentip, port, vrf): """Add the SNMP agent listening IP:Port%Vrf configuration""" #Construct SNMP_AGENT_ADDRESS_CONFIG table key in the format ip|| - if not clicommon.is_ipaddress(agentip): + # Link local IP address should be provided along with zone id + # % for ex fe80::1%eth0 + agent_ip_addr = agentip.split('%')[0] + if not clicommon.is_ipaddress(agent_ip_addr): click.echo("Invalid IP address") return False config_db = ctx.obj['db'] @@ -3508,7 +3511,7 @@ def add_snmp_agent_address(ctx, agentip, port, vrf): click.echo("ManagementVRF is Enabled. 
Provide vrf.") return False found = 0 - ip = ipaddress.ip_address(agentip) + ip = ipaddress.ip_address(agent_ip_addr) for intf in netifaces.interfaces(): ipaddresses = netifaces.ifaddresses(intf) if ip_family[ip.version] in ipaddresses: diff --git a/tests/config_snmp_test.py b/tests/config_snmp_test.py index 76f5675690..25c54d36ec 100644 --- a/tests/config_snmp_test.py +++ b/tests/config_snmp_test.py @@ -877,6 +877,34 @@ def test_config_snmp_community_add_new_community_with_invalid_type_yang_validati assert result.exit_code != 0 assert 'SNMP community configuration failed' in result.output + @patch('netifaces.interfaces', mock.Mock(return_value=['eth0'])) + @patch('netifaces.ifaddresses', mock.Mock(return_value={2: + [{'addr': '10.1.0.32', 'netmask': '255.255.255.0', + 'broadcast': '10.1.0.255'}], + 10: [{'addr': 'fe80::1%eth0', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]})) + @patch('os.system', mock.Mock(return_value=0)) + def test_config_snmpagentaddress_add_linklocal(self): + db = Db() + obj = {'db': db.cfgdb} + runner = CliRunner() + runner.invoke(config.config.commands["snmpagentaddress"].commands["add"], ["fe80::1%eth0"], obj=obj) + assert ('fe80::1%eth0', '', '') in db.cfgdb.get_keys('SNMP_AGENT_ADDRESS_CONFIG') + assert db.cfgdb.get_entry("SNMP_AGENT_ADDRESS_CONFIG", "fe80::1%eth0||") == {} + + @patch('netifaces.interfaces', mock.Mock(return_value=['eth0'])) + @patch('netifaces.ifaddresses', mock.Mock(return_value={2: + [{'addr': '10.1.0.32', 'netmask': '255.255.255.0', + 'broadcast': '10.1.0.255'}], + 10: [{'addr': 'fe80::1', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]})) + @patch('os.system', mock.Mock(return_value=0)) + def test_config_snmpagentaddress_add_ipv4(self): + db = Db() + obj = {'db': db.cfgdb} + runner = CliRunner() + runner.invoke(config.config.commands["snmpagentaddress"].commands["add"], ["10.1.0.32"], obj=obj) + assert ('10.1.0.32', '', '') in db.cfgdb.get_keys('SNMP_AGENT_ADDRESS_CONFIG') + assert db.cfgdb.get_entry("SNMP_AGENT_ADDRESS_CONFIG", 
"10.1.0.32||") == {} + @classmethod def teardown_class(cls): print("TEARDOWN") From 557d68865cd92f0ede20c89edc636554605a4bf1 Mon Sep 17 00:00:00 2001 From: Andriy Yurkiv <70649192+ayurkiv-nvda@users.noreply.github.com> Date: Mon, 5 Aug 2024 19:37:19 +0300 Subject: [PATCH 25/67] [Mellanox] Add support for Mellanox-SN4700-O32 and Mellanox-SN4700-V64 (#3450) Signed-off-by: Andriy Yurkiv --- generic_config_updater/gcu_field_operation_validators.conf.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index 77b504b313..a379e7282f 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -20,7 +20,7 @@ "spc1": [ "ACS-MSN2700", "ACS-MSN2740", "ACS-MSN2100", "ACS-MSN2410", "ACS-MSN2010", "Mellanox-SN2700", "Mellanox-SN2700-C28D8", "Mellanox-SN2700-D40C8S8", "Mellanox-SN2700-D44C10", "Mellanox-SN2700-D48C8", "ACS-MSN2700-A1", "Mellanox-SN2700-A1", "Mellanox-SN2700-A1-C28D8", "Mellanox-SN2700-A1-D40C8S8", "Mellanox-SN2700-A1-D44C10", "Mellanox-SN2700-A1-D48C8" ], "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], - "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40", + "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40","Mellanox-SN4700-O32","Mellanox-SN4700-V64", "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", 
"Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "ACS-SN5400" ] }, From 317e649514c9b205849ffb5ea96a6a233e38290c Mon Sep 17 00:00:00 2001 From: Vivek Date: Mon, 5 Aug 2024 09:40:18 -0700 Subject: [PATCH 26/67] Fix kexec_unload failure on secure boot enabled platforms (#3439) --- scripts/fast-reboot | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 2eeca11112..e183c34219 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -147,7 +147,7 @@ function clear_boot() # common_clear debug "${REBOOT_TYPE} failure ($?) cleanup ..." - /sbin/kexec -u || /bin/true + /sbin/kexec -u -a || /bin/true teardown_control_plane_assistant @@ -519,7 +519,7 @@ function unload_kernel() { # Unload the previously loaded kernel if any loaded if [[ "$(cat /sys/kernel/kexec_loaded)" -eq 1 ]]; then - /sbin/kexec -u + /sbin/kexec -u -a fi } From 1c4300f309be95d3b182a490032b3af2de95a89b Mon Sep 17 00:00:00 2001 From: Xincun Li <147451452+xincunli-sonic@users.noreply.github.com> Date: Fri, 16 Aug 2024 09:31:10 -0700 Subject: [PATCH 27/67] Skip default lanes dup check (#3489) * Add namespace check for multiasic * Skip Default lane duplication check. --- generic_config_updater/gu_common.py | 3 ++- tests/generic_config_updater/gu_common_test.py | 7 +++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index 938aa1d034..452bad1ee7 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -239,7 +239,8 @@ def validate_lanes(self, config_db): for port in port_to_lanes_map: lanes = port_to_lanes_map[port] for lane in lanes: - if lane in existing: + # default lane would be 0, it does not need validate duplication. 
+ if lane in existing and lane != '0': return False, f"'{lane}' lane is used multiple times in PORT: {set([port, existing[lane]])}" existing[lane] = port return True, None diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index 4a16a5ca4f..21f50e0b7b 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -361,6 +361,13 @@ def test_validate_lanes__same_valid_lanes_multi_ports_no_spaces__failure(self): }} self.validate_lanes(config, '67') + def test_validate_lanes_default_value_duplicate_check(self): + config = {"PORT": { + "Ethernet0": {"lanes": "0", "speed": "10000"}, + "Ethernet1": {"lanes": "0", "speed": "10000"}, + }} + self.validate_lanes(config) + def validate_lanes(self, config_db, expected_error=None): # Arrange config_wrapper = gu_common.ConfigWrapper() From 4372ced5f87d6c2a048a6df5e49de2179cbf06c1 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Tue, 20 Aug 2024 13:29:15 +0800 Subject: [PATCH 28/67] Add lock to config reload/load_minigraph (#3475) What I did In some cases, if multiple config reload/load_minigraph are running in parallel, they might leave the system in an error state. In this PR, a flock is added to config reload/load_minigraph so they will not run in parallel. The file lock is binding to /etc/sonic/reload.lock. This is to fix issue: #19855 Microsoft ADO (number only): 28877643 Signed-off-by: Longxiang Lyu lolv@microsoft.com How I did it Add flock utility and decoate the reload and load_minigraph with the try_lock to ensure the lock is acquired before reload/load_minigraph. How to verify it UT and on testbed. New command output (if the output of a command-line utility has changed) reload with locking success # config reload Acquired lock on /etc/sonic/reload.lock Clear current config and reload config in config_db format from the default config file(s) ? 
[y/N]: y Disabling container monitoring ... Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -j /etc/sonic/init_cfg.json -j /etc/sonic/config_db.json --write-to-db Running command: /usr/local/bin/db_migrator.py -o migrate Running command: /usr/local/bin/sonic-cfggen -d -y /etc/sonic/sonic_version.yml -t /usr/share/sonic/templates/sonic-environment.j2,/etc/sonic/sonic-environment Restarting SONiC target ... Enabling container monitoring ... Reloading Monit configuration ... Reinitializing monit daemon Released lock on /etc/sonic/reload.lock reload with locking failure # config reload Failed to acquire lock on /etc/sonic/reload.lock --- config/main.py | 15 ++- tests/config_test.py | 169 ++++++++++++++++++++++++++++++++-- tests/flock_test.py | 187 ++++++++++++++++++++++++++++++++++++++ utilities_common/flock.py | 89 ++++++++++++++++++ 4 files changed, 448 insertions(+), 12 deletions(-) create mode 100644 tests/flock_test.py create mode 100644 utilities_common/flock.py diff --git a/config/main.py b/config/main.py index f48c446adf..4a46efda5e 100644 --- a/config/main.py +++ b/config/main.py @@ -42,6 +42,7 @@ from utilities_common.general import load_db_config, load_module_from_source from .validated_config_db_connector import ValidatedConfigDBConnector import utilities_common.multi_asic as multi_asic_util +from utilities_common.flock import try_lock from .utils import log @@ -124,6 +125,12 @@ GRE_TYPE_RANGE = click.IntRange(min=0, max=65535) ADHOC_VALIDATION = True +if os.environ.get("UTILITIES_UNIT_TESTING", "0") in ("1", "2"): + temp_system_reload_lockfile = tempfile.NamedTemporaryFile() + SYSTEM_RELOAD_LOCK = temp_system_reload_lockfile.name +else: + SYSTEM_RELOAD_LOCK = "/etc/sonic/reload.lock" + # Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. 
sonic_cfggen = load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') @@ -1753,9 +1760,11 @@ def list_checkpoints(ctx, verbose): @click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services') @click.option('-f', '--force', default=False, is_flag=True, help='Force config reload without system checks') @click.option('-t', '--file_format', default='config_db',type=click.Choice(['config_yang', 'config_db']),show_default=True,help='specify the file format') +@click.option('-b', '--bypass-lock', default=False, is_flag=True, help='Do reload without acquiring lock') @click.argument('filename', required=False) @clicommon.pass_db -def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_format): +@try_lock(SYSTEM_RELOAD_LOCK, timeout=0) +def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_format, bypass_lock): """Clear current configuration and import a previous saved config DB dump file. : Names of configuration file(s) to load, separated by comma with no spaces in between """ @@ -1968,8 +1977,10 @@ def load_mgmt_config(filename): @click.option('-t', '--traffic_shift_away', default=False, is_flag=True, help='Keep device in maintenance with TSA') @click.option('-o', '--override_config', default=False, is_flag=True, help='Enable config override. Proceed with default path.') @click.option('-p', '--golden_config_path', help='Provide golden config path to override. 
Use with --override_config') +@click.option('-b', '--bypass-lock', default=False, is_flag=True, help='Do load minigraph without acquiring lock') @clicommon.pass_db -def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, golden_config_path): +@try_lock(SYSTEM_RELOAD_LOCK, timeout=0) +def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, golden_config_path, bypass_lock): """Reconfigure based on minigraph.""" argv_str = ' '.join(['config', *sys.argv[1:]]) log.log_notice(f"'load_minigraph' executing with command: {argv_str}") diff --git a/tests/config_test.py b/tests/config_test.py index 748d434fc2..74bc0e1093 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -18,6 +18,7 @@ from click.testing import CliRunner from sonic_py_common import device_info, multi_asic +from utilities_common import flock from utilities_common.db import Db from utilities_common.general import load_module_from_source from mock import call, patch, mock_open, MagicMock @@ -45,6 +46,23 @@ load_minigraph_platform_false_path = os.path.join(load_minigraph_input_path, "platform_false") load_minigraph_command_output="""\ +Acquired lock on {0} +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -H -m --write-to-db +Running command: config qos reload --no-dynamic-buffer --no-delay +Running command: pfcwd start_default +Restarting SONiC target ... +Reloading Monit configuration ... +Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`. +Released lock on {0} +""" + +load_minigraph_lock_failure_output = """\ +Failed to acquire lock on {0} +""" + +load_minigraph_command_bypass_lock_output = """\ +Bypass lock on {} Stopping SONiC target ... 
Running command: /usr/local/bin/sonic-cfggen -H -m --write-to-db Running command: config qos reload --no-dynamic-buffer --no-delay @@ -55,6 +73,7 @@ """ load_minigraph_platform_plugin_command_output="""\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -H -m --write-to-db Running command: config qos reload --no-dynamic-buffer --no-delay @@ -63,6 +82,7 @@ Restarting SONiC target ... Reloading Monit configuration ... Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`. +Released lock on {0} """ load_mgmt_config_command_ipv4_only_output="""\ @@ -137,6 +157,20 @@ """ RELOAD_CONFIG_DB_OUTPUT = """\ +Acquired lock on {0} +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db +Restarting SONiC target ... +Reloading Monit configuration ... +Released lock on {0} +""" + +RELOAD_CONFIG_DB_LOCK_FAILURE_OUTPUT = """\ +Failed to acquire lock on {0} +""" + +RELOAD_CONFIG_DB_BYPASS_LOCK_OUTPUT = """\ +Bypass lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db Restarting SONiC target ... @@ -144,44 +178,55 @@ """ RELOAD_YANG_CFG_OUTPUT = """\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -Y /tmp/config.json --write-to-db Restarting SONiC target ... Reloading Monit configuration ... +Released lock on {0} """ RELOAD_MASIC_CONFIG_DB_OUTPUT = """\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json -n asic0 --write-to-db Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json -n asic1 --write-to-db Restarting SONiC target ... Reloading Monit configuration ... 
+Released lock on {0} """ reload_config_with_sys_info_command_output="""\ +Acquired lock on {0} Running command: /usr/local/bin/sonic-cfggen -H -k Seastone-DX010-25-50 --write-to-db""" reload_config_with_disabled_service_output="""\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db Restarting SONiC target ... Reloading Monit configuration ... +Released lock on {0} """ reload_config_masic_onefile_output = """\ +Acquired lock on {0} Stopping SONiC target ... Restarting SONiC target ... Reloading Monit configuration ... +Released lock on {0} """ reload_config_masic_onefile_gen_sysinfo_output = """\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -H -k Mellanox-SN3800-D112C8 --write-to-db Running command: /usr/local/bin/sonic-cfggen -H -k multi_asic -n asic0 --write-to-db Running command: /usr/local/bin/sonic-cfggen -H -k multi_asic -n asic1 --write-to-db Restarting SONiC target ... Reloading Monit configuration ... 
+Released lock on {0} """ save_config_output = """\ @@ -601,7 +646,8 @@ def test_config_reload(self, get_cmd_module, setup_single_broadcom_asic): assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')][:1]) == reload_config_with_sys_info_command_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')][:2]) == \ + reload_config_with_sys_info_command_output.format(config.SYSTEM_RELOAD_LOCK) def test_config_reload_stdin(self, get_cmd_module, setup_single_broadcom_asic): def mock_json_load(f): @@ -641,7 +687,8 @@ def mock_json_load(f): assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')][:1]) == reload_config_with_sys_info_command_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')][:2]) == \ + reload_config_with_sys_info_command_output.format(config.SYSTEM_RELOAD_LOCK) @classmethod def teardown_class(cls): @@ -747,7 +794,8 @@ def read_json_file_side_effect(filename): traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 - assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == reload_config_masic_onefile_output + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == \ + reload_config_masic_onefile_output.format(config.SYSTEM_RELOAD_LOCK) def test_config_reload_onefile_gen_sysinfo_masic(self): def read_json_file_side_effect(filename): @@ -823,7 +871,7 @@ def read_json_file_side_effect(filename): assert result.exit_code == 0 assert "\n".join( [li.rstrip() for li in result.output.split('\n')] - ) == reload_config_masic_onefile_gen_sysinfo_output + ) == reload_config_masic_onefile_gen_sysinfo_output.format(config.SYSTEM_RELOAD_LOCK) def test_config_reload_onefile_bad_format_masic(self): def read_json_file_side_effect(filename): @@ -878,11 +926,58 @@ def test_load_minigraph(self, get_cmd_module, setup_single_broadcom_asic): print(result.output) traceback.print_tb(result.exc_info[2]) assert 
result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_command_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) == \ + (load_minigraph_command_output.format(config.SYSTEM_RELOAD_LOCK)) # Verify "systemctl reset-failed" is called for services under sonic.target mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss']) assert mock_run_command.call_count == 12 + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', + mock.MagicMock(return_value=("dummy_path", None))) + def test_load_minigraph_lock_failure(self, get_cmd_module, setup_single_broadcom_asic): + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, _) = get_cmd_module + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + try: + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-y"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert result.output == \ + (load_minigraph_lock_failure_output.format(config.SYSTEM_RELOAD_LOCK)) + assert mock_run_command.call_count == 0 + finally: + flock.release_flock(fd) + + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', + mock.MagicMock(return_value=("dummy_path", None))) + def test_load_minigraph_bypass_lock(self, get_cmd_module, setup_single_broadcom_asic): + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, _) = get_cmd_module + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + try: + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-y", "-b"]) + print(result.exit_code) + print(result.output) + 
traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert result.output == \ + load_minigraph_command_bypass_lock_output.format(config.SYSTEM_RELOAD_LOCK) + assert mock_run_command.call_count == 12 + finally: + flock.release_flock(fd) + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=(load_minigraph_platform_path, None))) def test_load_minigraph_platform_plugin(self, get_cmd_module, setup_single_broadcom_asic): with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: @@ -893,7 +988,8 @@ def test_load_minigraph_platform_plugin(self, get_cmd_module, setup_single_broad print(result.output) traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_platform_plugin_command_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) == \ + (load_minigraph_platform_plugin_command_output.format(config.SYSTEM_RELOAD_LOCK)) # Verify "systemctl reset-failed" is called for services under sonic.target mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss']) assert mock_run_command.call_count == 12 @@ -1171,7 +1267,59 @@ def test_reload_config(self, get_cmd_module, setup_single_broadcom_asic): traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ - == RELOAD_CONFIG_DB_OUTPUT + == RELOAD_CONFIG_DB_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) + + def test_reload_config_lock_failure(self, get_cmd_module, setup_single_broadcom_asic): + self.add_sysinfo_to_cfg_file() + with mock.patch( + "utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect) + ): + (config, show) = get_cmd_module + runner = CliRunner() + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + 
try: + result = runner.invoke( + config.config.commands["reload"], + [self.dummy_cfg_file, '-y', '-f']) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) \ + == RELOAD_CONFIG_DB_LOCK_FAILURE_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) + finally: + flock.release_flock(fd) + + def test_reload_config_bypass_lock(self, get_cmd_module, setup_single_broadcom_asic): + self.add_sysinfo_to_cfg_file() + with mock.patch( + "utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect) + ): + (config, show) = get_cmd_module + runner = CliRunner() + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + try: + result = runner.invoke( + config.config.commands["reload"], + [self.dummy_cfg_file, '-y', '-f', '-b']) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) \ + == RELOAD_CONFIG_DB_BYPASS_LOCK_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) + finally: + flock.release_flock(fd) def test_config_reload_disabled_service(self, get_cmd_module, setup_single_broadcom_asic): self.add_sysinfo_to_cfg_file() @@ -1191,7 +1339,8 @@ def test_config_reload_disabled_service(self, get_cmd_module, setup_single_broad assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == reload_config_with_disabled_service_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) == \ + reload_config_with_disabled_service_output.format(config.SYSTEM_RELOAD_LOCK) def test_reload_config_masic(self, get_cmd_module, setup_multi_broadcom_masic): self.add_sysinfo_to_cfg_file() @@ -1215,7 +1364,7 @@ def test_reload_config_masic(self, get_cmd_module, setup_multi_broadcom_masic): 
traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ - == RELOAD_MASIC_CONFIG_DB_OUTPUT + == RELOAD_MASIC_CONFIG_DB_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) def test_reload_yang_config(self, get_cmd_module, setup_single_broadcom_asic): @@ -1234,7 +1383,7 @@ def test_reload_yang_config(self, get_cmd_module, traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ - == RELOAD_YANG_CFG_OUTPUT + == RELOAD_YANG_CFG_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) @classmethod def teardown_class(cls): diff --git a/tests/flock_test.py b/tests/flock_test.py new file mode 100644 index 0000000000..7d9039dd2d --- /dev/null +++ b/tests/flock_test.py @@ -0,0 +1,187 @@ +import pytest +import tempfile +import threading +import time + +from unittest import mock +from utilities_common import flock + + +f0_exit = threading.Event() +f1_exit = threading.Event() +f2_exit = threading.Event() + + +def dummy_f0(): + while not f0_exit.is_set(): + time.sleep(1) + + +def dummy_f1(bypass_lock=False): + while not f1_exit.is_set(): + time.sleep(1) + + +def dummy_f2(bypass_lock=True): + while not f2_exit.is_set(): + time.sleep(1) + + +class TestFLock: + def setup(self): + print("SETUP") + f0_exit.clear() + f1_exit.clear() + f2_exit.clear() + + def test_flock_acquire_lock_non_blocking(self): + """Test flock non-blocking acquire lock.""" + with tempfile.NamedTemporaryFile() as fd0: + fd1 = open(fd0.name, "r") + + assert flock.acquire_flock(fd0.fileno(), 0) + assert not flock.acquire_flock(fd1.fileno(), 0) + + flock.release_flock(fd0.fileno()) + + assert flock.acquire_flock(fd1.fileno(), 0) + flock.release_flock(fd1.fileno()) + + def test_flock_acquire_lock_blocking(self): + """Test flock blocking acquire.""" + with tempfile.NamedTemporaryFile() as fd0: + fd1 = open(fd0.name, "r") + res = [] + + assert flock.acquire_flock(fd0.fileno(), 0) + thrd = 
threading.Thread(target=lambda: res.append(flock.acquire_flock(fd1.fileno(), -1))) + thrd.start() + + time.sleep(5) + assert thrd.is_alive() + + flock.release_flock(fd0.fileno()) + thrd.join() + assert len(res) == 1 and res[0] + + fd2 = open(fd0.name, "r") + assert not flock.acquire_flock(fd2.fileno(), 0) + + flock.release_flock(fd1.fileno()) + assert flock.acquire_flock(fd2.fileno(), 0) + flock.release_flock(fd2.fileno()) + + def test_flock_acquire_lock_timeout(self): + """Test flock timeout acquire.""" + with tempfile.NamedTemporaryFile() as fd0: + def acquire_helper(): + nonlocal elapsed + start = time.time() + res.append(flock.acquire_flock(fd1.fileno(), 5)) + end = time.time() + elapsed = end - start + + fd1 = open(fd0.name, "r") + elapsed = 0 + res = [] + + assert flock.acquire_flock(fd0.fileno(), 0) + thrd = threading.Thread(target=acquire_helper) + thrd.start() + + thrd.join() + assert ((len(res) == 1) and (not res[0])) + assert elapsed >= 5 + + flock.release_flock(fd0.fileno()) + + @mock.patch("click.echo") + def test_try_lock(self, mock_echo): + """Test try_lock decorator.""" + with tempfile.NamedTemporaryFile() as fd0: + def get_file_content(fd): + fd.seek(0) + return fd.read() + + f0_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f0) + f1_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f1) + + thrd = threading.Thread(target=f0_with_try_lock) + thrd.start() + time.sleep(2) + + try: + assert mock_echo.call_args_list == [mock.call(f"Acquired lock on {fd0.name}")] + assert b"dummy_f0" in get_file_content(fd0) + + with pytest.raises(SystemExit): + f1_with_try_lock() + assert mock_echo.call_args_list == [mock.call(f"Acquired lock on {fd0.name}"), + mock.call(f"Failed to acquire lock on {fd0.name}")] + finally: + f0_exit.set() + thrd.join() + + assert b"dummy_f0" not in get_file_content(fd0) + + thrd = threading.Thread(target=f1_with_try_lock) + thrd.start() + time.sleep(2) + + try: + assert mock_echo.call_args_list == 
[mock.call(f"Acquired lock on {fd0.name}"), + mock.call(f"Failed to acquire lock on {fd0.name}"), + mock.call(f"Released lock on {fd0.name}"), + mock.call(f"Acquired lock on {fd0.name}")] + assert b"dummy_f1" in get_file_content(fd0) + finally: + f1_exit.set() + thrd.join() + + assert b"dummy_f1" not in get_file_content(fd0) + + @mock.patch("click.echo") + def test_try_lock_with_bypass(self, mock_echo): + with tempfile.NamedTemporaryFile() as fd0: + def get_file_content(fd): + fd.seek(0) + return fd.read() + + f1_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f1) + + thrd = threading.Thread(target=f1_with_try_lock, args=(True,)) + thrd.start() + time.sleep(2) + + try: + assert mock_echo.call_args_list == [mock.call(f"Bypass lock on {fd0.name}")] + assert b"dummy_f1" not in get_file_content(fd0) + finally: + f1_exit.set() + thrd.join() + + @mock.patch("click.echo") + def test_try_lock_with_bypass_default(self, mock_echo): + with tempfile.NamedTemporaryFile() as fd0: + def get_file_content(fd): + fd.seek(0) + return fd.read() + + f2_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f2) + + thrd = threading.Thread(target=f2_with_try_lock) + thrd.start() + time.sleep(2) + + try: + assert mock_echo.call_args_list == [mock.call(f"Bypass lock on {fd0.name}")] + assert b"dummy_f2" not in get_file_content(fd0) + finally: + f2_exit.set() + thrd.join() + + def teardown(self): + print("TEARDOWN") + f0_exit.clear() + f1_exit.clear() + f2_exit.clear() diff --git a/utilities_common/flock.py b/utilities_common/flock.py new file mode 100644 index 0000000000..c8faa8bfd9 --- /dev/null +++ b/utilities_common/flock.py @@ -0,0 +1,89 @@ +"""File lock utilities.""" +import click +import fcntl +import functools +import inspect +import os +import sys +import time + +from sonic_py_common import logger + + +log = logger.Logger() + + +def acquire_flock(fd, timeout=-1): + """Acquire the flock.""" + flags = fcntl.LOCK_EX + if timeout >= 0: + flags |= fcntl.LOCK_NB + else: + 
timeout = 0 + + start_time = current_time = time.time() + ret = False + while current_time - start_time <= timeout: + try: + fcntl.flock(fd, flags) + except (IOError, OSError): + ret = False + else: + ret = True + break + current_time = time.time() + if timeout != 0: + time.sleep(0.2) + return ret + + +def release_flock(fd): + """Release the flock.""" + fcntl.flock(fd, fcntl.LOCK_UN) + + +def try_lock(lock_file, timeout=-1): + """Decorator to try lock file using fcntl.flock.""" + def _decorator(func): + @functools.wraps(func) + def _wrapper(*args, **kwargs): + bypass_lock = False + + # Get the bypass_lock argument from the function signature + func_signature = inspect.signature(func) + has_bypass_lock = "bypass_lock" in func_signature.parameters + if has_bypass_lock: + func_ba = func_signature.bind(*args, **kwargs) + func_ba.apply_defaults() + bypass_lock = func_ba.arguments["bypass_lock"] + + if bypass_lock: + click.echo(f"Bypass lock on {lock_file}") + return func(*args, **kwargs) + else: + fd = os.open(lock_file, os.O_CREAT | os.O_RDWR) + if acquire_flock(fd, timeout): + click.echo(f"Acquired lock on {lock_file}") + os.truncate(fd, 0) + # Write pid and the function name to the lock file as a record + os.write(fd, f"{func.__name__}, pid {os.getpid()}\n".encode()) + try: + return func(*args, **kwargs) + finally: + release_flock(fd) + click.echo(f"Released lock on {lock_file}") + os.truncate(fd, 0) + os.close(fd) + else: + click.echo(f"Failed to acquire lock on {lock_file}") + lock_owner = os.read(fd, 1024).decode() + if not lock_owner: + lock_owner = "unknown" + log.log_notice( + (f"{func.__name__} failed to acquire lock on {lock_file}," + f" which is taken by {lock_owner}") + ) + os.close(fd) + sys.exit(1) + return _wrapper + return _decorator From 9a3f359ee8bd4034b191506eb3a6dd203734cbfe Mon Sep 17 00:00:00 2001 From: Changrong Wu Date: Tue, 20 Aug 2024 14:47:47 -0700 Subject: [PATCH 29/67] Add timeout for rexec's get_password (#3484) ### What I did I added a 
timeout setting for the get_password function in rexec module so that automatic pipelines that do not expect password input will not get blocked. The current timeout setting is 10 sec. #### How I did it Add a SIGALRM signal before waiting for password input. #### How to verify it Run a "show ip bgp summary" on a SONiC Chassis Supervisor and do not input password until it times out. #### Previous command output (if the output of a command-line utility has changed) Before adding such a mechanism, if you do not input password when run "rexec -c " you will be blocked at the following output: ``` Since the current device is a chassis supervisor, this command will be executed remotely on all linecards Password for username 'XXX': ``` #### New command output (if the output of a command-line utility has changed) After adding such a mechanism, if you do not input password when run "rexec -c " you will see a timeout message after 10 seconds. ``` Since the current device is a chassis supervisor, this command will be executed remotely on all linecards Password for username 'XXX': Aborted! Timeout when waiting for password input.
``` --- rcli/utils.py | 17 ++++++++++++++--- tests/remote_cli_test.py | 36 ++++++++++++++++++++++++++---------- 2 files changed, 40 insertions(+), 13 deletions(-) diff --git a/rcli/utils.py b/rcli/utils.py index e2f48788ba..7563eafdcd 100644 --- a/rcli/utils.py +++ b/rcli/utils.py @@ -1,7 +1,7 @@ import click -from getpass import getpass +import getpass import os -import sys +import signal from swsscommon.swsscommon import SonicV2Connector @@ -19,6 +19,8 @@ CHASSIS_MODULE_HOSTNAME_TABLE = 'CHASSIS_MODULE_HOSTNAME_TABLE' CHASSIS_MODULE_HOSTNAME = 'module_hostname' +GET_PASSWORD_TIMEOUT = 10 + def connect_to_chassis_state_db(): chassis_state_db = SonicV2Connector(host="127.0.0.1") chassis_state_db.connect(chassis_state_db.CHASSIS_STATE_DB) @@ -151,8 +153,17 @@ def get_password(username=None): if username is None: username = os.getlogin() - return getpass( + def get_password_timeout(*args): + print("\nAborted! Timeout when waiting for password input.") + exit(1) + + signal.signal(signal.SIGALRM, get_password_timeout) + signal.alarm(GET_PASSWORD_TIMEOUT) # Set a timeout of GET_PASSWORD_TIMEOUT (10) seconds + password = getpass.getpass( "Password for username '{}': ".format(username), # Pass in click stdout stream - this is similar to using click.echo stream=click.get_text_stream('stdout') ) + signal.alarm(0) # Cancel the alarm + + return password diff --git a/tests/remote_cli_test.py b/tests/remote_cli_test.py index 9883dfa16b..57a220be1e 100644 --- a/tests/remote_cli_test.py +++ b/tests/remote_cli_test.py @@ -11,6 +11,7 @@ import select import socket import termios +import getpass MULTI_LC_REXEC_OUTPUT = '''======== LINE-CARD0|sonic-lc1 output: ======== hello world @@ -75,17 +76,27 @@ def mock_paramiko_connection(channel): return conn +def mock_getpass(prompt="Password:", stream=None): + return "dummy" + + class TestRemoteExec(object): + __getpass = getpass.getpass + @classmethod def setup_class(cls): print("SETUP") from .mock_tables import dbconnector dbconnector.load_database_config() + 
getpass.getpass = mock_getpass + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + getpass.getpass = TestRemoteExec.__getpass @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) - # @mock.patch.object(linecard.Linecard, '_get_password', mock.MagicMock(return_value='dummmy')) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value=mock_exec_command())) def test_rexec_with_module_name(self): @@ -98,7 +109,6 @@ def test_rexec_with_module_name(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value=mock_exec_command())) def test_rexec_with_hostname(self): @@ -111,7 +121,6 @@ def test_rexec_with_hostname(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value=mock_exec_error_cmd())) def test_rexec_error_with_module_name(self): @@ -133,7 +142,6 @@ def test_rexec_error(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) 
@mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_all(self): @@ -147,7 +155,6 @@ def test_rexec_all(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_invalid_lc(self): @@ -161,7 +168,6 @@ def test_rexec_invalid_lc(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_unreachable_lc(self): @@ -175,7 +181,6 @@ def test_rexec_unreachable_lc(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_help(self): @@ -188,7 +193,6 @@ def test_rexec_help(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', 
mock.MagicMock(side_effect=paramiko.ssh_exception.NoValidConnectionsError({('192.168.0.1', 22): "None"}))) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) @@ -202,7 +206,6 @@ def test_rexec_exception(self): assert "Failed to connect to sonic-lc1 with username admin\n" == result.output @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock(side_effect=paramiko.ssh_exception.NoValidConnectionsError({('192.168.0.1', 22): "None"}))) def test_rexec_with_user_param(self): @@ -214,6 +217,19 @@ def test_rexec_with_user_param(self): assert result.exit_code == 1, result.output assert "Failed to connect to sonic-lc1 with username testuser\n" == result.output + @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + def test_rexec_without_password_input(self): + runner = CliRunner() + getpass.getpass = TestRemoteExec.__getpass + LINECARD_NAME = "all" + result = runner.invoke( + rexec.cli, [LINECARD_NAME, "-c", "show version"]) + getpass.getpass = mock_getpass + print(result.output) + assert result.exit_code == 1, result.output + assert "Aborted" in result.output + class TestRemoteCLI(object): @classmethod From e4df80a5c89c570dfe6580d91ac25c99e425c02d Mon Sep 17 00:00:00 2001 From: bktsim <144830673+bktsim-arista@users.noreply.github.com> Date: Wed, 21 Aug 2024 21:19:37 -0700 Subject: [PATCH 30/67] Fix multi-asic behaviour for ecnconfig (#3062) * Fixes multi-asic behaviour for ecnconfig. Previously, ecnconfig -l was not behaving correctly on multi-asic devices, as the '-n' namespace option was not available, and correct namespaces were not traversed on multi-asic devices to retrieve correct results for ecnconfig. 
This change fixes multi-asic behaviour of CLI commands that rely on ecnconfig. * tests cleanup * Remove wred on lossy * Addendum to previous commit regarding wred lossy * Enhancements to multi-asic support in ecnconfig - Removed unix socket and fixed failing unit test - Replace argparse with click - Add multi-asic support for get and set queue - Add multi-asic support for set threshold and prob - Modify test framework to support multi-asic - Use multi_asic decorators - Resolve precommit errors --------- Co-authored-by: rdjeric Co-authored-by: arista-hpandya --- config/main.py | 5 +- scripts/ecnconfig | 364 ++++++++++-------- show/main.py | 5 +- tests/ecn_input/ecn_test_vectors.py | 491 ++++++++++++++++--------- tests/ecn_test.py | 178 +++++---- tests/mock_tables/asic0/config_db.json | 23 ++ tests/mock_tables/asic1/config_db.json | 20 + tests/multi_asic_ecnconfig_test.py | 64 ++++ 8 files changed, 755 insertions(+), 395 deletions(-) create mode 100644 tests/multi_asic_ecnconfig_test.py diff --git a/config/main.py b/config/main.py index 4a46efda5e..7509628a67 100644 --- a/config/main.py +++ b/config/main.py @@ -6399,7 +6399,8 @@ def remove_reasons(counter_name, reasons, verbose): @click.option('-ydrop', metavar='', type=click.IntRange(0, 100), help="Set yellow drop probability") @click.option('-gdrop', metavar='', type=click.IntRange(0, 100), help="Set green drop probability") @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") -def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbose): +@multi_asic_util.multi_asic_click_option_namespace +def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbose, namespace): """ECN-related configuration tasks""" log.log_info("'ecn -profile {}' executing...".format(profile)) command = ['ecnconfig', '-p', str(profile)] @@ -6413,6 +6414,8 @@ def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbos if ydrop is not None: command += ['-ydrop', 
str(ydrop)] if gdrop is not None: command += ['-gdrop', str(gdrop)] if verbose: command += ["-vv"] + if namespace is not None: + command += ['-n', str(namespace)] clicommon.run_command(command, display_cmd=verbose) diff --git a/scripts/ecnconfig b/scripts/ecnconfig index e3b08d2bd3..9b2deab4dc 100755 --- a/scripts/ecnconfig +++ b/scripts/ecnconfig @@ -5,7 +5,7 @@ ecnconfig is the utility to 1) show and change ECN configuration -usage: ecnconfig [-h] [-v] [-l] [-p PROFILE] [-gmin GREEN_MIN] +usage: ecnconfig [-h] [-v] [-l] [-p PROFILE] [-gmin GREEN_MIN] [-n NAMESPACE] [-gmax GREEN_MAX] [-ymin YELLOW_MIN] [-ymax YELLOW_MAX] [-rmin RED_MIN] [-rmax RED_MAX] [-gdrop GREEN_DROP_PROB] [-ydrop YELLOW_DROP_PROB] [-rdrop RED_DROP_PROB] [-vv] @@ -16,6 +16,7 @@ optional arguments: -vv --verbose verbose output -l --list show ECN WRED configuration -p --profile specify WRED profile name + -n --namespace show ECN configuration for specified namespace -gmin --green-min set min threshold for packets marked green -gmax --green-max set max threshold for packets marked green -ymin --yellow-min set min threshold for packets marked yellow @@ -47,7 +48,7 @@ $ecnconfig -q 3 ECN status: queue 3: on """ -import argparse +import click import json import os import sys @@ -62,12 +63,17 @@ try: sys.path.insert(0, modules_path) sys.path.insert(0, tests_path) import mock_tables.dbconnector - + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import mock_tables.mock_multi_asic + mock_tables.dbconnector.load_namespace_config() except KeyError: pass from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector +from sonic_py_common import multi_asic +from utilities_common import multi_asic as multi_asic_util +from utilities_common.general import load_db_config WRED_PROFILE_TABLE_NAME = "WRED_PROFILE" WRED_CONFIG_FIELDS = { @@ -82,7 +88,6 @@ WRED_CONFIG_FIELDS = { "rdrop": "red_drop_probability" } -PORT_TABLE_NAME = "PORT" QUEUE_TABLE_NAME = "QUEUE" 
DEVICE_NEIGHBOR_TABLE_NAME = "DEVICE_NEIGHBOR" FIELD = "wred_profile" @@ -96,18 +101,25 @@ class EcnConfig(object): """ Process ecnconfig """ - def __init__(self, filename, verbose): + def __init__(self, test_filename, verbose, namespace): self.ports = [] self.queues = [] - self.filename = filename self.verbose = verbose + self.namespace = namespace + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.config_db = None + self.num_wred_profiles = 0 - # Set up db connections - self.db = ConfigDBConnector() - self.db.connect() + # For unit testing + self.test_filename = test_filename + self.updated_profile_tables = {} + @multi_asic_util.run_on_multi_asic def list(self): - wred_profiles = self.db.get_table(WRED_PROFILE_TABLE_NAME) + """ + List all WRED profiles. + """ + wred_profiles = self.config_db.get_table(WRED_PROFILE_TABLE_NAME) for name, data in wred_profiles.items(): profile_name = name profile_data = data @@ -117,12 +129,18 @@ class EcnConfig(object): line = [field, value] config.append(line) print(tabulate(config) + "\n") - if self.verbose: - print("Total profiles: %d" % len(wred_profiles)) + self.num_wred_profiles += len(wred_profiles) - # get parameters of a WRED profile def get_profile_data(self, profile): - wred_profiles = self.db.get_table(WRED_PROFILE_TABLE_NAME) + """ + Get parameters of a WRED profile + """ + if self.namespace or not multi_asic.is_multi_asic(): + db = ConfigDBConnector(namespace=self.namespace) + db.connect() + wred_profiles = db.get_table(WRED_PROFILE_TABLE_NAME) + else: + wred_profiles = multi_asic.get_table(WRED_PROFILE_TABLE_NAME) for profile_name, profile_data in wred_profiles.items(): if profile_name == profile: @@ -131,6 +149,9 @@ class EcnConfig(object): return None def validate_profile_data(self, profile_data): + """ + Validate threshold, probability and color values. 
+ """ result = True # check if thresholds are non-negative integers @@ -168,73 +189,116 @@ class EcnConfig(object): return result + @multi_asic_util.run_on_multi_asic def set_wred_threshold(self, profile, threshold, value): + """ + Single asic behaviour: + Set threshold value on default namespace + + Multi asic behaviour: + Set threshold value on the specified namespace. + If no namespace is provided, set on all namespaces. + """ chk_exec_privilege() + # Modify the threshold field = WRED_CONFIG_FIELDS[threshold] if self.verbose: - print("Setting %s value to %s" % (field, value)) - self.db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) - if self.filename is not None: - prof_table = self.db.get_table(WRED_PROFILE_TABLE_NAME) - with open(self.filename, "w") as fd: - json.dump(prof_table, fd) + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' + print("Setting %s value to %s%s" % (field, value, namespace_str)) + self.config_db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) + + # Record the change for unit testing + if self.test_filename: + profile_table = self.config_db.get_table(WRED_PROFILE_TABLE_NAME) + if self.multi_asic.current_namespace in self.updated_profile_tables.keys(): + self.updated_profile_tables[self.multi_asic.current_namespace][profile][threshold] = value + else: + self.updated_profile_tables[self.multi_asic.current_namespace] = profile_table + @multi_asic_util.run_on_multi_asic def set_wred_prob(self, profile, drop_color, value): + """ + Single asic behaviour: + Set drop probability on default namespace + + Multi asic behaviour: + Set drop probability value on the specified namespace. + If no namespace is provided, set on all namespaces. 
+ """ chk_exec_privilege() + # Modify the drop probability field = WRED_CONFIG_FIELDS[drop_color] if self.verbose: - print("Setting %s value to %s%%" % (field, value)) - self.db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) - if self.filename is not None: - prof_table = self.db.get_table(WRED_PROFILE_TABLE_NAME) - with open(self.filename, "w") as fd: - json.dump(prof_table, fd) + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' + print("Setting %s value to %s%%%s" % (field, value, namespace_str)) + self.config_db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) + + # Record the change for unit testing + if self.test_filename: + profile_table = self.config_db.get_table(WRED_PROFILE_TABLE_NAME) + if self.multi_asic.current_namespace in self.updated_profile_tables.keys(): + self.updated_profile_tables[self.multi_asic.current_namespace][profile][field] = value + else: + self.updated_profile_tables[self.multi_asic.current_namespace] = profile_table class EcnQ(object): """ Process ecn on/off on queues """ - def __init__(self, queues, filename, verbose): + def __init__(self, queues, test_filename, verbose, namespace): self.ports_key = [] self.queues = queues.split(',') - self.filename = filename self.verbose = verbose + self.namespace = namespace + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.config_db = None + self.db = None - # Set up db connections - self.config_db = ConfigDBConnector() - self.config_db.connect() - - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.CONFIG_DB) - - self.gen_ports_key() + # For unit testing + self.test_filename = test_filename + self.updated_q_table = {} def gen_ports_key(self): - if self.ports_key is not None: - port_table = self.config_db.get_table(DEVICE_NEIGHBOR_TABLE_NAME) - self.ports_key = list(port_table.keys()) + port_table = self.config_db.get_table(DEVICE_NEIGHBOR_TABLE_NAME) + 
self.ports_key = list(port_table.keys()) - # Verify at least one port is available - if len(self.ports_key) == 0: - raise Exception("No active ports detected in table '{}'".format(DEVICE_NEIGHBOR_TABLE_NAME)) + # Verify at least one port is available + if len(self.ports_key) == 0: + raise Exception("No active ports detected in table '{}'".format(DEVICE_NEIGHBOR_TABLE_NAME)) - # In multi-ASIC platforms backend ethernet ports are identified as - # 'Ethernet-BPxy'. Add 1024 to sort backend ports to the end. - self.ports_key.sort( - key = lambda k: int(k[8:]) if "BP" not in k else int(k[11:]) + 1024 - ) + # In multi-ASIC platforms backend ethernet ports are identified as + # 'Ethernet-BPxy'. Add 1024 to sort backend ports to the end. + self.ports_key.sort( + key = lambda k: int(k[8:]) if "BP" not in k else int(k[11:]) + 1024 + ) def dump_table_info(self): - if self.filename is not None: + """ + A function to dump updated queue tables. + These JSON dumps are used exclusively by unit tests. + The tables are organized by namespaces for multi-asic support. + """ + if self.test_filename is not None: q_table = self.config_db.get_table(QUEUE_TABLE_NAME) - with open(self.filename, "w") as fd: - json.dump({repr(x):y for x, y in q_table.items()}, fd) + with open(self.test_filename, "w") as fd: + self.updated_q_table[self.multi_asic.current_namespace] = {repr(x):y for x, y in q_table.items()} + json.dump(self.updated_q_table, fd) + @multi_asic_util.run_on_multi_asic def set(self, enable): + """ + Single asic behaviour: + Enable or disable queues on default namespace + + Multi asic behaviour: + Enable or disable queues on a specified namespace. + If no namespace is provided, set on all namespaces. 
+ """ chk_exec_privilege() + self.gen_ports_key() for queue in self.queues: if self.verbose: print("%s ECN on %s queue %s" % ("Enable" if enable else "Disable", ','.join(self.ports_key), queue)) @@ -252,10 +316,24 @@ class EcnQ(object): self.config_db.mod_entry(QUEUE_TABLE_NAME, key, None) else: self.config_db.set_entry(QUEUE_TABLE_NAME, key, entry) + # For unit testing self.dump_table_info() + @multi_asic_util.run_on_multi_asic def get(self): - print("ECN status:") + """ + Single asic behaviour: + Get status of queues on default namespace + + Multi asic behaviour: + Get status of queues on a specified namespace. + If no namespace is provided, get queue status on all namespaces. + """ + self.gen_ports_key() + namespace = self.multi_asic.current_namespace + namespace_str = f" for namespace {namespace}" if namespace else '' + print(f"ECN status{namespace_str}:") + for queue in self.queues: out = ' '.join(['queue', queue]) if self.verbose: @@ -270,81 +348,77 @@ class EcnQ(object): print("%s: on" % (out)) else: print("%s: off" % (out)) + # For unit testing self.dump_table_info() -def main(): - parser = argparse.ArgumentParser(description='Show and change:\n' - '1) ECN WRED configuration\n' - '2) ECN on/off status on queues', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show ECN WRED configuration') - parser.add_argument('-p', '--profile', type=str, help='specify WRED profile name', default=None) - parser.add_argument('-gmin', '--green-min', type=str, help='set min threshold for packets marked \'green\'', default=None) - parser.add_argument('-gmax', '--green-max', type=str, help='set max threshold for packets marked \'green\'', default=None) - parser.add_argument('-ymin', '--yellow-min', type=str, help='set min threshold for packets marked \'yellow\'', default=None) - parser.add_argument('-ymax', '--yellow-max', type=str, help='set max threshold for packets marked \'yellow\'', default=None) - 
parser.add_argument('-rmin', '--red-min', type=str, help='set min threshold for packets marked \'red\'', default=None) - parser.add_argument('-rmax', '--red-max', type=str, help='set max threshold for packets marked \'red\'', default=None) - parser.add_argument('-gdrop', '--green-drop-prob', type=str, help='set max drop/mark probability for packets marked \'green\'', default=None) - parser.add_argument('-ydrop', '--yellow-drop-prob', type=str, help='set max drop/mark probability for packets marked \'yellow\'', default=None) - parser.add_argument('-rdrop', '--red-drop-prob', type=str, help='set max drop/mark probability for packets marked \'red\'', default=None) - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - parser.add_argument('-vv', '--verbose', action='store_true', help='Verbose output', default=False) - - parser.add_argument('command', nargs='?', choices=['on', 'off'], type=str, help='turn on/off ecn', default=None) - parser.add_argument('-q', '--queue', type=str, help='specify queue index list: 3,4', default=None) - parser.add_argument('-f', '--filename', help='file used by mock tests', type=str, default=None) - +@click.command(help='Show and change: ECN WRED configuration\nECN on/off status on queues') +@click.argument('command', type=click.Choice(['on', 'off'], case_sensitive=False), required=False, default=None) +@click.option('-l', '--list', 'show_config', is_flag=True, help='show ECN WRED configuration') +@click.option('-p', '--profile', type=str, help='specify WRED profile name', default=None) +@click.option('-gmin', '--green-min', type=str, help='set min threshold for packets marked \'green\'', default=None) +@click.option('-gmax', '--green-max', type=str, help='set max threshold for packets marked \'green\'', default=None) +@click.option('-ymin', '--yellow-min', type=str, help='set min threshold for packets marked \'yellow\'', default=None) +@click.option('-ymax', '--yellow-max', type=str, help='set max threshold 
for packets marked \'yellow\'', default=None) +@click.option('-rmin', '--red-min', type=str, help='set min threshold for packets marked \'red\'', default=None) +@click.option('-rmax', '--red-max', type=str, help='set max threshold for packets marked \'red\'', default=None) +@click.option('-gdrop', '--green-drop-prob', type=str, help='set max drop/mark probability for packets marked \'green\'', default=None) +@click.option('-ydrop', '--yellow-drop-prob', type=str, help='set max drop/mark probability for packets marked \'yellow\'', default=None) +@click.option('-rdrop', '--red-drop-prob', type=str, help='set max drop/mark probability for packets marked \'red\'', default=None) +@click.option('-n', '--namespace', type=click.Choice(multi_asic.get_namespace_list()), help='Namespace name or skip for all', default=None) +@click.option('-vv', '--verbose', is_flag=True, help='Verbose output', default=False) +@click.option('-q', '--queue', type=str, help='specify queue index list: 3,4', default=None) +@click.version_option(version='1.0') +def main(command, show_config, profile, green_min, + green_max, yellow_min, yellow_max, red_min, + red_max, green_drop_prob, yellow_drop_prob, + red_drop_prob, namespace, verbose, queue): + test_filename = None if os.environ.get("UTILITIES_UNIT_TESTING", "0") == "2": - sys.argv.extend(['-f', '/tmp/ecnconfig']) - - args = parser.parse_args() + test_filename = '/tmp/ecnconfig' try: - if args.list or args.profile: - prof_cfg = EcnConfig(args.filename, args.verbose) - if args.list: - arg_len_max = 2 - if args.verbose: - arg_len_max += 1 - if args.filename: - arg_len_max += 2 - if len(sys.argv) > arg_len_max: + load_db_config() + if show_config or profile: + # Check if a set option has been provided + setOption = (green_min or green_max or yellow_min or yellow_max or red_min or red_max + or green_drop_prob or yellow_drop_prob or red_drop_prob) + + prof_cfg = EcnConfig(test_filename, verbose, namespace) + if show_config: + if setOption: raise 
Exception("Input arguments error. No set options allowed when -l[ist] specified") + prof_cfg.list() - elif args.profile: - arg_len_min = 4 - if args.verbose: - arg_len_min += 1 - if args.filename: - arg_len_min += 2 - if len(sys.argv) < arg_len_min: + if verbose: + print("Total profiles: %d" % prof_cfg.num_wred_profiles) + + elif profile: + if not setOption: raise Exception("Input arguments error. Specify at least one threshold parameter to set") # get current configuration data - wred_profile_data = prof_cfg.get_profile_data(args.profile) + wred_profile_data = prof_cfg.get_profile_data(profile) if wred_profile_data is None: - raise Exception("Input arguments error. Invalid WRED profile %s" % (args.profile)) - - if args.green_max: - wred_profile_data[WRED_CONFIG_FIELDS["gmax"]] = args.green_max - if args.green_min: - wred_profile_data[WRED_CONFIG_FIELDS["gmin"]] = args.green_min - if args.yellow_max: - wred_profile_data[WRED_CONFIG_FIELDS["ymax"]] = args.yellow_max - if args.yellow_min: - wred_profile_data[WRED_CONFIG_FIELDS["ymin"]] = args.yellow_min - if args.red_max: - wred_profile_data[WRED_CONFIG_FIELDS["rmax"]] = args.red_max - if args.red_min: - wred_profile_data[WRED_CONFIG_FIELDS["rmin"]] = args.red_min - if args.green_drop_prob: - wred_profile_data[WRED_CONFIG_FIELDS["gdrop"]] = args.green_drop_prob - if args.yellow_drop_prob: - wred_profile_data[WRED_CONFIG_FIELDS["ydrop"]] = args.yellow_drop_prob - if args.red_drop_prob: - wred_profile_data[WRED_CONFIG_FIELDS["rdrop"]] = args.red_drop_prob + raise Exception("Input arguments error. 
Invalid WRED profile %s for namespace %s" % (profile, namespace)) + + if green_max: + wred_profile_data[WRED_CONFIG_FIELDS["gmax"]] = green_max + if green_min: + wred_profile_data[WRED_CONFIG_FIELDS["gmin"]] = green_min + if yellow_max: + wred_profile_data[WRED_CONFIG_FIELDS["ymax"]] = yellow_max + if yellow_min: + wred_profile_data[WRED_CONFIG_FIELDS["ymin"]] = yellow_min + if red_max: + wred_profile_data[WRED_CONFIG_FIELDS["rmax"]] = red_max + if red_min: + wred_profile_data[WRED_CONFIG_FIELDS["rmin"]] = red_min + if green_drop_prob: + wred_profile_data[WRED_CONFIG_FIELDS["gdrop"]] = green_drop_prob + if yellow_drop_prob: + wred_profile_data[WRED_CONFIG_FIELDS["ydrop"]] = yellow_drop_prob + if red_drop_prob: + wred_profile_data[WRED_CONFIG_FIELDS["rdrop"]] = red_drop_prob # validate new configuration data if prof_cfg.validate_profile_data(wred_profile_data) == False: @@ -352,41 +426,39 @@ def main(): # apply new configuration # the following parameters can be combined in one run - if args.green_max: - prof_cfg.set_wred_threshold(args.profile, "gmax", args.green_max) - if args.green_min: - prof_cfg.set_wred_threshold(args.profile, "gmin", args.green_min) - if args.yellow_max: - prof_cfg.set_wred_threshold(args.profile, "ymax", args.yellow_max) - if args.yellow_min: - prof_cfg.set_wred_threshold(args.profile, "ymin", args.yellow_min) - if args.red_max: - prof_cfg.set_wred_threshold(args.profile, "rmax", args.red_max) - if args.red_min: - prof_cfg.set_wred_threshold(args.profile, "rmin", args.red_min) - if args.green_drop_prob: - prof_cfg.set_wred_prob(args.profile, "gdrop", args.green_drop_prob) - if args.yellow_drop_prob: - prof_cfg.set_wred_prob(args.profile, "ydrop", args.yellow_drop_prob) - if args.red_drop_prob: - prof_cfg.set_wred_prob(args.profile, "rdrop", args.red_drop_prob) - - elif args.queue: - arg_len_min = 3 - if args.filename: - arg_len_min += 1 - if args.verbose: - arg_len_min += 1 - if len(sys.argv) < arg_len_min: + if green_max: + 
prof_cfg.set_wred_threshold(profile, "gmax", green_max) + if green_min: + prof_cfg.set_wred_threshold(profile, "gmin", green_min) + if yellow_max: + prof_cfg.set_wred_threshold(profile, "ymax", yellow_max) + if yellow_min: + prof_cfg.set_wred_threshold(profile, "ymin", yellow_min) + if red_max: + prof_cfg.set_wred_threshold(profile, "rmax", red_max) + if red_min: + prof_cfg.set_wred_threshold(profile, "rmin", red_min) + if green_drop_prob: + prof_cfg.set_wred_prob(profile, "gdrop", green_drop_prob) + if yellow_drop_prob: + prof_cfg.set_wred_prob(profile, "ydrop", yellow_drop_prob) + if red_drop_prob: + prof_cfg.set_wred_prob(profile, "rdrop", red_drop_prob) + + # Dump the current config in the file for unit tests + if test_filename: + with open(test_filename, "w") as fd: + json.dump(prof_cfg.updated_profile_tables, fd) + + elif queue: + if queue.split(',') == ['']: raise Exception("Input arguments error. Specify at least one queue by index") - - q_ecn = EcnQ(args.queue, args.filename, args.verbose) - if not args.command: + q_ecn = EcnQ(queue, test_filename, verbose, namespace) + if command is None: q_ecn.get() else: - q_ecn.set(enable = True if args.command == 'on' else False) + q_ecn.set(enable = True if command == 'on' else False) else: - parser.print_help() sys.exit(1) except Exception as e: diff --git a/show/main.py b/show/main.py index 06114eb79f..c9e5e2086c 100755 --- a/show/main.py +++ b/show/main.py @@ -2008,10 +2008,13 @@ def policer(policer_name, verbose): # 'ecn' command ("show ecn") # @cli.command('ecn') +@multi_asic_util.multi_asic_click_option_namespace @click.option('--verbose', is_flag=True, help="Enable verbose output") -def ecn(verbose): +def ecn(namespace, verbose): """Show ECN configuration""" cmd = ['ecnconfig', '-l'] + if namespace is not None: + cmd += ['-n', str(namespace)] run_command(cmd, display_cmd=verbose) diff --git a/tests/ecn_input/ecn_test_vectors.py b/tests/ecn_input/ecn_test_vectors.py index c53bf48a24..fe47f0b7a3 100644 --- 
a/tests/ecn_input/ecn_test_vectors.py +++ b/tests/ecn_input/ecn_test_vectors.py @@ -18,205 +18,356 @@ """ +ecn_show_config_output_specific_namespace = """\ +Profile: AZURE_LOSSLESS +----------------------- ------- +red_max_threshold 2097152 +ecn ecn_all +green_min_threshold 1048576 +red_min_threshold 1048576 +yellow_min_threshold 1048576 +green_max_threshold 2097152 +green_drop_probability 5 +yellow_max_threshold 2097152 +yellow_drop_probability 5 +red_drop_probability 5 +----------------------- ------- + +""" + +ecn_show_config_output_multi = """\ +Profile: AZURE_LOSSLESS +----------------------- ------- +red_max_threshold 2097152 +ecn ecn_all +green_min_threshold 1048576 +red_min_threshold 1048576 +yellow_min_threshold 1048576 +green_max_threshold 2097152 +green_drop_probability 5 +yellow_max_threshold 2097152 +yellow_drop_probability 5 +red_drop_probability 5 +----------------------- ------- + +Profile: AZURE_LOSSY +----------------------- ----- +red_max_threshold 32760 +red_min_threshold 4095 +yellow_max_threshold 32760 +yellow_min_threshold 4095 +green_max_threshold 32760 +green_min_threshold 4095 +yellow_drop_probability 2 +----------------------- ----- + +""" + testData = { - 'ecn_show_config' : {'cmd' : ['show'], - 'args' : [], - 'rc' : 0, - 'rc_output': ecn_show_config_output + 'ecn_show_config': {'cmd': ['show'], + 'args': [], + 'rc': 0, + 'rc_output': ecn_show_config_output }, - 'ecn_show_config_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-l', '-vv'], - 'rc' : 0, - 'rc_output': ecn_show_config_output + 'Total profiles: 1\n' + 'ecn_show_config_verbose': {'cmd': ['q_cmd'], + 'args': ['-l', '-vv'], + 'rc': 0, + 'rc_output': ecn_show_config_output + 'Total profiles: 1\n' }, - 'ecn_cfg_gmin' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_min_threshold,1048600'] + 'ecn_cfg_gmin': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600'], + 'rc': 0, + 
'cmp_args': [',AZURE_LOSSLESS,green_min_threshold,1048600'] }, - 'ecn_cfg_gmin_verbose' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600', '-vv'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_min_threshold,1048600'], - 'rc_output' : 'Running command: ecnconfig -p AZURE_LOSSLESS -gmin 1048600 -vv\nSetting green_min_threshold value to 1048600\n' + 'ecn_cfg_gmin_verbose': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600', '-vv'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_min_threshold,1048600'], + 'rc_output': ('Running command: ecnconfig -p AZURE_LOSSLESS -gmin 1048600 -vv\n' + 'Setting green_min_threshold value to 1048600\n') }, - 'ecn_cfg_gmax' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmax', '2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_max_threshold,2097153'] + 'ecn_cfg_gmax': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_max_threshold,2097153'] }, - 'ecn_cfg_ymin' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ymin', '1048600'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,yellow_min_threshold,1048600'] + 'ecn_cfg_ymin': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ymin', '1048600'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,yellow_min_threshold,1048600'] }, - 'ecn_cfg_ymax' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ymax', '2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,yellow_max_threshold,2097153'] + 'ecn_cfg_ymax': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ymax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,yellow_max_threshold,2097153'] }, - 'ecn_cfg_rmin' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmin', '1048600'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,red_min_threshold,1048600'] + 'ecn_cfg_rmin': {'cmd': ['config'], + 'args': 
['-profile', 'AZURE_LOSSLESS', '-rmin', '1048600'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,red_min_threshold,1048600'] }, - 'ecn_cfg_rmax' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmax', '2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,red_max_threshold,2097153'] + 'ecn_cfg_rmax': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,red_max_threshold,2097153'] }, - 'ecn_cfg_rdrop' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rdrop', '10'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,red_drop_probability,10'] + 'ecn_cfg_rdrop': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rdrop', '10'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,red_drop_probability,10'] }, - 'ecn_cfg_ydrop' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ydrop', '11'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,yellow_drop_probability,11'] + 'ecn_cfg_ydrop': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ydrop', '11'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,yellow_drop_probability,11'] }, - 'ecn_cfg_gdrop' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_drop_probability,12'] + 'ecn_cfg_gdrop': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_drop_probability,12'] }, - 'ecn_cfg_gdrop_verbose' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-vv'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_drop_probability,12'], - 'rc_output' : 'Running command: ecnconfig -p AZURE_LOSSLESS -gdrop 12 -vv\nSetting green_drop_probability value to 12%\n' + 'ecn_cfg_gdrop_verbose': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-vv'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_drop_probability,12'], + 
'rc_output': ('Running command: ecnconfig -p AZURE_LOSSLESS -gdrop 12 -vv\n' + 'Setting green_drop_probability value to 12%\n') }, - 'ecn_cfg_multi_set' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-gmax', '2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_drop_probability,12', - 'AZURE_LOSSLESS,green_max_threshold,2097153' - ] + 'ecn_cfg_multi_set': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-gmax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_drop_probability,12', + ',AZURE_LOSSLESS,green_max_threshold,2097153'] }, - 'ecn_cfg_gmin_gmax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmax', '2097153', '-gmin', '2097154'], - 'rc' : 1, - 'rc_msg' : 'Invalid gmin (2097154) and gmax (2097153). gmin should be smaller than gmax' + 'ecn_cfg_gmin_gmax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmax', + '2097153', '-gmin', '2097154'], + 'rc': 1, + 'rc_msg': ('Invalid gmin (2097154) and gmax (2097153).' + ' gmin should be smaller than gmax') }, - 'ecn_cfg_ymin_ymax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ymax', '2097153', '-ymin', '2097154'], - 'rc' : 1, - 'rc_msg' : 'Invalid ymin (2097154) and ymax (2097153). ymin should be smaller than ymax' + 'ecn_cfg_ymin_ymax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ymax', + '2097153', '-ymin', '2097154'], + 'rc': 1, + 'rc_msg': ('Invalid ymin (2097154) and ymax (2097153).' + ' ymin should be smaller than ymax') }, - 'ecn_cfg_rmin_rmax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmax', '2097153', '-rmin', '2097154'], - 'rc' : 1, - 'rc_msg' : 'Invalid rmin (2097154) and rmax (2097153). 
rmin should be smaller than rmax' + 'ecn_cfg_rmin_rmax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmax', + '2097153', '-rmin', '2097154'], + 'rc': 1, + 'rc_msg': ('Invalid rmin (2097154) and rmax (2097153).' + ' rmin should be smaller than rmax') }, - 'ecn_cfg_rmax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmax', '-2097153'], - 'rc' : 1, - 'rc_msg' : 'Invalid rmax (-2097153). rmax should be an non-negative integer' - }, - 'ecn_cfg_rdrop_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rdrop', '105'], - 'rc' : 1, - 'rc_msg' : 'Invalid value for "-rdrop": 105 is not in the valid range of 0 to 100' + 'ecn_cfg_rmax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmax', '-2097153'], + 'rc': 1, + 'rc_msg': 'Invalid rmax (-2097153). rmax should be an non-negative integer' }, - 'ecn_q_get' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\nqueue 3: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_cfg_rdrop_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rdrop', '105'], + 'rc': 1, + 'rc_msg': 'Invalid value for "-rdrop": 105 is not in the valid range of 0 to 100' + }, + 'ecn_q_get': {'cmd': ['q_cmd'], + 'args': ['-q', '3'], + 'rc': 0, + 'rc_msg': 'ECN status:\nqueue 3: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_q_get_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3', '-vv'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\n{0} queue 3: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR' + 'ecn_q_get_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3', '-vv'], + 'rc': 0, + 'rc_msg': 'ECN status:\n{0} queue 3: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR' }, - 'ecn_lossy_q_get' : {'cmd' : 
['q_cmd'], - 'args' : ['-q', '2'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\nqueue 2: off\n', - 'cmp_args' : [None], - 'cmp_q_args' : ['2'] + 'ecn_lossy_q_get': {'cmd': ['q_cmd'], + 'args': ['-q', '2'], + 'rc': 0, + 'rc_msg': 'ECN status:\nqueue 2: off\n', + 'cmp_args': [',None,None'], + 'cmp_q_args': ['2'] }, - 'ecn_q_all_get_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', '-vv'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\n{0} queue 3: on\n{0} queue 4: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR' + 'ecn_q_all_get_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', '-vv'], + 'rc': 0, + 'rc_msg': 'ECN status:\n{0} queue 3: on\n{0} queue 4: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR' }, - 'ecn_q_all_get' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\nqueue 3: on\nqueue 4: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_q_all_get': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4'], + 'rc': 0, + 'rc_msg': 'ECN status:\nqueue 3: on\nqueue 4: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_cfg_q_all_off' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'off'], - 'rc' : 0, - 'cmp_args' : [None], - 'cmp_q_args' : ['3', '4'] - }, - 'ecn_cfg_q_all_off_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'off', '-vv'], - 'rc' : 0, - 'cmp_args' : [None], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Disable ECN on {0} queue 3\nDisable ECN on {0} queue 4' + 'ecn_cfg_q_all_off': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'off'], + 'rc': 0, + 'cmp_args': [',None,None'], + 'cmp_q_args': ['3', '4'] + }, + 'ecn_cfg_q_all_off_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'off', '-vv'], + 'rc': 0, + 'cmp_args': [',None,None'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR', 
+ 'rc_msg': 'Disable ECN on {0} queue 3\nDisable ECN on {0} queue 4' }, - 'ecn_cfg_q_off' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3', 'off'], - 'rc' : 0, - 'cmp_args' : [None, 'wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3'], - 'other_q' : ['4'] + 'ecn_cfg_q_off': {'cmd': ['q_cmd'], + 'args': ['-q', '3', 'off'], + 'rc': 0, + 'cmp_args': [',None,None', ',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3'], + 'other_q': ['4'] }, - 'ecn_cfg_q_off_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3', 'off', '-vv'], - 'rc' : 0, - 'cmp_args' : [None, 'wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3'], - 'other_q' : ['4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Disable ECN on {0} queue 3' + 'ecn_cfg_q_off_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3', 'off', '-vv'], + 'rc': 0, + 'cmp_args': [',None,None', ',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3'], + 'other_q': ['4'], + 'db_table': 'DEVICE_NEIGHBOR', + 'rc_msg': 'Disable ECN on {0} queue 3' }, - 'ecn_cfg_q_all_on' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'on'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_cfg_q_all_on': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'on'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_cfg_q_all_on_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'on', '-vv'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Enable ECN on {0} queue 3\nEnable ECN on {0} queue 4' + 'ecn_cfg_q_all_on_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'on', '-vv'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR', + 'rc_msg': 'Enable ECN on {0} queue 3\nEnable ECN on {0} queue 4' }, - 'ecn_cfg_q_on' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '4', 'on'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], 
- 'cmp_q_args' : ['3', '4'] + 'ecn_cfg_q_on': {'cmd': ['q_cmd'], + 'args': ['-q', '4', 'on'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_cfg_q_on_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '4', 'on', '-vv'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Enable ECN on {0} queue 4' + 'ecn_cfg_q_on_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '4', 'on', '-vv'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR', + 'rc_msg': 'Enable ECN on {0} queue 4' }, - 'ecn_cfg_lossy_q_on' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '0,1,2,5,6,7', 'on'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['0', '1', '2', '5', '6', '7'] - } + 'ecn_cfg_lossy_q_on': {'cmd': ['q_cmd'], + 'args': ['-q', '0,1,2,5,6,7', 'on'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['0', '1', '2', '5', '6', '7'] + }, + 'ecn_show_config_masic': {'cmd': ['show_masic'], + 'args': ['-l'], + 'rc': 0, + 'rc_output': ecn_show_config_output_multi, + }, + 'test_ecn_show_config_verbose_masic': {'cmd': ['show_masic'], + 'args': ['-l', '-vv'], + 'rc': 0, + 'rc_output': ecn_show_config_output_multi + 'Total profiles: 2\n', + }, + 'test_ecn_show_config_namespace': {'cmd': ['show_masic'], + 'args': ['-l', '-n', 'asic0'], + 'rc': 0, + 'rc_output': ecn_show_config_output_specific_namespace, + }, + 'test_ecn_show_config_namespace_verbose': {'cmd': ['show_masic'], + 'args': ['-l', '-n', 'asic0', '-vv'], + 'rc': 0, + 'rc_output': ecn_show_config_output_specific_namespace + + 'Total profiles: 1\n', + }, + 'ecn_cfg_threshold_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSY', '-gmax', '35000', '-n', 'asic1'], + 'rc': 0, + 'cmp_args': ['asic1,AZURE_LOSSY,green_max_threshold,35000'] + }, + 'ecn_cfg_probability_masic': {'cmd': ['config_masic'], 
+ 'args': ['-p', 'AZURE_LOSSY', '-ydrop', '3', '-n', 'asic1'], + 'rc': 0, + 'cmp_args': ['asic1,AZURE_LOSSY,yellow_drop_probability,3'] + }, + 'ecn_cfg_gdrop_verbose_all_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSLESS', '-gdrop', '12', '-vv'], + 'rc': 0, + 'cmp_args': ['asic0-asic1,AZURE_LOSSLESS,green_drop_probability,12'], + 'rc_output': ('Setting green_drop_probability value to 12% ' + 'for namespace asic0\n' + 'Setting green_drop_probability value to 12% ' + 'for namespace asic1\n') + }, + 'ecn_cfg_multi_set_verbose_all_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSLESS', '-gdrop', + '14', '-gmax', '2097153', '-vv'], + 'rc': 0, + 'cmp_args': [('asic0-asic1,AZURE_LOSSLESS,' + 'green_drop_probability,14'), + ('asic0-asic1,AZURE_LOSSLESS,' + 'green_max_threshold,2097153')], + 'rc_output': ('Setting green_max_threshold value to 2097153 ' + 'for namespace asic0\n' + 'Setting green_max_threshold value to 2097153 ' + 'for namespace asic1\n' + 'Setting green_drop_probability value to 14% ' + 'for namespace asic0\n' + 'Setting green_drop_probability value to 14% ' + 'for namespace asic1\n') + }, + 'ecn_q_get_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '1', '-n', 'asic0'], + 'rc': 0, + 'rc_msg': 'ECN status for namespace asic0:\nqueue 1: on\n', + 'cmp_args': ['asic0,wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['1'] + }, + 'ecn_q_get_verbose_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '1', '-vv', '-n', 'asic0'], + 'rc': 0, + 'rc_msg': 'ECN status for namespace asic0:\nEthernet4 queue 1: on\n', + 'cmp_args': ['asic0,wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['1'], + 'db_table': 'DEVICE_NEIGHBOR' + }, + 'ecn_q_get_all_ns_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '0'], + 'rc': 0, + 'rc_msg': ('ECN status for namespace asic0:\nqueue 0: off\n' + 'ECN status for namespace asic1:\nqueue 0: on\n') + }, + 'ecn_q_get_all_ns_verbose_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '0', '-vv'], + 'rc': 0, + 'rc_msg': ('ECN status for namespace 
asic0:\nEthernet4 queue 0: off\n' + 'ECN status for namespace asic1:\nEthernet0 queue 0: on\n') + }, + 'ecn_cfg_q_all_ns_off_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '0,1', 'off'], + 'rc': 0, + 'cmp_args': ['asic0-asic1,None,None'], + 'cmp_q_args': ['0', '1'] + }, + 'ecn_cfg_q_one_ns_off_verbose_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '1', 'on', '-n', 'asic1', '-vv'], + 'rc': 0, + 'rc_msg': 'Enable ECN on Ethernet0 queue 1\n', + 'cmp_args': ['asic1,wred_profile,AZURE_LOSSLESS', + 'asic1,wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['0'], + 'other_q': ['1'] + } } diff --git a/tests/ecn_test.py b/tests/ecn_test.py index 13474b12e8..5d2ac36011 100644 --- a/tests/ecn_test.py +++ b/tests/ecn_test.py @@ -6,11 +6,15 @@ from click.testing import CliRunner import config.main as config -from .ecn_input.ecn_test_vectors import * +from .ecn_input.ecn_test_vectors import testData from .utils import get_result_and_return_code from utilities_common.db import Db import show.main as show +# Constants +ARGS_DELIMITER = ',' +NAMESPACE_DELIMITER = '-' + test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) scripts_path = os.path.join(modules_path, "scripts") @@ -18,13 +22,107 @@ sys.path.insert(0, modules_path) -class TestEcnConfig(object): +class TestEcnConfigBase(object): @classmethod def setup_class(cls): + print("SETUP") os.environ["PATH"] += os.pathsep + scripts_path os.environ['UTILITIES_UNIT_TESTING'] = "2" - print("SETUP") + def process_cmp_args(self, cmp_args): + """ + The arguments are a string marked by delimiters + Arguments marked as 'None', are treated as None objects + First arg is always a collection of namespaces + """ + + args = cmp_args.split(ARGS_DELIMITER) + args = [None if arg == "None" else arg for arg in args] + args[0] = args[0].split(NAMESPACE_DELIMITER) + return args + + def verify_profile(self, queue_db_entry, profile, value): + if profile is not None: + assert queue_db_entry[profile] == value + else: + 
assert profile not in queue_db_entry,\ + "Profile needs to be fully removed from table to propagate NULL OID to SAI" + + def executor(self, input): + runner = CliRunner() + + if 'db_table' in input: + db = Db() + data_list = list(db.cfgdb.get_table(input['db_table'])) + input['rc_msg'] = input['rc_msg'].format(",".join(data_list)) + + if 'show' in input['cmd']: + exec_cmd = show.cli.commands["ecn"] + result = runner.invoke(exec_cmd, input['args']) + exit_code = result.exit_code + output = result.output + elif 'q_cmd' in input['cmd'] or 'show_masic' in input['cmd'] or 'config_masic' in input['cmd']: + exit_code, output = get_result_and_return_code(["ecnconfig"] + input['args']) + else: + exec_cmd = config.config.commands["ecn"] + result = runner.invoke(exec_cmd, input['args']) + exit_code = result.exit_code + output = result.output + + print(exit_code) + print(output) + + if input['rc'] == 0: + assert exit_code == 0 + else: + assert exit_code != 0 + + if 'cmp_args' in input: + fd = open('/tmp/ecnconfig', 'r') + cmp_data = json.load(fd) + + # Verify queue assignments + if 'cmp_q_args' in input: + namespaces, profile, value = self.process_cmp_args(input['cmp_args'][0]) + for namespace in namespaces: + for key in cmp_data[namespace]: + queue_idx = ast.literal_eval(key)[-1] + if queue_idx in input['cmp_q_args']: + self.verify_profile(cmp_data[namespace][key], profile, value) + + # other_q helps verify two different queue assignments + if 'other_q' in input: + namespaces1, profile1, value1 = self.process_cmp_args(input['cmp_args'][-1]) + for namespace1 in namespaces1: + for key in cmp_data[namespace1]: + queue_idx = ast.literal_eval(key)[-1] + if 'other_q' in input and queue_idx in input['other_q']: + self.verify_profile(cmp_data[namespace1][key], profile1, value1) + # Verify non-queue related assignments + else: + for args in input['cmp_args']: + namespaces, profile, name, value = self.process_cmp_args(args) + for namespace in namespaces: + 
assert(cmp_data[namespace][profile][name] == value) + fd.close() + + if 'rc_msg' in input: + assert input['rc_msg'] in output + + if 'rc_output' in input: + assert output == input['rc_output'] + + @classmethod + def teardown_class(cls): + os.environ['PATH'] = os.pathsep.join(os.environ['PATH'].split(os.pathsep)[:-1]) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + + if os.path.isfile('/tmp/ecnconfig'): + os.remove('/tmp/ecnconfig') + print("TEARDOWN") + + +class TestEcnConfig(TestEcnConfigBase): def test_ecn_show_config(self): self.executor(testData['ecn_show_config']) @@ -123,77 +221,3 @@ def test_ecn_queue_set_all_on_verbose(self): def test_ecn_queue_set_lossy_q_on(self): self.executor(testData['ecn_cfg_lossy_q_on']) - - def process_cmp_args(self, cmp_args): - if cmp_args is None: - return (None, None) - return cmp_args.split(',') - - def verify_profile(self, queue_db_entry, profile, value): - if profile != None: - assert queue_db_entry[profile] == value - else: - assert profile not in queue_db_entry,\ - "Profile needs to be fully removed from table to propagate NULL OID to SAI" - - def executor(self, input): - runner = CliRunner() - - if 'db_table' in input: - db = Db() - data_list = list(db.cfgdb.get_table(input['db_table'])) - input['rc_msg'] = input['rc_msg'].format(",".join(data_list)) - - if 'show' in input['cmd']: - exec_cmd = show.cli.commands["ecn"] - result = runner.invoke(exec_cmd, input['args']) - exit_code = result.exit_code - output = result.output - elif 'q_cmd' in input['cmd'] : - exit_code, output = get_result_and_return_code(["ecnconfig"] + input['args']) - else: - exec_cmd = config.config.commands["ecn"] - result = runner.invoke(exec_cmd, input['args']) - exit_code = result.exit_code - output = result.output - - print(exit_code) - print(output) - - if input['rc'] == 0: - assert exit_code == 0 - else: - assert exit_code != 0 - - if 'cmp_args' in input: - fd = open('/tmp/ecnconfig', 'r') - cmp_data = json.load(fd) - if 'cmp_q_args' in input: - 
profile, value = self.process_cmp_args(input['cmp_args'][0]) - if 'other_q' in input: - profile1, value1 = self.process_cmp_args(input['cmp_args'][-1]) - for key in cmp_data: - queue_idx = ast.literal_eval(key)[-1] - if queue_idx in input['cmp_q_args']: - self.verify_profile(cmp_data[key], profile, value) - if 'other_q' in input and queue_idx in input['other_q']: - self.verify_profile(cmp_data[key], profile1, value1) - else: - for args in input['cmp_args']: - profile, name, value = args.split(',') - assert(cmp_data[profile][name] == value) - fd.close() - - if 'rc_msg' in input: - assert input['rc_msg'] in output - - if 'rc_output' in input: - assert output == input['rc_output'] - - @classmethod - def teardown_class(cls): - os.environ['PATH'] = os.pathsep.join(os.environ['PATH'].split(os.pathsep)[:-1]) - os.environ['UTILITIES_UNIT_TESTING'] = "0" - if os.path.isfile('/tmp/ecnconfig'): - os.remove('/tmp/ecnconfig') - print("TEARDOWN") diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json index 8b867bdc96..da38af13dd 100644 --- a/tests/mock_tables/asic0/config_db.json +++ b/tests/mock_tables/asic0/config_db.json @@ -303,5 +303,28 @@ "SYSLOG_CONFIG_FEATURE|database": { "rate_limit_interval": "222", "rate_limit_burst": "22222" + }, + "WRED_PROFILE|AZURE_LOSSLESS": { + "red_max_threshold": "2097152", + "ecn": "ecn_all", + "green_min_threshold": "1048576", + "red_min_threshold": "1048576", + "yellow_min_threshold": "1048576", + "green_max_threshold": "2097152", + "green_drop_probability": "5", + "yellow_max_threshold": "2097152", + "yellow_drop_probability": "5", + "red_drop_probability": "5" + }, + "DEVICE_NEIGHBOR|Ethernet4": { + "name": "Serverss0", + "port": "eth0" + }, + "QUEUE|Ethernet4|0": { + "scheduler": "[SCHEDULAR|scheduler.0]" + }, + "QUEUE|Ethernet4|1": { + "scheduler": "[SCHEDULAR|scheduler.0]", + "wred_profile": "AZURE_LOSSLESS" } } diff --git a/tests/mock_tables/asic1/config_db.json 
b/tests/mock_tables/asic1/config_db.json index 56823ae113..1bcd812ef2 100644 --- a/tests/mock_tables/asic1/config_db.json +++ b/tests/mock_tables/asic1/config_db.json @@ -242,5 +242,25 @@ "SYSLOG_CONFIG_FEATURE|database": { "rate_limit_interval": "555", "rate_limit_burst": "55555" + }, + "WRED_PROFILE|AZURE_LOSSY": { + "red_max_threshold":"32760", + "red_min_threshold":"4095", + "yellow_max_threshold":"32760", + "yellow_min_threshold":"4095", + "green_max_threshold": "32760", + "green_min_threshold": "4095", + "yellow_drop_probability": "2" + }, + "DEVICE_NEIGHBOR|Ethernet0": { + "name": "Servers", + "port": "eth0" + }, + "QUEUE|Ethernet0|0": { + "scheduler": "[SCHEDULAR|scheduler.0]", + "wred_profile": "AZURE_LOSSLESS" + }, + "QUEUE|Ethernet0|1": { + "scheduler": "[SCHEDULAR|scheduler.0]" } } diff --git a/tests/multi_asic_ecnconfig_test.py b/tests/multi_asic_ecnconfig_test.py new file mode 100644 index 0000000000..034a517ace --- /dev/null +++ b/tests/multi_asic_ecnconfig_test.py @@ -0,0 +1,64 @@ +import os +import sys +from .ecn_test import TestEcnConfigBase +from .ecn_input.ecn_test_vectors import testData + +root_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(root_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, root_path) +sys.path.insert(0, modules_path) + + +class TestEcnConfigMultiAsic(TestEcnConfigBase): + @classmethod + def setup_class(cls): + super().setup_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + + def test_ecn_show_config_all_masic(self): + self.executor(testData['ecn_show_config_masic']) + + def test_ecn_show_config_all_verbose_masic(self): + self.executor(testData['test_ecn_show_config_verbose_masic']) + + def test_ecn_show_config_one_masic(self): + self.executor(testData['test_ecn_show_config_namespace']) + + def test_ecn_show_config_one_verbose_masic(self): + self.executor(testData['test_ecn_show_config_namespace_verbose']) + + def 
test_ecn_config_change_other_threshold_masic(self): + self.executor(testData['ecn_cfg_threshold_masic']) + + def test_ecn_config_change_other_prob_masic(self): + self.executor(testData['ecn_cfg_probability_masic']) + + def test_ecn_config_change_gdrop_verbose_all_masic(self): + self.executor(testData['ecn_cfg_gdrop_verbose_all_masic']) + + def test_ecn_config_multi_set_verbose_all_masic(self): + self.executor(testData['ecn_cfg_multi_set_verbose_all_masic']) + + def test_ecn_queue_get_masic(self): + self.executor(testData['ecn_q_get_masic']) + + def test_ecn_queue_get_verbose_masic(self): + self.executor(testData['ecn_q_get_verbose_masic']) + + def test_ecn_queue_get_all_masic(self): + self.executor(testData['ecn_q_get_all_ns_masic']) + + def test_ecn_queue_get_all_verbose_masic(self): + self.executor(testData['ecn_q_get_all_ns_verbose_masic']) + + def test_ecn_q_set_off_all_masic(self): + self.executor(testData['ecn_cfg_q_all_ns_off_masic']) + + def test_ecn_q_set_off_one_masic(self): + self.executor(testData['ecn_cfg_q_one_ns_off_verbose_masic']) + + @classmethod + def teardown_class(cls): + super().teardown_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" From c4ce5ae5947554258608ae5ba97c50bdaf761015 Mon Sep 17 00:00:00 2001 From: Changrong Wu Date: Wed, 28 Aug 2024 10:05:06 -0700 Subject: [PATCH 31/67] Enable show interfacess counters on chassis supervisor (#3488) What I did I modify the portstat script and the implementation of Class Porstat to enable the Chassis Supervisor to collect port counters from linecards. By doing so, "show interfaces counters" on the Chassis Supervisor can now collect port counters from linecards and display them on the CLI. How I did it I made the Class Portstat aware of chassis environment and added logic to collect port counters from linecards through "GET_LINECARD_COUNTER|pull" signal in CHASSIS_STATE_DB. 
And I also added an agent on every linecard to collect port counters and send them to CHASSIS_STATE_DB on the Chassis Supervisor. Note: the agent is not part of this PR. The current agent used in my test is only for internal use in MSFT. How to verify it Run it on a SONiC Chassis Supervisor and make sure the Linecards are equipped with agents to publish port counters to CHASSIS_STATE_DB. --- scripts/portstat | 536 +------------- tests/mock_tables/chassis_state_db.json | 58 +- .../on_sup_no_counters/chassis_state_db.json | 11 + .../on_sup_partial_lc/chassis_state_db.json | 48 ++ tests/portstat_test.py | 92 ++- utilities_common/netstat.py | 9 + utilities_common/portstat.py | 666 ++++++++++++++++++ 7 files changed, 890 insertions(+), 530 deletions(-) create mode 100644 tests/portstat_db/on_sup_no_counters/chassis_state_db.json create mode 100644 tests/portstat_db/on_sup_partial_lc/chassis_state_db.json create mode 100644 utilities_common/portstat.py diff --git a/scripts/portstat b/scripts/portstat index 6294ba57a9..58cc9aefd6 100755 --- a/scripts/portstat +++ b/scripts/portstat @@ -8,16 +8,10 @@ import json import argparse -import datetime import os.path import sys import time -from collections import OrderedDict, namedtuple - -from natsort import natsorted -from tabulate import tabulate -from sonic_py_common import multi_asic -from sonic_py_common import device_info +from collections import OrderedDict # mock the redis for unit test purposes # try: @@ -27,6 +21,13 @@ try: sys.path.insert(0, modules_path) sys.path.insert(0, tests_path) import mock_tables.dbconnector + + if os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] == "1": + import mock + import sonic_py_common + from swsscommon.swsscommon import SonicV2Connector + sonic_py_common.device_info.is_supervisor = mock.MagicMock(return_value=True) + SonicV2Connector.delete_all_by_pattern = mock.MagicMock() if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": import mock_tables.mock_multi_asic 
mock_tables.dbconnector.load_namespace_config() @@ -34,530 +35,11 @@ try: except KeyError: pass -from swsscommon.swsscommon import CounterTable, PortCounter from utilities_common import constants from utilities_common.intf_filter import parse_interface_in_filter -import utilities_common.multi_asic as multi_asic_util -from utilities_common.netstat import ns_diff, table_as_json, format_brate, format_prate, format_util, format_number_with_comma from utilities_common.cli import json_serial, UserCache - -""" -The order and count of statistics mentioned below needs to be in sync with the values in portstat script -So, any fields added/deleted in here should be reflected in portstat script also -""" -NStats = namedtuple("NStats", "rx_ok, rx_err, rx_drop, rx_ovr, tx_ok,\ - tx_err, tx_drop, tx_ovr, rx_byt, tx_byt,\ - rx_64, rx_65_127, rx_128_255, rx_256_511, rx_512_1023, rx_1024_1518, rx_1519_2047, rx_2048_4095, rx_4096_9216, rx_9217_16383,\ - rx_uca, rx_mca, rx_bca, rx_all,\ - tx_64, tx_65_127, tx_128_255, tx_256_511, tx_512_1023, tx_1024_1518, tx_1519_2047, tx_2048_4095, tx_4096_9216, tx_9217_16383,\ - tx_uca, tx_mca, tx_bca, tx_all,\ - rx_jbr, rx_frag, rx_usize, rx_ovrrun,\ - fec_corr, fec_uncorr, fec_symbol_err") -header_all = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', - 'TX_OK', 'TX_BPS', 'TX_PPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] -header_std = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', - 'TX_OK', 'TX_BPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] -header_errors_only = ['IFACE', 'STATE', 'RX_ERR', 'RX_DRP', 'RX_OVR', 'TX_ERR', 'TX_DRP', 'TX_OVR'] -header_fec_only = ['IFACE', 'STATE', 'FEC_CORR', 'FEC_UNCORR', 'FEC_SYMBOL_ERR'] -header_rates_only = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_OK', 'TX_BPS', 'TX_PPS', 'TX_UTIL'] - -rates_key_list = [ 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_BPS', 'TX_PPS', 'TX_UTIL' ] -ratestat_fields = ("rx_bps", "rx_pps", "rx_util", 
"tx_bps", "tx_pps", "tx_util") -RateStats = namedtuple("RateStats", ratestat_fields) - -""" -The order and count of statistics mentioned below needs to be in sync with the values in portstat script -So, any fields added/deleted in here should be reflected in portstat script also -""" -BUCKET_NUM = 45 -counter_bucket_dict = { - 0:['SAI_PORT_STAT_IF_IN_UCAST_PKTS', 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS'], - 1:['SAI_PORT_STAT_IF_IN_ERRORS'], - 2:['SAI_PORT_STAT_IF_IN_DISCARDS'], - 3:['SAI_PORT_STAT_ETHER_RX_OVERSIZE_PKTS'], - 4:['SAI_PORT_STAT_IF_OUT_UCAST_PKTS', 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS'], - 5:['SAI_PORT_STAT_IF_OUT_ERRORS'], - 6:['SAI_PORT_STAT_IF_OUT_DISCARDS'], - 7:['SAI_PORT_STAT_ETHER_TX_OVERSIZE_PKTS'], - 8:['SAI_PORT_STAT_IF_IN_OCTETS'], - 9:['SAI_PORT_STAT_IF_OUT_OCTETS'], - 10:['SAI_PORT_STAT_ETHER_IN_PKTS_64_OCTETS'], - 11:['SAI_PORT_STAT_ETHER_IN_PKTS_65_TO_127_OCTETS'], - 12:['SAI_PORT_STAT_ETHER_IN_PKTS_128_TO_255_OCTETS'], - 13:['SAI_PORT_STAT_ETHER_IN_PKTS_256_TO_511_OCTETS'], - 14:['SAI_PORT_STAT_ETHER_IN_PKTS_512_TO_1023_OCTETS'], - 15:['SAI_PORT_STAT_ETHER_IN_PKTS_1024_TO_1518_OCTETS'], - 16:['SAI_PORT_STAT_ETHER_IN_PKTS_1519_TO_2047_OCTETS'], - 17:['SAI_PORT_STAT_ETHER_IN_PKTS_2048_TO_4095_OCTETS'], - 18:['SAI_PORT_STAT_ETHER_IN_PKTS_4096_TO_9216_OCTETS'], - 19:['SAI_PORT_STAT_ETHER_IN_PKTS_9217_TO_16383_OCTETS'], - 20:['SAI_PORT_STAT_IF_IN_UCAST_PKTS'], - 21:['SAI_PORT_STAT_IF_IN_MULTICAST_PKTS'], - 22:['SAI_PORT_STAT_IF_IN_BROADCAST_PKTS'], - 23:['SAI_PORT_STAT_IF_IN_UCAST_PKTS', 'SAI_PORT_STAT_IF_IN_MULTICAST_PKTS', 'SAI_PORT_STAT_IF_IN_BROADCAST_PKTS'], - 24:['SAI_PORT_STAT_ETHER_OUT_PKTS_64_OCTETS'], - 25:['SAI_PORT_STAT_ETHER_OUT_PKTS_65_TO_127_OCTETS'], - 26:['SAI_PORT_STAT_ETHER_OUT_PKTS_128_TO_255_OCTETS'], - 27:['SAI_PORT_STAT_ETHER_OUT_PKTS_256_TO_511_OCTETS'], - 28:['SAI_PORT_STAT_ETHER_OUT_PKTS_512_TO_1023_OCTETS'], - 29:['SAI_PORT_STAT_ETHER_OUT_PKTS_1024_TO_1518_OCTETS'], - 
30:['SAI_PORT_STAT_ETHER_OUT_PKTS_1519_TO_2047_OCTETS'], - 31:['SAI_PORT_STAT_ETHER_OUT_PKTS_2048_TO_4095_OCTETS'], - 32:['SAI_PORT_STAT_ETHER_OUT_PKTS_4096_TO_9216_OCTETS'], - 33:['SAI_PORT_STAT_ETHER_OUT_PKTS_9217_TO_16383_OCTETS'], - 34:['SAI_PORT_STAT_IF_OUT_UCAST_PKTS'], - 35:['SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS'], - 36:['SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS'], - 37:['SAI_PORT_STAT_IF_OUT_UCAST_PKTS', 'SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS', 'SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS'], - 38:['SAI_PORT_STAT_ETHER_STATS_JABBERS'], - 39:['SAI_PORT_STAT_ETHER_STATS_FRAGMENTS'], - 40:['SAI_PORT_STAT_ETHER_STATS_UNDERSIZE_PKTS'], - 41:['SAI_PORT_STAT_IP_IN_RECEIVES'], - 42:['SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES'], - 43:['SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES'], - 44:['SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS'] -} - -STATUS_NA = 'N/A' - -RATES_TABLE_PREFIX = "RATES:" - -COUNTER_TABLE_PREFIX = "COUNTERS:" -COUNTERS_PORT_NAME_MAP = "COUNTERS_PORT_NAME_MAP" - -PORT_STATUS_TABLE_PREFIX = "PORT_TABLE:" -PORT_STATE_TABLE_PREFIX = "PORT_TABLE|" -PORT_OPER_STATUS_FIELD = "oper_status" -PORT_ADMIN_STATUS_FIELD = "admin_status" -PORT_STATUS_VALUE_UP = 'UP' -PORT_STATUS_VALUE_DOWN = 'DOWN' -PORT_SPEED_FIELD = "speed" - -PORT_STATE_UP = 'U' -PORT_STATE_DOWN = 'D' -PORT_STATE_DISABLED = 'X' - - -class Portstat(object): - def __init__(self, namespace, display_option): - self.db = None - self.multi_asic = multi_asic_util.MultiAsic(display_option, namespace) - - def get_cnstat_dict(self): - self.cnstat_dict = OrderedDict() - self.cnstat_dict['time'] = datetime.datetime.now() - self.ratestat_dict = OrderedDict() - self.collect_stat() - return self.cnstat_dict, self.ratestat_dict - - @multi_asic_util.run_on_multi_asic - def collect_stat(self): - """ - Collect the statisitics from all the asics present on the - device and store in a dict - """ - - cnstat_dict, ratestat_dict = self.get_cnstat() - self.cnstat_dict.update(cnstat_dict) - self.ratestat_dict.update(ratestat_dict) - - 
def get_cnstat(self): - """ - Get the counters info from database. - """ - def get_counters(port): - """ - Get the counters from specific table. - """ - fields = ["0"]*BUCKET_NUM - - _, fvs = counter_table.get(PortCounter(), port) - fvs = dict(fvs) - for pos, cntr_list in counter_bucket_dict.items(): - for counter_name in cntr_list: - if counter_name not in fvs: - fields[pos] = STATUS_NA - elif fields[pos] != STATUS_NA: - fields[pos] = str(int(fields[pos]) + int(fvs[counter_name])) - - cntr = NStats._make(fields)._asdict() - return cntr - - def get_rates(table_id): - """ - Get the rates from specific table. - """ - fields = ["0","0","0","0","0","0"] - for pos, name in enumerate(rates_key_list): - full_table_id = RATES_TABLE_PREFIX + table_id - counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, name) - if counter_data is None: - fields[pos] = STATUS_NA - elif fields[pos] != STATUS_NA: - fields[pos] = float(counter_data) - cntr = RateStats._make(fields) - return cntr - - # Get the info from database - counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP); - # Build a dictionary of the stats - cnstat_dict = OrderedDict() - cnstat_dict['time'] = datetime.datetime.now() - ratestat_dict = OrderedDict() - counter_table = CounterTable(self.db.get_redis_client(self.db.COUNTERS_DB)) - if counter_port_name_map is None: - return cnstat_dict, ratestat_dict - for port in natsorted(counter_port_name_map): - port_name = port.split(":")[0] - if self.multi_asic.skip_display(constants.PORT_OBJ, port_name): - continue - cnstat_dict[port] = get_counters(port) - ratestat_dict[port] = get_rates(counter_port_name_map[port]) - return cnstat_dict, ratestat_dict - - def get_port_speed(self, port_name): - """ - Get the port speed - """ - # Get speed from APPL_DB - state_db_table_id = PORT_STATE_TABLE_PREFIX + port_name - app_db_table_id = PORT_STATUS_TABLE_PREFIX + port_name - for ns in self.multi_asic.get_ns_list_based_on_options(): - self.db = 
multi_asic.connect_to_all_dbs_for_ns(ns) - speed = self.db.get(self.db.STATE_DB, state_db_table_id, PORT_SPEED_FIELD) - oper_status = self.db.get(self.db.APPL_DB, app_db_table_id, PORT_OPER_STATUS_FIELD) - if speed is None or speed == STATUS_NA or oper_status != "up": - speed = self.db.get(self.db.APPL_DB, app_db_table_id, PORT_SPEED_FIELD) - if speed is not None: - return int(speed) - return STATUS_NA - - def get_port_state(self, port_name): - """ - Get the port state - """ - full_table_id = PORT_STATUS_TABLE_PREFIX + port_name - for ns in self.multi_asic.get_ns_list_based_on_options(): - self.db = multi_asic.connect_to_all_dbs_for_ns(ns) - admin_state = self.db.get(self.db.APPL_DB, full_table_id, PORT_ADMIN_STATUS_FIELD) - oper_state = self.db.get(self.db.APPL_DB, full_table_id, PORT_OPER_STATUS_FIELD) - - if admin_state is None or oper_state is None: - continue - if admin_state.upper() == PORT_STATUS_VALUE_DOWN: - return PORT_STATE_DISABLED - elif admin_state.upper() == PORT_STATUS_VALUE_UP and oper_state.upper() == PORT_STATUS_VALUE_UP: - return PORT_STATE_UP - elif admin_state.upper() == PORT_STATUS_VALUE_UP and oper_state.upper() == PORT_STATUS_VALUE_DOWN: - return PORT_STATE_DOWN - else: - return STATUS_NA - return STATUS_NA - - - def cnstat_print(self, cnstat_dict, ratestat_dict, intf_list, use_json, print_all, errors_only, fec_stats_only, rates_only, detail=False): - """ - Print the cnstat. 
- """ - - if intf_list and detail: - self.cnstat_intf_diff_print(cnstat_dict, {}, intf_list) - return None - - table = [] - header = None - - for key, data in cnstat_dict.items(): - if key == 'time': - continue - if intf_list and key not in intf_list: - continue - port_speed = self.get_port_speed(key) - rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) - if print_all: - header = header_all - table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) - elif errors_only: - header = header_errors_only - table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) - elif fec_stats_only: - header = header_fec_only - table.append((key, self.get_port_state(key), - format_number_with_comma(data['fec_corr']), - format_number_with_comma(data['fec_uncorr']), - format_number_with_comma(data['fec_symbol_err']))) - elif rates_only: - header = header_rates_only - table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - 
format_util(rates.tx_bps, port_speed))) - else: - header = header_std - table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_ok']), - format_brate(rates.rx_bps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_ok']), - format_brate(rates.tx_bps), - format_util(rates.tx_bps, port_speed), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) - if table: - if use_json: - print(table_as_json(table, header)) - else: - print(tabulate(table, header, tablefmt='simple', stralign='right')) - if (multi_asic.is_multi_asic() or device_info.is_chassis()) and not use_json: - print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") - - def cnstat_intf_diff_print(self, cnstat_new_dict, cnstat_old_dict, intf_list): - """ - Print the difference between two cnstat results for interface. - """ - - for key, cntr in cnstat_new_dict.items(): - if key == 'time': - continue - - if key in cnstat_old_dict: - old_cntr = cnstat_old_dict.get(key) - else: - old_cntr = NStats._make([0] * BUCKET_NUM)._asdict() - - if intf_list and key not in intf_list: - continue - - print("Packets Received 64 Octets..................... {}".format(ns_diff(cntr['rx_64'], old_cntr['rx_64']))) - print("Packets Received 65-127 Octets................. {}".format(ns_diff(cntr['rx_65_127'], old_cntr['rx_65_127']))) - print("Packets Received 128-255 Octets................ {}".format(ns_diff(cntr['rx_128_255'], old_cntr['rx_128_255']))) - print("Packets Received 256-511 Octets................ {}".format(ns_diff(cntr['rx_256_511'], old_cntr['rx_256_511']))) - print("Packets Received 512-1023 Octets............... 
{}".format(ns_diff(cntr['rx_512_1023'], old_cntr['rx_512_1023']))) - print("Packets Received 1024-1518 Octets.............. {}".format(ns_diff(cntr['rx_1024_1518'], old_cntr['rx_1024_1518']))) - print("Packets Received 1519-2047 Octets.............. {}".format(ns_diff(cntr['rx_1519_2047'], old_cntr['rx_1519_2047']))) - print("Packets Received 2048-4095 Octets.............. {}".format(ns_diff(cntr['rx_2048_4095'], old_cntr['rx_2048_4095']))) - print("Packets Received 4096-9216 Octets.............. {}".format(ns_diff(cntr['rx_4096_9216'], old_cntr['rx_4096_9216']))) - print("Packets Received 9217-16383 Octets............. {}".format(ns_diff(cntr['rx_9217_16383'], old_cntr['rx_9217_16383']))) - - print("") - print("Total Packets Received Without Errors.......... {}".format(ns_diff(cntr['rx_all'], old_cntr['rx_all']))) - print("Unicast Packets Received....................... {}".format(ns_diff(cntr['rx_uca'], old_cntr['rx_uca']))) - print("Multicast Packets Received..................... {}".format(ns_diff(cntr['rx_mca'], old_cntr['rx_mca']))) - print("Broadcast Packets Received..................... {}".format(ns_diff(cntr['rx_bca'], old_cntr['rx_bca']))) - - print("") - print("Jabbers Received............................... {}".format(ns_diff(cntr['rx_jbr'], old_cntr['rx_jbr']))) - print("Fragments Received............................. {}".format(ns_diff(cntr['rx_frag'], old_cntr['rx_frag']))) - print("Undersize Received............................. {}".format(ns_diff(cntr['rx_usize'], old_cntr['rx_usize']))) - print("Overruns Received.............................. {}".format(ns_diff(cntr['rx_ovrrun'], old_cntr['rx_ovrrun']))) - - print("") - print("Packets Transmitted 64 Octets.................. {}".format(ns_diff(cntr['tx_64'], old_cntr['tx_64']))) - print("Packets Transmitted 65-127 Octets.............. {}".format(ns_diff(cntr['tx_65_127'], old_cntr['tx_65_127']))) - print("Packets Transmitted 128-255 Octets............. 
{}".format(ns_diff(cntr['tx_128_255'], old_cntr['tx_128_255']))) - print("Packets Transmitted 256-511 Octets............. {}".format(ns_diff(cntr['tx_256_511'], old_cntr['tx_256_511']))) - print("Packets Transmitted 512-1023 Octets............ {}".format(ns_diff(cntr['tx_512_1023'], old_cntr['tx_512_1023']))) - print("Packets Transmitted 1024-1518 Octets........... {}".format(ns_diff(cntr['tx_1024_1518'], old_cntr['tx_1024_1518']))) - print("Packets Transmitted 1519-2047 Octets........... {}".format(ns_diff(cntr['tx_1519_2047'], old_cntr['tx_1519_2047']))) - print("Packets Transmitted 2048-4095 Octets........... {}".format(ns_diff(cntr['tx_2048_4095'], old_cntr['tx_2048_4095']))) - print("Packets Transmitted 4096-9216 Octets........... {}".format(ns_diff(cntr['tx_4096_9216'], old_cntr['tx_4096_9216']))) - print("Packets Transmitted 9217-16383 Octets.......... {}".format(ns_diff(cntr['tx_9217_16383'], old_cntr['tx_9217_16383']))) - - print("") - print("Total Packets Transmitted Successfully......... {}".format(ns_diff(cntr['tx_all'], old_cntr['tx_all']))) - print("Unicast Packets Transmitted.................... {}".format(ns_diff(cntr['tx_uca'], old_cntr['tx_uca']))) - print("Multicast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_mca'], old_cntr['tx_mca']))) - print("Broadcast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_bca'], old_cntr['tx_bca']))) - - print("Time Since Counters Last Cleared............... " + str(cnstat_old_dict.get('time'))) - - - def cnstat_diff_print(self, cnstat_new_dict, cnstat_old_dict, - ratestat_dict, intf_list, use_json, - print_all, errors_only, fec_stats_only, - rates_only, detail=False): - """ - Print the difference between two cnstat results. 
- """ - - if intf_list and detail: - self.cnstat_intf_diff_print(cnstat_new_dict, cnstat_old_dict, intf_list) - return None - - table = [] - header = None - - for key, cntr in cnstat_new_dict.items(): - if key == 'time': - continue - old_cntr = None - if key in cnstat_old_dict: - old_cntr = cnstat_old_dict.get(key) - - rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(ratestat_fields))) - - if intf_list and key not in intf_list: - continue - port_speed = self.get_port_speed(key) - - if print_all: - header = header_all - if old_cntr is not None: - table.append((key, self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) - else: - table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) - elif errors_only: - header = header_errors_only - if old_cntr is not None: - table.append((key, self.get_port_state(key), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], 
old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) - else: - table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) - elif fec_stats_only: - header = header_fec_only - if old_cntr is not None: - table.append((key, self.get_port_state(key), - ns_diff(cntr['fec_corr'], old_cntr['fec_corr']), - ns_diff(cntr['fec_uncorr'], old_cntr['fec_uncorr']), - ns_diff(cntr['fec_symbol_err'], old_cntr['fec_symbol_err']))) - else: - table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['fec_corr']), - format_number_with_comma(cntr['fec_uncorr']), - format_number_with_comma(cntr['fec_symbol_err']))) - - elif rates_only: - header = header_rates_only - if old_cntr is not None: - table.append((key, - self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed))) - else: - table.append((key, - self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed))) - else: - header = header_std - if old_cntr is not None: - table.append((key, - self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), - format_brate(rates.rx_bps), - 
format_util(rates.rx_bps, port_speed), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), - format_brate(rates.tx_bps), - format_util(rates.tx_bps, port_speed), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) - else: - table.append((key, - self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), - format_brate(rates.rx_bps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_ok']), - format_brate(rates.tx_bps), - format_util(rates.tx_bps, port_speed), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) - if table: - if use_json: - print(table_as_json(table, header)) - else: - print(tabulate(table, header, tablefmt='simple', stralign='right')) - if (multi_asic.is_multi_asic() or device_info.is_chassis()) and not use_json: - print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") +from utilities_common.portstat import Portstat def main(): parser = argparse.ArgumentParser(description='Display the ports state and counters', diff --git a/tests/mock_tables/chassis_state_db.json b/tests/mock_tables/chassis_state_db.json index 6af9e19da4..365cbf80cd 100644 --- a/tests/mock_tables/chassis_state_db.json +++ b/tests/mock_tables/chassis_state_db.json @@ -7,6 +7,62 @@ }, "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { "module_hostname": "sonic-lc3" + }, + "LINECARD_PORT_STAT_MARK_TABLE|sonic-lc1": { + "timestamp": "2020-07-01 00:00:00" + }, + "LINECARD_PORT_STAT_MARK_TABLE|sonic-lc3": { + "timestamp": "2020-07-01 00:00:00" + }, + 
"LINECARD_PORT_STAT_TABLE|Ethernet1/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet2/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet11/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 } - } \ No newline at end of file diff --git a/tests/portstat_db/on_sup_no_counters/chassis_state_db.json b/tests/portstat_db/on_sup_no_counters/chassis_state_db.json new file mode 100644 index 0000000000..5c380954c3 --- /dev/null +++ b/tests/portstat_db/on_sup_no_counters/chassis_state_db.json @@ -0,0 +1,11 @@ +{ + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD0": { + "module_hostname": "sonic-lc1" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { + "module_hostname": "sonic-lc2" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { + "module_hostname": "sonic-lc3" + } +} \ No newline at end of file diff --git a/tests/portstat_db/on_sup_partial_lc/chassis_state_db.json b/tests/portstat_db/on_sup_partial_lc/chassis_state_db.json new file mode 100644 index 0000000000..6040a80776 --- /dev/null +++ b/tests/portstat_db/on_sup_partial_lc/chassis_state_db.json @@ -0,0 +1,48 @@ +{ + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD0": { + "module_hostname": "sonic-lc1" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { + "module_hostname": "sonic-lc2" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { + "module_hostname": "sonic-lc3" + }, + 
"LINECARD_PORT_STAT_MARK_TABLE|sonic-lc1": { + "timestamp": "2020-07-01 00:00:00" + }, + "LINECARD_PORT_STAT_TABLE|Ethernet1/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet2/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + } +} \ No newline at end of file diff --git a/tests/portstat_test.py b/tests/portstat_test.py index 885c06662f..3af704e66e 100644 --- a/tests/portstat_test.py +++ b/tests/portstat_test.py @@ -8,8 +8,8 @@ from .utils import get_result_and_return_code from utilities_common.cli import UserCache -root_path = os.path.dirname(os.path.abspath(__file__)) -modules_path = os.path.dirname(root_path) +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) scripts_path = os.path.join(modules_path, "scripts") intf_counters_before_clear = """\ @@ -234,6 +234,23 @@ Time Since Counters Last Cleared............... 
None """ +intf_counters_on_sup = """\ + IFACE STATE RX_OK RX_BPS RX_UTIL RX_ERR RX_DRP RX_OVR TX_OK TX_BPS TX_UTIL\ + TX_ERR TX_DRP TX_OVR +------------ ------- ------- --------- --------- -------- -------- -------- ------- --------- ---------\ + -------- -------- -------- + Ethernet1/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 + Ethernet2/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 +Ethernet11/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 +""" + +intf_counters_on_sup_no_counters = "Linecard Counter Table is not available.\n" + +intf_counters_on_sup_partial_lc = "Not all linecards have published their counter values.\n" + TEST_PERIOD = 3 @@ -397,13 +414,84 @@ def test_clear_intf_counters(self): assert return_code == 0 verify_after_clear(result, intf_counter_after_clear) + def test_show_intf_counters_on_sup(self): + remove_tmp_cnstat_file() + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "1" + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == intf_counters_on_sup + + return_code, result = get_result_and_return_code(['portstat']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == intf_counters_on_sup + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" + + def test_show_intf_counters_on_sup_no_counters(self): + remove_tmp_cnstat_file() + os.system("cp {} /tmp/".format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.system("cp {} {}".format(os.path.join(test_path, "portstat_db/on_sup_no_counters/chassis_state_db.json"), + os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "1" + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"], []) + print(result.exit_code) 
+ print(result.output) + assert result.exit_code == 0 + assert result.output == intf_counters_on_sup_no_counters + + return_code, result = get_result_and_return_code(['portstat']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == intf_counters_on_sup_no_counters + + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" + os.system("cp /tmp/chassis_state_db.json {}" + .format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + + def test_show_intf_counters_on_sup_partial_lc(self): + remove_tmp_cnstat_file() + os.system("cp {} /tmp/".format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.system("cp {} {}".format(os.path.join(test_path, "portstat_db/on_sup_partial_lc/chassis_state_db.json"), + os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "1" + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == intf_counters_on_sup_partial_lc + + return_code, result = get_result_and_return_code(['portstat']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == intf_counters_on_sup_partial_lc + + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" + os.system("cp /tmp/chassis_state_db.json {}" + .format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + @classmethod def teardown_class(cls): print("TEARDOWN") os.environ["PATH"] = os.pathsep.join( os.environ["PATH"].split(os.pathsep)[:-1]) os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" remove_tmp_cnstat_file() + os.system("cp /tmp/chassis_state_db.json {}" + .format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) class TestMultiAsicPortStat(object): diff 
--git a/utilities_common/netstat.py b/utilities_common/netstat.py index 5f17c1f4c6..21b1a0faeb 100755 --- a/utilities_common/netstat.py +++ b/utilities_common/netstat.py @@ -118,3 +118,12 @@ def format_util(brate, port_rate): util = brate/(float(port_rate)*1000*1000/8.0)*100 return "{:.2f}%".format(util) + +def format_util_directly(util): + """ + Format the util without calculation. + """ + if util == STATUS_NA: + return STATUS_NA + else: + return "{:.2f}%".format(float(util)) diff --git a/utilities_common/portstat.py b/utilities_common/portstat.py new file mode 100644 index 0000000000..6942fa5f2a --- /dev/null +++ b/utilities_common/portstat.py @@ -0,0 +1,666 @@ +import datetime +import time +from collections import OrderedDict, namedtuple + +from natsort import natsorted +from tabulate import tabulate +from sonic_py_common import multi_asic +from sonic_py_common import device_info +from swsscommon.swsscommon import SonicV2Connector, CounterTable, PortCounter + +from utilities_common import constants +import utilities_common.multi_asic as multi_asic_util +from utilities_common.netstat import ns_diff, table_as_json, format_brate, format_prate, \ + format_util, format_number_with_comma, format_util_directly + +""" +The order and count of statistics mentioned below needs to be in sync with the values in portstat script +So, any fields added/deleted in here should be reflected in portstat script also +""" +NStats = namedtuple("NStats", "rx_ok, rx_err, rx_drop, rx_ovr, tx_ok,\ + tx_err, tx_drop, tx_ovr, rx_byt, tx_byt,\ + rx_64, rx_65_127, rx_128_255, rx_256_511, rx_512_1023,\ + rx_1024_1518, rx_1519_2047, rx_2048_4095, rx_4096_9216, rx_9217_16383,\ + rx_uca, rx_mca, rx_bca, rx_all,\ + tx_64, tx_65_127, tx_128_255, tx_256_511, tx_512_1023, tx_1024_1518,\ + tx_1519_2047, tx_2048_4095, tx_4096_9216, tx_9217_16383,\ + tx_uca, tx_mca, tx_bca, tx_all,\ + rx_jbr, rx_frag, rx_usize, rx_ovrrun,\ + fec_corr, fec_uncorr, fec_symbol_err") +header_all = ['IFACE', 'STATE', 'RX_OK', 
'RX_BPS', 'RX_PPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', + 'TX_OK', 'TX_BPS', 'TX_PPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] +header_std = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', + 'TX_OK', 'TX_BPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] +header_errors_only = ['IFACE', 'STATE', 'RX_ERR', 'RX_DRP', 'RX_OVR', 'TX_ERR', 'TX_DRP', 'TX_OVR'] +header_fec_only = ['IFACE', 'STATE', 'FEC_CORR', 'FEC_UNCORR', 'FEC_SYMBOL_ERR'] +header_rates_only = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_OK', 'TX_BPS', 'TX_PPS', 'TX_UTIL'] + +rates_key_list = ['RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_BPS', 'TX_PPS', 'TX_UTIL'] +ratestat_fields = ("rx_bps", "rx_pps", "rx_util", "tx_bps", "tx_pps", "tx_util") +RateStats = namedtuple("RateStats", ratestat_fields) + +""" +The order and count of statistics mentioned below needs to be in sync with the values in portstat script +So, any fields added/deleted in here should be reflected in portstat script also +""" +BUCKET_NUM = 45 +counter_bucket_dict = { + 0: ['SAI_PORT_STAT_IF_IN_UCAST_PKTS', 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS'], + 1: ['SAI_PORT_STAT_IF_IN_ERRORS'], + 2: ['SAI_PORT_STAT_IF_IN_DISCARDS'], + 3: ['SAI_PORT_STAT_ETHER_RX_OVERSIZE_PKTS'], + 4: ['SAI_PORT_STAT_IF_OUT_UCAST_PKTS', 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS'], + 5: ['SAI_PORT_STAT_IF_OUT_ERRORS'], + 6: ['SAI_PORT_STAT_IF_OUT_DISCARDS'], + 7: ['SAI_PORT_STAT_ETHER_TX_OVERSIZE_PKTS'], + 8: ['SAI_PORT_STAT_IF_IN_OCTETS'], + 9: ['SAI_PORT_STAT_IF_OUT_OCTETS'], + 10: ['SAI_PORT_STAT_ETHER_IN_PKTS_64_OCTETS'], + 11: ['SAI_PORT_STAT_ETHER_IN_PKTS_65_TO_127_OCTETS'], + 12: ['SAI_PORT_STAT_ETHER_IN_PKTS_128_TO_255_OCTETS'], + 13: ['SAI_PORT_STAT_ETHER_IN_PKTS_256_TO_511_OCTETS'], + 14: ['SAI_PORT_STAT_ETHER_IN_PKTS_512_TO_1023_OCTETS'], + 15: ['SAI_PORT_STAT_ETHER_IN_PKTS_1024_TO_1518_OCTETS'], + 16: ['SAI_PORT_STAT_ETHER_IN_PKTS_1519_TO_2047_OCTETS'], + 17: ['SAI_PORT_STAT_ETHER_IN_PKTS_2048_TO_4095_OCTETS'], + 18: 
['SAI_PORT_STAT_ETHER_IN_PKTS_4096_TO_9216_OCTETS'], + 19: ['SAI_PORT_STAT_ETHER_IN_PKTS_9217_TO_16383_OCTETS'], + 20: ['SAI_PORT_STAT_IF_IN_UCAST_PKTS'], + 21: ['SAI_PORT_STAT_IF_IN_MULTICAST_PKTS'], + 22: ['SAI_PORT_STAT_IF_IN_BROADCAST_PKTS'], + 23: ['SAI_PORT_STAT_IF_IN_UCAST_PKTS', 'SAI_PORT_STAT_IF_IN_MULTICAST_PKTS', + 'SAI_PORT_STAT_IF_IN_BROADCAST_PKTS'], + 24: ['SAI_PORT_STAT_ETHER_OUT_PKTS_64_OCTETS'], + 25: ['SAI_PORT_STAT_ETHER_OUT_PKTS_65_TO_127_OCTETS'], + 26: ['SAI_PORT_STAT_ETHER_OUT_PKTS_128_TO_255_OCTETS'], + 27: ['SAI_PORT_STAT_ETHER_OUT_PKTS_256_TO_511_OCTETS'], + 28: ['SAI_PORT_STAT_ETHER_OUT_PKTS_512_TO_1023_OCTETS'], + 29: ['SAI_PORT_STAT_ETHER_OUT_PKTS_1024_TO_1518_OCTETS'], + 30: ['SAI_PORT_STAT_ETHER_OUT_PKTS_1519_TO_2047_OCTETS'], + 31: ['SAI_PORT_STAT_ETHER_OUT_PKTS_2048_TO_4095_OCTETS'], + 32: ['SAI_PORT_STAT_ETHER_OUT_PKTS_4096_TO_9216_OCTETS'], + 33: ['SAI_PORT_STAT_ETHER_OUT_PKTS_9217_TO_16383_OCTETS'], + 34: ['SAI_PORT_STAT_IF_OUT_UCAST_PKTS'], + 35: ['SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS'], + 36: ['SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS'], + 37: ['SAI_PORT_STAT_IF_OUT_UCAST_PKTS', 'SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS', + 'SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS'], + 38: ['SAI_PORT_STAT_ETHER_STATS_JABBERS'], + 39: ['SAI_PORT_STAT_ETHER_STATS_FRAGMENTS'], + 40: ['SAI_PORT_STAT_ETHER_STATS_UNDERSIZE_PKTS'], + 41: ['SAI_PORT_STAT_IP_IN_RECEIVES'], + 42: ['SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES'], + 43: ['SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES'], + 44: ['SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS'] +} + +STATUS_NA = 'N/A' + +RATES_TABLE_PREFIX = "RATES:" + +COUNTER_TABLE_PREFIX = "COUNTERS:" +COUNTERS_PORT_NAME_MAP = "COUNTERS_PORT_NAME_MAP" + +PORT_STATUS_TABLE_PREFIX = "PORT_TABLE:" +PORT_STATE_TABLE_PREFIX = "PORT_TABLE|" +PORT_OPER_STATUS_FIELD = "oper_status" +PORT_ADMIN_STATUS_FIELD = "admin_status" +PORT_STATUS_VALUE_UP = 'UP' +PORT_STATUS_VALUE_DOWN = 'DOWN' +PORT_SPEED_FIELD = "speed" + +PORT_STATE_UP = 'U' +PORT_STATE_DOWN = 
'D' +PORT_STATE_DISABLED = 'X' + +LINECARD_PORT_STAT_TABLE = 'LINECARD_PORT_STAT_TABLE' +LINECARD_PORT_STAT_MARK_TABLE = 'LINECARD_PORT_STAT_MARK_TABLE' +CHASSIS_MIDPLANE_INFO_TABLE = 'CHASSIS_MIDPLANE_TABLE' + + +class Portstat(object): + def __init__(self, namespace, display_option): + self.db = None + self.multi_asic = multi_asic_util.MultiAsic(display_option, namespace) + if device_info.is_supervisor(): + self.db = SonicV2Connector(use_unix_socket_path=False) + self.db.connect(self.db.CHASSIS_STATE_DB, False) + + def get_cnstat_dict(self): + self.cnstat_dict = OrderedDict() + self.cnstat_dict['time'] = datetime.datetime.now() + self.ratestat_dict = OrderedDict() + if device_info.is_supervisor(): + self.collect_stat_from_lc() + else: + self.collect_stat() + return self.cnstat_dict, self.ratestat_dict + + def collect_stat_from_lc(self): + # Retrieve the current counter values from all LCs + + # Clear stale records + self.db.delete_all_by_pattern(self.db.CHASSIS_STATE_DB, LINECARD_PORT_STAT_TABLE + "*") + self.db.delete_all_by_pattern(self.db.CHASSIS_STATE_DB, LINECARD_PORT_STAT_MARK_TABLE + "*") + + # Check how many linecards are connected + tempdb = SonicV2Connector(use_unix_socket_path=False) + tempdb.connect(tempdb.STATE_DB, False) + linecard_midplane_keys = tempdb.keys(tempdb.STATE_DB, CHASSIS_MIDPLANE_INFO_TABLE + "*") + lc_count = 0 + if not linecard_midplane_keys: + # LC has not published it's Counter which could be due to chassis_port_counter_monitor.service not running + print("No linecards are connected!") + return + else: + for key in linecard_midplane_keys: + linecard_status = tempdb.get(tempdb.STATE_DB, key, "access") + if linecard_status == "True": + lc_count += 1 + + # Notify the Linecards to publish their counter values instantly + self.db.set(self.db.CHASSIS_STATE_DB, "GET_LINECARD_COUNTER|pull", "enable", "true") + time.sleep(2) + + # Check if all LCs have published counters + linecard_names = self.db.keys(self.db.CHASSIS_STATE_DB, 
LINECARD_PORT_STAT_MARK_TABLE + "*") + linecard_port_aliases = self.db.keys(self.db.CHASSIS_STATE_DB, LINECARD_PORT_STAT_TABLE + "*") + if not linecard_port_aliases: + # LC has not published it's Counter which could be due to chassis_port_counter_monitor.service not running + print("Linecard Counter Table is not available.") + return + if len(linecard_names) != lc_count: + print("Not all linecards have published their counter values.") + return + + # Create the dictornaries to store the counter values + cnstat_dict = OrderedDict() + cnstat_dict['time'] = datetime.datetime.now() + ratestat_dict = OrderedDict() + + # Get the counter values from CHASSIS_STATE_DB + for key in linecard_port_aliases: + rx_ok = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_ok") + rx_bps = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_bps") + rx_pps = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_pps") + rx_util = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_util") + rx_err = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_err") + rx_drop = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_drop") + rx_ovr = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_ovr") + tx_ok = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_ok") + tx_bps = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_bps") + tx_pps = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_pps") + tx_util = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_util") + tx_err = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_err") + tx_drop = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_drop") + tx_ovr = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_ovr") + port_alias = key.split("|")[-1] + cnstat_dict[port_alias] = NStats._make([rx_ok, rx_err, rx_drop, rx_ovr, tx_ok, tx_err, tx_drop, tx_ovr] + + [STATUS_NA] * (len(NStats._fields) - 8))._asdict() + ratestat_dict[port_alias] = RateStats._make([rx_bps, rx_pps, rx_util, tx_bps, tx_pps, tx_util]) + self.cnstat_dict.update(cnstat_dict) + self.ratestat_dict.update(ratestat_dict) + + 
@multi_asic_util.run_on_multi_asic + def collect_stat(self): + """ + Collect the statisitics from all the asics present on the + device and store in a dict + """ + + cnstat_dict, ratestat_dict = self.get_cnstat() + self.cnstat_dict.update(cnstat_dict) + self.ratestat_dict.update(ratestat_dict) + + def get_cnstat(self): + """ + Get the counters info from database. + """ + def get_counters(port): + """ + Get the counters from specific table. + """ + fields = ["0"]*BUCKET_NUM + + _, fvs = counter_table.get(PortCounter(), port) + fvs = dict(fvs) + for pos, cntr_list in counter_bucket_dict.items(): + for counter_name in cntr_list: + if counter_name not in fvs: + fields[pos] = STATUS_NA + elif fields[pos] != STATUS_NA: + fields[pos] = str(int(fields[pos]) + int(fvs[counter_name])) + + cntr = NStats._make(fields)._asdict() + return cntr + + def get_rates(table_id): + """ + Get the rates from specific table. + """ + fields = ["0", "0", "0", "0", "0", "0"] + for pos, name in enumerate(rates_key_list): + full_table_id = RATES_TABLE_PREFIX + table_id + counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, name) + if counter_data is None: + fields[pos] = STATUS_NA + elif fields[pos] != STATUS_NA: + fields[pos] = float(counter_data) + cntr = RateStats._make(fields) + return cntr + + # Get the info from database + counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + # Build a dictionary of the stats + cnstat_dict = OrderedDict() + cnstat_dict['time'] = datetime.datetime.now() + ratestat_dict = OrderedDict() + counter_table = CounterTable(self.db.get_redis_client(self.db.COUNTERS_DB)) + if counter_port_name_map is None: + return cnstat_dict, ratestat_dict + for port in natsorted(counter_port_name_map): + port_name = port.split(":")[0] + if self.multi_asic.skip_display(constants.PORT_OBJ, port_name): + continue + cnstat_dict[port] = get_counters(port) + ratestat_dict[port] = get_rates(counter_port_name_map[port]) + return cnstat_dict, 
ratestat_dict + + def get_port_speed(self, port_name): + """ + Get the port speed + """ + # Get speed from APPL_DB + state_db_table_id = PORT_STATE_TABLE_PREFIX + port_name + app_db_table_id = PORT_STATUS_TABLE_PREFIX + port_name + for ns in self.multi_asic.get_ns_list_based_on_options(): + self.db = multi_asic.connect_to_all_dbs_for_ns(ns) + speed = self.db.get(self.db.STATE_DB, state_db_table_id, PORT_SPEED_FIELD) + oper_status = self.db.get(self.db.APPL_DB, app_db_table_id, PORT_OPER_STATUS_FIELD) + if speed is None or speed == STATUS_NA or oper_status != "up": + speed = self.db.get(self.db.APPL_DB, app_db_table_id, PORT_SPEED_FIELD) + if speed is not None: + return int(speed) + return STATUS_NA + + def get_port_state(self, port_name): + """ + Get the port state + """ + if device_info.is_supervisor(): + self.db.connect(self.db.CHASSIS_STATE_DB, False) + return self.db.get(self.db.CHASSIS_STATE_DB, LINECARD_PORT_STAT_TABLE + "|" + port_name, "state") + + full_table_id = PORT_STATUS_TABLE_PREFIX + port_name + for ns in self.multi_asic.get_ns_list_based_on_options(): + self.db = multi_asic.connect_to_all_dbs_for_ns(ns) + admin_state = self.db.get(self.db.APPL_DB, full_table_id, PORT_ADMIN_STATUS_FIELD) + oper_state = self.db.get(self.db.APPL_DB, full_table_id, PORT_OPER_STATUS_FIELD) + + if admin_state is None or oper_state is None: + continue + if admin_state.upper() == PORT_STATUS_VALUE_DOWN: + return PORT_STATE_DISABLED + elif admin_state.upper() == PORT_STATUS_VALUE_UP and oper_state.upper() == PORT_STATUS_VALUE_UP: + return PORT_STATE_UP + elif admin_state.upper() == PORT_STATUS_VALUE_UP and oper_state.upper() == PORT_STATUS_VALUE_DOWN: + return PORT_STATE_DOWN + else: + return STATUS_NA + return STATUS_NA + + def cnstat_print(self, cnstat_dict, ratestat_dict, intf_list, use_json, print_all, + errors_only, fec_stats_only, rates_only, detail=False): + """ + Print the cnstat. 
+ """ + + if intf_list and detail: + self.cnstat_intf_diff_print(cnstat_dict, {}, intf_list) + return None + + table = [] + header = None + + for key in natsorted(cnstat_dict.keys()): + if key == 'time': + continue + if intf_list and key not in intf_list: + continue + port_speed = self.get_port_speed(key) + data = cnstat_dict[key] + rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) + if print_all: + header = header_all + table.append((key, self.get_port_state(key), + format_number_with_comma(data["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(data["rx_err"]), + format_number_with_comma(data["rx_drop"]), + format_number_with_comma(data["rx_ovr"]), + format_number_with_comma(data["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + format_number_with_comma(data["tx_err"]), + format_number_with_comma(data["tx_drop"]), + format_number_with_comma(data["tx_ovr"]))) + elif errors_only: + header = header_errors_only + table.append((key, self.get_port_state(key), + format_number_with_comma(data["rx_err"]), + format_number_with_comma(data["rx_drop"]), + format_number_with_comma(data["rx_ovr"]), + format_number_with_comma(data["tx_err"]), + format_number_with_comma(data["tx_drop"]), + format_number_with_comma(data["tx_ovr"]))) + elif fec_stats_only: + header = header_fec_only + table.append((key, self.get_port_state(key), + format_number_with_comma(data['fec_corr']), + format_number_with_comma(data['fec_uncorr']), + format_number_with_comma(data['fec_symbol_err']))) + elif rates_only: + header = header_rates_only + table.append((key, self.get_port_state(key), + format_number_with_comma(data["rx_ok"]), + format_brate(rates.rx_bps), + 
format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(data["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util))) + else: + header = header_std + table.append((key, self.get_port_state(key), + format_number_with_comma(data["rx_ok"]), + format_brate(rates.rx_bps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(data["rx_err"]), + format_number_with_comma(data["rx_drop"]), + format_number_with_comma(data["rx_ovr"]), + format_number_with_comma(data["tx_ok"]), + format_brate(rates.tx_bps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + format_number_with_comma(data["tx_err"]), + format_number_with_comma(data["tx_drop"]), + format_number_with_comma(data["tx_ovr"]))) + if table: + if use_json: + print(table_as_json(table, header)) + else: + print(tabulate(table, header, tablefmt='simple', stralign='right')) + if (multi_asic.is_multi_asic() or device_info.is_chassis()) and not use_json: + print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") + + def cnstat_intf_diff_print(self, cnstat_new_dict, cnstat_old_dict, intf_list): + """ + Print the difference between two cnstat results for interface. + """ + + for key in natsorted(cnstat_new_dict.keys()): + cntr = cnstat_new_dict.get(key) + if key == 'time': + continue + + if key in cnstat_old_dict: + old_cntr = cnstat_old_dict.get(key) + else: + old_cntr = NStats._make([0] * BUCKET_NUM)._asdict() + + if intf_list and key not in intf_list: + continue + + print("Packets Received 64 Octets..................... 
{}".format(ns_diff(cntr['rx_64'], + old_cntr['rx_64']))) + print("Packets Received 65-127 Octets................. {}".format(ns_diff(cntr['rx_65_127'], + old_cntr['rx_65_127']))) + print("Packets Received 128-255 Octets................ {}".format(ns_diff(cntr['rx_128_255'], + old_cntr['rx_128_255']))) + print("Packets Received 256-511 Octets................ {}".format(ns_diff(cntr['rx_256_511'], + old_cntr['rx_256_511']))) + print("Packets Received 512-1023 Octets............... {}".format(ns_diff(cntr['rx_512_1023'], + old_cntr['rx_512_1023']))) + print("Packets Received 1024-1518 Octets.............. {}".format(ns_diff(cntr['rx_1024_1518'], + old_cntr['rx_1024_1518']))) + print("Packets Received 1519-2047 Octets.............. {}".format(ns_diff(cntr['rx_1519_2047'], + old_cntr['rx_1519_2047']))) + print("Packets Received 2048-4095 Octets.............. {}".format(ns_diff(cntr['rx_2048_4095'], + old_cntr['rx_2048_4095']))) + print("Packets Received 4096-9216 Octets.............. {}".format(ns_diff(cntr['rx_4096_9216'], + old_cntr['rx_4096_9216']))) + print("Packets Received 9217-16383 Octets............. {}".format(ns_diff(cntr['rx_9217_16383'], + old_cntr['rx_9217_16383']))) + + print("") + print("Total Packets Received Without Errors.......... {}".format(ns_diff(cntr['rx_all'], + old_cntr['rx_all']))) + print("Unicast Packets Received....................... {}".format(ns_diff(cntr['rx_uca'], + old_cntr['rx_uca']))) + print("Multicast Packets Received..................... {}".format(ns_diff(cntr['rx_mca'], + old_cntr['rx_mca']))) + print("Broadcast Packets Received..................... {}".format(ns_diff(cntr['rx_bca'], + old_cntr['rx_bca']))) + + print("") + print("Jabbers Received............................... {}".format(ns_diff(cntr['rx_jbr'], + old_cntr['rx_jbr']))) + print("Fragments Received............................. {}".format(ns_diff(cntr['rx_frag'], + old_cntr['rx_frag']))) + print("Undersize Received............................. 
{}".format(ns_diff(cntr['rx_usize'], + old_cntr['rx_usize']))) + print("Overruns Received.............................. {}".format(ns_diff(cntr["rx_ovrrun"], + old_cntr["rx_ovrrun"]))) + + print("") + print("Packets Transmitted 64 Octets.................. {}".format(ns_diff(cntr['tx_64'], + old_cntr['tx_64']))) + print("Packets Transmitted 65-127 Octets.............. {}".format(ns_diff(cntr['tx_65_127'], + old_cntr['tx_65_127']))) + print("Packets Transmitted 128-255 Octets............. {}".format(ns_diff(cntr['tx_128_255'], + old_cntr['tx_128_255']))) + print("Packets Transmitted 256-511 Octets............. {}".format(ns_diff(cntr['tx_256_511'], + old_cntr['tx_256_511']))) + print("Packets Transmitted 512-1023 Octets............ {}".format(ns_diff(cntr['tx_512_1023'], + old_cntr['tx_512_1023']))) + print("Packets Transmitted 1024-1518 Octets........... {}".format(ns_diff(cntr['tx_1024_1518'], + old_cntr['tx_1024_1518']))) + print("Packets Transmitted 1519-2047 Octets........... {}".format(ns_diff(cntr['tx_1519_2047'], + old_cntr['tx_1519_2047']))) + print("Packets Transmitted 2048-4095 Octets........... {}".format(ns_diff(cntr['tx_2048_4095'], + old_cntr['tx_2048_4095']))) + print("Packets Transmitted 4096-9216 Octets........... {}".format(ns_diff(cntr['tx_4096_9216'], + old_cntr['tx_4096_9216']))) + print("Packets Transmitted 9217-16383 Octets.......... {}".format(ns_diff(cntr['tx_9217_16383'], + old_cntr['tx_9217_16383']))) + + print("") + print("Total Packets Transmitted Successfully......... {}".format(ns_diff(cntr['tx_all'], + old_cntr['tx_all']))) + print("Unicast Packets Transmitted.................... {}".format(ns_diff(cntr['tx_uca'], + old_cntr['tx_uca']))) + print("Multicast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_mca'], + old_cntr['tx_mca']))) + print("Broadcast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_bca'], + old_cntr['tx_bca']))) + + print("Time Since Counters Last Cleared............... 
" + str(cnstat_old_dict.get('time'))) + + def cnstat_diff_print(self, cnstat_new_dict, cnstat_old_dict, + ratestat_dict, intf_list, use_json, + print_all, errors_only, fec_stats_only, + rates_only, detail=False): + """ + Print the difference between two cnstat results. + """ + + if intf_list and detail: + self.cnstat_intf_diff_print(cnstat_new_dict, cnstat_old_dict, intf_list) + return None + + table = [] + header = None + + for key in natsorted(cnstat_new_dict.keys()): + cntr = cnstat_new_dict.get(key) + if key == 'time': + continue + old_cntr = None + if key in cnstat_old_dict: + old_cntr = cnstat_old_dict.get(key) + + rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(ratestat_fields))) + + if intf_list and key not in intf_list: + continue + port_speed = self.get_port_speed(key) + + if print_all: + header = header_all + if old_cntr is not None: + table.append((key, self.get_port_state(key), + ns_diff(cntr["rx_ok"], old_cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + ns_diff(cntr["rx_err"], old_cntr["rx_err"]), + ns_diff(cntr["rx_drop"], old_cntr["rx_drop"]), + ns_diff(cntr["rx_ovr"], old_cntr["rx_ovr"]), + ns_diff(cntr["tx_ok"], old_cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + ns_diff(cntr["tx_err"], old_cntr["tx_err"]), + ns_diff(cntr["tx_drop"], old_cntr["tx_drop"]), + ns_diff(cntr["tx_ovr"], old_cntr["tx_ovr"]))) + else: + table.append((key, self.get_port_state(key), + format_number_with_comma(cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(cntr["rx_err"]), + 
format_number_with_comma(cntr["rx_drop"]), + format_number_with_comma(cntr["rx_ovr"]), + format_number_with_comma(cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + format_number_with_comma(cntr["tx_err"]), + format_number_with_comma(cntr["tx_drop"]), + format_number_with_comma(cntr["tx_ovr"]))) + elif errors_only: + header = header_errors_only + if old_cntr is not None: + table.append((key, self.get_port_state(key), + ns_diff(cntr["rx_err"], old_cntr["rx_err"]), + ns_diff(cntr["rx_drop"], old_cntr["rx_drop"]), + ns_diff(cntr["rx_ovr"], old_cntr["rx_ovr"]), + ns_diff(cntr["tx_err"], old_cntr["tx_err"]), + ns_diff(cntr["tx_drop"], old_cntr["tx_drop"]), + ns_diff(cntr["tx_ovr"], old_cntr["tx_ovr"]))) + else: + table.append((key, self.get_port_state(key), + format_number_with_comma(cntr["rx_err"]), + format_number_with_comma(cntr["rx_drop"]), + format_number_with_comma(cntr["rx_ovr"]), + format_number_with_comma(cntr["tx_err"]), + format_number_with_comma(cntr["tx_drop"]), + format_number_with_comma(cntr["tx_ovr"]))) + elif fec_stats_only: + header = header_fec_only + if old_cntr is not None: + table.append((key, self.get_port_state(key), + ns_diff(cntr['fec_corr'], old_cntr['fec_corr']), + ns_diff(cntr['fec_uncorr'], old_cntr['fec_uncorr']), + ns_diff(cntr['fec_symbol_err'], old_cntr['fec_symbol_err']))) + else: + table.append((key, self.get_port_state(key), + format_number_with_comma(cntr['fec_corr']), + format_number_with_comma(cntr['fec_uncorr']), + format_number_with_comma(cntr['fec_symbol_err']))) + + elif rates_only: + header = header_rates_only + if old_cntr is not None: + table.append((key, + self.get_port_state(key), + ns_diff(cntr["rx_ok"], old_cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else 
format_util_directly(rates.rx_util), + ns_diff(cntr["tx_ok"], old_cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util))) + else: + table.append((key, + self.get_port_state(key), + format_number_with_comma(cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util))) + else: + header = header_std + if old_cntr is not None: + table.append((key, + self.get_port_state(key), + ns_diff(cntr["rx_ok"], old_cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + ns_diff(cntr["rx_err"], old_cntr["rx_err"]), + ns_diff(cntr["rx_drop"], old_cntr["rx_drop"]), + ns_diff(cntr["rx_ovr"], old_cntr["rx_ovr"]), + ns_diff(cntr["tx_ok"], old_cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + ns_diff(cntr["tx_err"], old_cntr["tx_err"]), + ns_diff(cntr["tx_drop"], old_cntr["tx_drop"]), + ns_diff(cntr["tx_ovr"], old_cntr["tx_ovr"]))) + else: + table.append((key, + self.get_port_state(key), + format_number_with_comma(cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(cntr["rx_err"]), + format_number_with_comma(cntr["rx_drop"]), + format_number_with_comma(cntr["rx_ovr"]), + format_number_with_comma(cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_util(rates.tx_bps, 
port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + format_number_with_comma(cntr["tx_err"]), + format_number_with_comma(cntr["tx_drop"]), + format_number_with_comma(cntr["tx_ovr"]))) + if table: + if use_json: + print(table_as_json(table, header)) + else: + print(tabulate(table, header, tablefmt='simple', stralign='right')) + if (multi_asic.is_multi_asic() or device_info.is_chassis()) and not use_json: + print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") From ee906811f33f213b4d5358c2cca30a4b0e54f2c6 Mon Sep 17 00:00:00 2001 From: Sudharsan Dhamal Gopalarathnam Date: Mon, 2 Sep 2024 06:44:18 -0700 Subject: [PATCH 32/67] Revert "Remove suppress-fib-pending CLI and make route_check.py check suppress-fib in BGP configuration" (#3477) Reverts #3331 BGP zebra enhancements is merged to master branch sonic-net/sonic-buildimage#19717 Reverting the revert of bgp suppress pending feature to enable it in master branch --- config/main.py | 14 +++++++++++ doc/Command-Reference.md | 38 ++++++++++++++++++++++++++++++ scripts/route_check.py | 35 ++++++++++----------------- show/main.py | 11 +++++++++ tests/route_check_test.py | 7 ++---- tests/suppress_pending_fib_test.py | 34 ++++++++++++++++++++++++++ 6 files changed, 111 insertions(+), 28 deletions(-) create mode 100644 tests/suppress_pending_fib_test.py diff --git a/config/main.py b/config/main.py index 7509628a67..0399639d97 100644 --- a/config/main.py +++ b/config/main.py @@ -2376,6 +2376,20 @@ def synchronous_mode(sync_mode): config reload -y \n Option 2. systemctl restart swss""" % sync_mode) + +# +# 'suppress-fib-pending' command ('config suppress-fib-pending ...') +# +@config.command('suppress-fib-pending') +@click.argument('state', metavar='', required=True, type=click.Choice(['enabled', 'disabled'])) +@clicommon.pass_db +def suppress_pending_fib(db, state): + ''' Enable or disable pending FIB suppression. 
Once enabled, + BGP will not advertise routes that are not yet installed in the hardware ''' + + config_db = db.cfgdb + config_db.mod_entry('DEVICE_METADATA', 'localhost', {"suppress-fib-pending": state}) + # # 'yang_config_validation' command ('config yang_config_validation ...') # diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 689ca23b73..cdc3f5644d 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -2612,6 +2612,26 @@ This command displays the routing policy that takes precedence over the other ro Exit routemap ``` +**show suppress-fib-pending** + +This command is used to show the status of suppress pending FIB feature. +When enabled, BGP will not advertise routes which aren't yet offloaded. + +- Usage: + ``` + show suppress-fib-pending + ``` + +- Examples: + ``` + admin@sonic:~$ show suppress-fib-pending + Enabled + ``` + ``` + admin@sonic:~$ show suppress-fib-pending + Disabled + ``` + **show bgp device-global** This command displays BGP device global configuration. @@ -2724,6 +2744,24 @@ This command is used to remove particular IPv4 or IPv6 BGP neighbor configuratio admin@sonic:~$ sudo config bgp remove neighbor SONIC02SPINE ``` +**config suppress-fib-pending** + +This command is used to enable or disable announcements of routes not yet installed in the HW. +Once enabled, BGP will not advertise routes which aren't yet offloaded. + +- Usage: + ``` + config suppress-fib-pending + ``` + +- Examples: + ``` + admin@sonic:~$ sudo config suppress-fib-pending enabled + ``` + ``` + admin@sonic:~$ sudo config suppress-fib-pending disabled + ``` + **config bgp device-global tsa/w-ecmp** This command is used to manage BGP device global configuration. 
diff --git a/scripts/route_check.py b/scripts/route_check.py index 2fbe041547..a1abd3c352 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -328,16 +328,6 @@ def get_asicdb_routes(namespace): return (selector, subs, sorted(rt)) -def is_bgp_suppress_fib_pending_enabled(namespace): - """ - Retruns True if FIB suppression is enabled in BGP config, False otherwise - """ - show_run_cmd = ['show', 'runningconfiguration', 'bgp', '-n', namespace] - - output = subprocess.check_output(show_run_cmd, text=True) - return 'bgp suppress-fib-pending' in output - - def is_suppress_fib_pending_enabled(namespace): """ Returns True if FIB suppression is enabled, False otherwise @@ -791,20 +781,19 @@ def check_routes(namespace): results[namespace] = {} results[namespace]["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss - if is_bgp_suppress_fib_pending_enabled(namespace): - rt_frr_miss = check_frr_pending_routes(namespace) - - if rt_frr_miss: - if namespace not in results: - results[namespace] = {} - results[namespace]["missed_FRR_routes"] = rt_frr_miss + rt_frr_miss = check_frr_pending_routes(namespace) - if results: - if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: - print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} but all " - "routes in APPL_DB and ASIC_DB are in sync".format(namespace)) - if is_suppress_fib_pending_enabled(namespace): - mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl) + if rt_frr_miss: + if namespace not in results: + results[namespace] = {} + results[namespace]["missed_FRR_routes"] = rt_frr_miss + + if results: + if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: + print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} \ + but all routes in APPL_DB and ASIC_DB are in sync".format(namespace)) + if is_suppress_fib_pending_enabled(namespace): + mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl) if results: 
print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}") diff --git a/show/main.py b/show/main.py index c9e5e2086c..8d3f117b2f 100755 --- a/show/main.py +++ b/show/main.py @@ -2159,6 +2159,17 @@ def peer(db, peer_ip): click.echo(tabulate(bfd_body, bfd_headers)) +# 'suppress-fib-pending' subcommand ("show suppress-fib-pending") +@cli.command('suppress-fib-pending') +@clicommon.pass_db +def suppress_pending_fib(db): + """ Show the status of suppress pending FIB feature """ + + field_values = db.cfgdb.get_entry('DEVICE_METADATA', 'localhost') + state = field_values.get('suppress-fib-pending', 'disabled').title() + click.echo(state) + + # asic-sdk-health-event subcommand ("show asic-sdk-health-event") @cli.group(cls=clicommon.AliasedGroup) def asic_sdk_health_event(): diff --git a/tests/route_check_test.py b/tests/route_check_test.py index 26c632d742..1f92b3d19a 100644 --- a/tests/route_check_test.py +++ b/tests/route_check_test.py @@ -252,11 +252,8 @@ def run_test(self, ct_data): def mock_check_output(self, ct_data, *args, **kwargs): ns = self.extract_namespace_from_args(args[0]) - if 'show runningconfiguration bgp' in ' '.join(args[0]): - return 'bgp suppress-fib-pending' - else: - routes = ct_data.get(FRR_ROUTES, {}).get(ns, {}) - return json.dumps(routes) + routes = ct_data.get(FRR_ROUTES, {}).get(ns, {}) + return json.dumps(routes) def assert_results(self, ct_data, ret, res): expect_ret = ct_data.get(RET, 0) diff --git a/tests/suppress_pending_fib_test.py b/tests/suppress_pending_fib_test.py new file mode 100644 index 0000000000..b4dcc7d4bc --- /dev/null +++ b/tests/suppress_pending_fib_test.py @@ -0,0 +1,34 @@ +from click.testing import CliRunner + +import config.main as config +import show.main as show +from utilities_common.db import Db + + +class TestSuppressFibPending: + def test_synchronous_mode(self): + runner = CliRunner() + + db = Db() + + result = runner.invoke(config.config.commands['suppress-fib-pending'], 
['enabled'], obj=db) + print(result.output) + assert result.exit_code == 0 + assert db.cfgdb.get_entry('DEVICE_METADATA', 'localhost')['suppress-fib-pending'] == 'enabled' + + result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db) + assert result.exit_code == 0 + assert result.output == 'Enabled\n' + + result = runner.invoke(config.config.commands['suppress-fib-pending'], ['disabled'], obj=db) + print(result.output) + assert result.exit_code == 0 + assert db.cfgdb.get_entry('DEVICE_METADATA', 'localhost')['suppress-fib-pending'] == 'disabled' + + result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db) + assert result.exit_code == 0 + assert result.output == 'Disabled\n' + + result = runner.invoke(config.config.commands['suppress-fib-pending'], ['invalid-input'], obj=db) + print(result.output) + assert result.exit_code != 0 From d25e531f1b587ffd4c280a8ca2b7ad6a15f339ab Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Tue, 3 Sep 2024 04:17:11 +0300 Subject: [PATCH 33/67] [reboot]: Allow reboot to happen regardless vendor hook errors (#3454) * [reboot]: Allow reboot to happen regardless vendor hook errors. Signed-off-by: Nazarii Hnydyn * [reboot]: Handle review comments. Signed-off-by: Nazarii Hnydyn --------- Signed-off-by: Nazarii Hnydyn --- scripts/reboot | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/scripts/reboot b/scripts/reboot index b6f8ff96fb..3b4717a17c 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -41,7 +41,6 @@ REBOOT_SCRIPT_NAME=$(basename $0) REBOOT_TYPE="${REBOOT_SCRIPT_NAME}" TAG_LATEST=no REBOOT_FLAGS="" -FORCE_REBOOT="no" function debug() { @@ -192,7 +191,6 @@ function parse_options() ;; f ) REBOOT_FLAGS+=" -f" - FORCE_REBOOT="yes" ;; esac done @@ -278,12 +276,9 @@ fi if [ -x ${DEVPATH}/${PLATFORM}/${PRE_REBOOT_HOOK} ]; then debug "Executing the pre-reboot script" ${DEVPATH}/${PLATFORM}/${PRE_REBOOT_HOOK} - EXIT_CODE=$? 
- if [[ ${EXIT_CODE} != ${EXIT_SUCCESS} ]]; then - if [[ "${FORCE_REBOOT}" != "yes" ]]; then - echo "Reboot is interrupted: use -f (force) to override" - exit ${EXIT_ERROR} - fi + EXIT_CODE="$?" + if [[ "${EXIT_CODE}" != "${EXIT_SUCCESS}" ]]; then + debug "WARNING: Failed to handle pre-reboot script: rc=${EXIT_CODE}" fi fi From dc955e8002a43ae5c1032b05483c37ef710af7db Mon Sep 17 00:00:00 2001 From: Tomer Shalvi <116184476+tshalvi@users.noreply.github.com> Date: Tue, 3 Sep 2024 11:40:28 +0300 Subject: [PATCH 34/67] [Mellanox] Add CMIS Host Management Files to 'show techsupport' Dumps (#3501) - What I did For Mellanox platforms, I added the following CMIS host management-related files to the 'show techsupport' dumps (if they exist): sai.profile, pmon_daemon_control.json, media_settings.json, optics_si_settings.json, and autoneg.status. - How I did it I copied the relevant files from the SKU/platform folder and ran the 'show interface autoneg status' command to store the auto-negotiation status for all ports. - How to verify it Run 'show techsupport' and verify that autoneg.status is located in the 'dumps' directory and that the other files are present in the cmis-host-mgmt path within the generated dump. 
--- scripts/generate_dump | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/scripts/generate_dump b/scripts/generate_dump index 3d0ef3430d..38774c4a37 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1209,6 +1209,16 @@ collect_mellanox() { local timeout_cmd="timeout --foreground ${TIMEOUT_MIN}m" local sai_dump_folder="/tmp/saisdkdump" local sai_dump_filename="${sai_dump_folder}/sai_sdk_dump_$(date +"%m_%d_%Y_%I_%M_%p")" + local platform=$(python3 -c "from sonic_py_common import device_info; print(device_info.get_platform())") + local platform_folder="/usr/share/sonic/device/${platform}" + local hwsku=$(python3 -c "from sonic_py_common import device_info; print(device_info.get_hwsku())") + local sku_folder="/usr/share/sonic/device/${platform}/${hwsku}" + local cmis_host_mgmt_files=( + "/tmp/nv-syncd-shared/sai.profile" + "${sku_folder}/pmon_daemon_control.json" + "${sku_folder}/media_settings.json" + "${sku_folder}/optics_si_settings.json" + ) if [[ "$( docker container inspect -f '{{.State.Running}}' syncd )" == "true" ]]; then if [[ x"$(sonic-db-cli APPL_DB EXISTS PORT_TABLE:PortInitDone)" == x"1" ]]; then @@ -1251,6 +1261,21 @@ collect_mellanox() { fi save_cmd "get_component_versions.py" "component_versions" + + # Save CMIS-host-management related files + local cmis_host_mgmt_path="cmis-host-mgmt" + + for file in "${cmis_host_mgmt_files[@]}"; do + if [[ -f "${file}" ]]; then + ${CMD_PREFIX}save_file "${file}" "$cmis_host_mgmt_path" false true + fi + done + + if [[ ! 
-f "${sku_folder}/pmon_daemon_control.json" && -f "${platform_folder}/pmon_daemon_control.json" ]]; then + ${CMD_PREFIX}save_file "${platform_folder}/pmon_daemon_control.json" "$cmis_host_mgmt_path" false true + fi + + save_cmd "show interfaces autoneg status" "autoneg.status" } ############################################################################### From 544584ea4a32ca890d8975c830780a042e701cd0 Mon Sep 17 00:00:00 2001 From: DavidZagury <32644413+DavidZagury@users.noreply.github.com> Date: Tue, 3 Sep 2024 11:41:18 +0300 Subject: [PATCH 35/67] Add back the option f to the reboot script (#3492) - What I did Add back the support to call reboot -f The support for this option was accidentally removed as part of this PR #3203 The same PR for 202311 doesn't have this mistake - #3204 - How I did it - How to verify it Call reboot -f --- scripts/reboot | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/reboot b/scripts/reboot index 3b4717a17c..044334af3e 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -178,7 +178,7 @@ function check_conflict_boot_in_fw_update() function parse_options() { - while getopts "h?v" opt; do + while getopts "h?vf" opt; do case ${opt} in h|\? ) show_help_and_exit From 4c7e54a9fe8c7c78b661433208e0b519a0bc81bb Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Tue, 3 Sep 2024 11:43:19 +0300 Subject: [PATCH 36/67] [qos reload] Fix "config qos reload" overriding entire CONFIG_DB (#3479) Fixes sonic-net/sonic-buildimage#15894 - What I did config qos reload command uses a combination of sonic-cfggen's flags -d and --write-to-db that makes it override entire CONFIG_DB, updating every key. This leads to issues with Orchs, daemons that do not support updating keys in CONFIG_DB. Best case, it causes errors in logs. - How I did it First, render templates to temporary files, then load those files into CONFIG_DB. 
Also, fixed an issue where using dry_run option only produced QOS config but not the buffer configuration and updated test files accordingly. - How to verify it Run on switch: root@sonic/home/admin# config qos reload Running command: /usr/local/bin/sonic-cfggen -d -t /usr/share/sonic/device/x86_64-mlnx_msn2100-r0/ACS-MSN2100/buffers_dynamic.json.j2,/tmp/cfg_buffer.json -t /usr/share/sonic/device/x86_64-mlnx_msn2100-r0/ACS-MSN2100/qos.json.j2,/tmp/cfg_qos.json -y /etc/sonic/sonic_version.yml Running command: /usr/local/bin/sonic-cfggen -j /tmp/cfg_buffer.json -j /tmp/cfg_qos.json --write-to-db Signed-off-by: Stepan Blyschak --- config/main.py | 26 +- tests/qos_config_input/0/config_qos.json | 574 +++++++++++++++++++--- tests/qos_config_input/1/config_qos.json | 574 +++++++++++++++++++--- tests/qos_config_input/config_qos.json | 590 ++++++++++++++++++++--- 4 files changed, 1574 insertions(+), 190 deletions(-) diff --git a/config/main.py b/config/main.py index 0399639d97..80f08bebd9 100644 --- a/config/main.py +++ b/config/main.py @@ -3141,7 +3141,7 @@ def reload(ctx, no_dynamic_buffer, no_delay, dry_run, json_data, ports, verbose) _, hwsku_path = device_info.get_paths_to_platform_and_hwsku_dirs() sonic_version_file = device_info.get_sonic_version_file() - from_db = ['-d', '--write-to-db'] + from_db = ['-d'] if dry_run: from_db = ['--additional-data'] + [str(json_data)] if json_data else [] @@ -3187,11 +3187,27 @@ def reload(ctx, no_dynamic_buffer, no_delay, dry_run, json_data, ports, verbose) ) if os.path.isfile(qos_template_file): cmd_ns = [] if ns is DEFAULT_NAMESPACE else ['-n', str(ns)] - fname = "{}{}".format(dry_run, asic_id_suffix) if dry_run else "config-db" - command = [SONIC_CFGGEN_PATH] + cmd_ns + from_db + ['-t', '{},{}'.format(buffer_template_file, fname), '-t', '{},{}'.format(qos_template_file, fname), '-y', sonic_version_file] - # Apply the configurations only when both buffer and qos - # configuration files are present + buffer_fname = 
"/tmp/cfg_buffer{}.json".format(asic_id_suffix) + qos_fname = "/tmp/cfg_qos{}.json".format(asic_id_suffix) + + command = [SONIC_CFGGEN_PATH] + cmd_ns + from_db + [ + '-t', '{},{}'.format(buffer_template_file, buffer_fname), + '-t', '{},{}'.format(qos_template_file, qos_fname), + '-y', sonic_version_file + ] clicommon.run_command(command, display_cmd=True) + + command = [SONIC_CFGGEN_PATH] + cmd_ns + ["-j", buffer_fname, "-j", qos_fname] + if dry_run: + out, rc = clicommon.run_command(command + ["--print-data"], display_cmd=True, return_cmd=True) + if rc != 0: + # clicommon.run_command does this by default when rc != 0 and return_cmd=False + sys.exit(rc) + with open("{}{}".format(dry_run, asic_id_suffix), 'w') as f: + json.dump(json.loads(out), f, sort_keys=True, indent=4) + else: + clicommon.run_command(command + ["--write-to-db"], display_cmd=True) + else: click.secho("QoS definition template not found at {}".format( qos_template_file diff --git a/tests/qos_config_input/0/config_qos.json b/tests/qos_config_input/0/config_qos.json index 40c1903a06..5ef4b07f8d 100644 --- a/tests/qos_config_input/0/config_qos.json +++ b/tests/qos_config_input/0/config_qos.json @@ -1,52 +1,466 @@ { - "TC_TO_PRIORITY_GROUP_MAP": { - "AZURE": { - "0": "0", - "1": "0", - "2": "0", - "3": "3", - "4": "4", - "5": "0", - "6": "0", - "7": "7" + "BUFFER_PG": { + "Ethernet0|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet100|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet104|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet108|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet112|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet116|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet120|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet124|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet12|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet16|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet20|0": 
{ + "profile": "ingress_lossy_profile" + }, + "Ethernet24|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet28|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet32|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet36|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet40|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet44|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet48|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet4|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet52|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet56|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet60|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet64|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet68|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet72|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet76|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet80|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet84|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet88|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet8|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet92|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet96|0": { + "profile": "ingress_lossy_profile" } }, - "MAP_PFC_PRIORITY_TO_QUEUE": { - "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "BUFFER_POOL": { + "egress_lossless_pool": { + "mode": "static", + "size": "12766208", + "type": "egress" + }, + "egress_lossy_pool": { + "mode": "dynamic", + "size": "7326924", + "type": "egress" + }, + "ingress_lossless_pool": { + "mode": "dynamic", + "size": "12766208", + "type": "ingress" } }, - "TC_TO_QUEUE_MAP": { + "BUFFER_PROFILE": { + "egress_lossless_profile": { + "pool": "egress_lossless_pool", + "size": "0", + "static_th": "12766208" + }, + "egress_lossy_profile": { + "dynamic_th": "3", + 
"pool": "egress_lossy_pool", + "size": "1518" + }, + "ingress_lossy_profile": { + "dynamic_th": "3", + "pool": "ingress_lossless_pool", + "size": "0" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet100|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet104|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet108|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet112|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet116|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet120|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet124|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet12|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|0-2": { + "profile": "egress_lossy_profile" + }, + 
"Ethernet16|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet16|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet20|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet24|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet28|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet32|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet36|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet40|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet44|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet48|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet4|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|3-4": { + "profile": "egress_lossless_profile" + }, + 
"Ethernet52|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet56|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet60|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet64|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet68|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet72|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet76|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet80|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet84|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet88|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet8|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|0-2": { 
+ "profile": "egress_lossy_profile" + }, + "Ethernet92|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet92|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet96|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "Ethernet0": "300m", + "Ethernet100": "300m", + "Ethernet104": "300m", + "Ethernet108": "300m", + "Ethernet112": "300m", + "Ethernet116": "300m", + "Ethernet12": "300m", + "Ethernet120": "300m", + "Ethernet124": "300m", + "Ethernet16": "300m", + "Ethernet20": "300m", + "Ethernet24": "300m", + "Ethernet28": "300m", + "Ethernet32": "300m", + "Ethernet36": "300m", + "Ethernet4": "300m", + "Ethernet40": "300m", + "Ethernet44": "300m", + "Ethernet48": "300m", + "Ethernet52": "300m", + "Ethernet56": "300m", + "Ethernet60": "300m", + "Ethernet64": "300m", + "Ethernet68": "300m", + "Ethernet72": "300m", + "Ethernet76": "300m", + "Ethernet8": "300m", + "Ethernet80": "300m", + "Ethernet84": "300m", + "Ethernet88": "300m", + "Ethernet92": "300m", + "Ethernet96": "300m" } }, "DSCP_TO_TC_MAP": { "AZURE": { - "0" : "1", - "1" : "1", - "2" : "1", - "3" : "3", - "4" : "4", - "5" : "2", - "6" : "1", - "7" : "1", - "8" : "0", - "9" : "1", + "0": "1", + "1": "1", "10": "1", "11": "1", "12": "1", @@ -57,6 +471,7 @@ "17": "1", "18": "1", "19": "1", + "2": "1", "20": "1", "21": "1", "22": "1", @@ -67,6 +482,7 @@ "27": "1", "28": "1", "29": "1", + "3": "3", "30": "1", "31": "1", "32": "1", @@ -77,6 +493,7 @@ "37": "1", "38": "1", "39": "1", + "4": "4", "40": "1", "41": "1", "42": "1", @@ -87,6 +504,7 @@ "47": "1", "48": "6", "49": "1", + "5": "2", "50": "1", "51": "1", "52": "1", @@ -97,41 +515,79 @@ "57": "1", "58": "1", "59": "1", + "6": "1", "60": "1", "61": "1", "62": "1", - "63": "1" + "63": "1", + 
"7": "1", + "8": "0", + "9": "1" + } + }, + "MAP_PFC_PRIORITY_TO_QUEUE": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" } }, + "PORT_QOS_MAP": {}, + "QUEUE": {}, "SCHEDULER": { "scheduler.0": { - "type" : "DWRR", + "type": "DWRR", "weight": "14" }, "scheduler.1": { - "type" : "DWRR", + "type": "DWRR", "weight": "15" } }, - "PORT_QOS_MAP": { + "TC_TO_PRIORITY_GROUP_MAP": { + "AZURE": { + "0": "0", + "1": "0", + "2": "0", + "3": "3", + "4": "4", + "5": "0", + "6": "0", + "7": "7" + } + }, + "TC_TO_QUEUE_MAP": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" + } }, "WRED_PROFILE": { - "AZURE_LOSSLESS" : { - "wred_green_enable" : "true", - "wred_yellow_enable" : "true", - "wred_red_enable" : "true", - "ecn" : "ecn_all", - "green_max_threshold" : "2097152", - "green_min_threshold" : "1048576", - "yellow_max_threshold" : "2097152", - "yellow_min_threshold" : "1048576", - "red_max_threshold" : "2097152", - "red_min_threshold" : "1048576", - "green_drop_probability" : "5", + "AZURE_LOSSLESS": { + "ecn": "ecn_all", + "green_drop_probability": "5", + "green_max_threshold": "2097152", + "green_min_threshold": "1048576", + "red_drop_probability": "5", + "red_max_threshold": "2097152", + "red_min_threshold": "1048576", + "wred_green_enable": "true", + "wred_red_enable": "true", + "wred_yellow_enable": "true", "yellow_drop_probability": "5", - "red_drop_probability" : "5" + "yellow_max_threshold": "2097152", + "yellow_min_threshold": "1048576" } - }, - "QUEUE": { } -} +} \ No newline at end of file diff --git a/tests/qos_config_input/1/config_qos.json b/tests/qos_config_input/1/config_qos.json index 40c1903a06..5ef4b07f8d 100644 --- a/tests/qos_config_input/1/config_qos.json +++ b/tests/qos_config_input/1/config_qos.json @@ -1,52 +1,466 @@ { - "TC_TO_PRIORITY_GROUP_MAP": { - "AZURE": { - "0": "0", - "1": "0", - "2": "0", - "3": "3", - "4": "4", - "5": "0", 
- "6": "0", - "7": "7" + "BUFFER_PG": { + "Ethernet0|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet100|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet104|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet108|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet112|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet116|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet120|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet124|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet12|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet16|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet20|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet24|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet28|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet32|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet36|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet40|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet44|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet48|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet4|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet52|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet56|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet60|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet64|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet68|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet72|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet76|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet80|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet84|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet88|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet8|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet92|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet96|0": { + "profile": 
"ingress_lossy_profile" } }, - "MAP_PFC_PRIORITY_TO_QUEUE": { - "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "BUFFER_POOL": { + "egress_lossless_pool": { + "mode": "static", + "size": "12766208", + "type": "egress" + }, + "egress_lossy_pool": { + "mode": "dynamic", + "size": "7326924", + "type": "egress" + }, + "ingress_lossless_pool": { + "mode": "dynamic", + "size": "12766208", + "type": "ingress" } }, - "TC_TO_QUEUE_MAP": { + "BUFFER_PROFILE": { + "egress_lossless_profile": { + "pool": "egress_lossless_pool", + "size": "0", + "static_th": "12766208" + }, + "egress_lossy_profile": { + "dynamic_th": "3", + "pool": "egress_lossy_pool", + "size": "1518" + }, + "ingress_lossy_profile": { + "dynamic_th": "3", + "pool": "ingress_lossless_pool", + "size": "0" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet100|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet104|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet108|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet112|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet116|5-6": { + "profile": 
"egress_lossy_profile" + }, + "Ethernet120|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet120|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet124|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet12|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet16|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet20|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet24|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet28|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet32|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet36|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet40|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|0-2": { + "profile": 
"egress_lossy_profile" + }, + "Ethernet44|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet44|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet48|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet4|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet52|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet56|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet60|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet64|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet68|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet72|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet76|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|3-4": { + "profile": 
"egress_lossless_profile" + }, + "Ethernet80|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet84|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet88|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet8|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet92|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet96|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "Ethernet0": "300m", + "Ethernet100": "300m", + "Ethernet104": "300m", + "Ethernet108": "300m", + "Ethernet112": "300m", + "Ethernet116": "300m", + "Ethernet12": "300m", + "Ethernet120": "300m", + "Ethernet124": "300m", + "Ethernet16": "300m", + "Ethernet20": "300m", + "Ethernet24": "300m", + "Ethernet28": "300m", + "Ethernet32": "300m", + "Ethernet36": "300m", + "Ethernet4": "300m", + "Ethernet40": "300m", + "Ethernet44": "300m", + "Ethernet48": "300m", + "Ethernet52": "300m", + "Ethernet56": "300m", + "Ethernet60": "300m", + "Ethernet64": "300m", + "Ethernet68": "300m", + "Ethernet72": "300m", + "Ethernet76": "300m", + "Ethernet8": "300m", + "Ethernet80": "300m", + "Ethernet84": "300m", + "Ethernet88": "300m", + "Ethernet92": "300m", + "Ethernet96": "300m" } }, "DSCP_TO_TC_MAP": { "AZURE": { - "0" : 
"1", - "1" : "1", - "2" : "1", - "3" : "3", - "4" : "4", - "5" : "2", - "6" : "1", - "7" : "1", - "8" : "0", - "9" : "1", + "0": "1", + "1": "1", "10": "1", "11": "1", "12": "1", @@ -57,6 +471,7 @@ "17": "1", "18": "1", "19": "1", + "2": "1", "20": "1", "21": "1", "22": "1", @@ -67,6 +482,7 @@ "27": "1", "28": "1", "29": "1", + "3": "3", "30": "1", "31": "1", "32": "1", @@ -77,6 +493,7 @@ "37": "1", "38": "1", "39": "1", + "4": "4", "40": "1", "41": "1", "42": "1", @@ -87,6 +504,7 @@ "47": "1", "48": "6", "49": "1", + "5": "2", "50": "1", "51": "1", "52": "1", @@ -97,41 +515,79 @@ "57": "1", "58": "1", "59": "1", + "6": "1", "60": "1", "61": "1", "62": "1", - "63": "1" + "63": "1", + "7": "1", + "8": "0", + "9": "1" + } + }, + "MAP_PFC_PRIORITY_TO_QUEUE": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" } }, + "PORT_QOS_MAP": {}, + "QUEUE": {}, "SCHEDULER": { "scheduler.0": { - "type" : "DWRR", + "type": "DWRR", "weight": "14" }, "scheduler.1": { - "type" : "DWRR", + "type": "DWRR", "weight": "15" } }, - "PORT_QOS_MAP": { + "TC_TO_PRIORITY_GROUP_MAP": { + "AZURE": { + "0": "0", + "1": "0", + "2": "0", + "3": "3", + "4": "4", + "5": "0", + "6": "0", + "7": "7" + } + }, + "TC_TO_QUEUE_MAP": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" + } }, "WRED_PROFILE": { - "AZURE_LOSSLESS" : { - "wred_green_enable" : "true", - "wred_yellow_enable" : "true", - "wred_red_enable" : "true", - "ecn" : "ecn_all", - "green_max_threshold" : "2097152", - "green_min_threshold" : "1048576", - "yellow_max_threshold" : "2097152", - "yellow_min_threshold" : "1048576", - "red_max_threshold" : "2097152", - "red_min_threshold" : "1048576", - "green_drop_probability" : "5", + "AZURE_LOSSLESS": { + "ecn": "ecn_all", + "green_drop_probability": "5", + "green_max_threshold": "2097152", + "green_min_threshold": "1048576", + "red_drop_probability": "5", + "red_max_threshold": 
"2097152", + "red_min_threshold": "1048576", + "wred_green_enable": "true", + "wred_red_enable": "true", + "wred_yellow_enable": "true", "yellow_drop_probability": "5", - "red_drop_probability" : "5" + "yellow_max_threshold": "2097152", + "yellow_min_threshold": "1048576" } - }, - "QUEUE": { } -} +} \ No newline at end of file diff --git a/tests/qos_config_input/config_qos.json b/tests/qos_config_input/config_qos.json index fd76373983..0d44b421bd 100644 --- a/tests/qos_config_input/config_qos.json +++ b/tests/qos_config_input/config_qos.json @@ -1,52 +1,466 @@ { - "TC_TO_PRIORITY_GROUP_MAP": { - "AZURE": { - "0": "0", - "1": "0", - "2": "0", - "3": "3", - "4": "4", - "5": "0", - "6": "0", - "7": "7" + "BUFFER_PG": { + "Ethernet0|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet100|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet104|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet108|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet112|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet116|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet120|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet124|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet12|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet16|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet20|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet24|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet28|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet32|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet36|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet40|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet44|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet48|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet4|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet52|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet56|0": { + 
"profile": "ingress_lossy_profile" + }, + "Ethernet60|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet64|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet68|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet72|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet76|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet80|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet84|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet88|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet8|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet92|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet96|0": { + "profile": "ingress_lossy_profile" } }, - "MAP_PFC_PRIORITY_TO_QUEUE": { - "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "BUFFER_POOL": { + "egress_lossless_pool": { + "mode": "static", + "size": "12766208", + "type": "egress" + }, + "egress_lossy_pool": { + "mode": "dynamic", + "size": "7326924", + "type": "egress" + }, + "ingress_lossless_pool": { + "mode": "dynamic", + "size": "12766208", + "type": "ingress" } }, - "TC_TO_QUEUE_MAP": { + "BUFFER_PROFILE": { + "egress_lossless_profile": { + "pool": "egress_lossless_pool", + "size": "0", + "static_th": "12766208" + }, + "egress_lossy_profile": { + "dynamic_th": "3", + "pool": "egress_lossy_pool", + "size": "1518" + }, + "ingress_lossy_profile": { + "dynamic_th": "3", + "pool": "ingress_lossless_pool", + "size": "0" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet100|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|0-2": { + "profile": "egress_lossy_profile" + }, + 
"Ethernet104|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet104|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet108|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet112|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet116|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet120|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet124|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet12|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet16|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet20|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet24|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|3-4": { + "profile": "egress_lossless_profile" + 
}, + "Ethernet28|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet32|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet36|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet40|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet44|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet48|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet4|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet52|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet56|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet60|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet64|5-6": { + "profile": "egress_lossy_profile" + }, + 
"Ethernet68|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet68|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet72|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet76|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet80|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet84|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet88|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet8|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet92|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet96|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "Ethernet0": "300m", + "Ethernet100": "300m", + "Ethernet104": "300m", + "Ethernet108": "300m", + "Ethernet112": "300m", + "Ethernet116": 
"300m", + "Ethernet12": "300m", + "Ethernet120": "300m", + "Ethernet124": "300m", + "Ethernet16": "300m", + "Ethernet20": "300m", + "Ethernet24": "300m", + "Ethernet28": "300m", + "Ethernet32": "300m", + "Ethernet36": "300m", + "Ethernet4": "300m", + "Ethernet40": "300m", + "Ethernet44": "300m", + "Ethernet48": "300m", + "Ethernet52": "300m", + "Ethernet56": "300m", + "Ethernet60": "300m", + "Ethernet64": "300m", + "Ethernet68": "300m", + "Ethernet72": "300m", + "Ethernet76": "300m", + "Ethernet8": "300m", + "Ethernet80": "300m", + "Ethernet84": "300m", + "Ethernet88": "300m", + "Ethernet92": "300m", + "Ethernet96": "300m" } }, "DSCP_TO_TC_MAP": { "AZURE": { - "0" : "1", - "1" : "1", - "2" : "1", - "3" : "3", - "4" : "4", - "5" : "2", - "6" : "1", - "7" : "1", - "8" : "0", - "9" : "1", + "0": "1", + "1": "1", "10": "1", "11": "1", "12": "1", @@ -57,6 +471,7 @@ "17": "1", "18": "1", "19": "1", + "2": "1", "20": "1", "21": "1", "22": "1", @@ -67,6 +482,7 @@ "27": "1", "28": "1", "29": "1", + "3": "3", "30": "1", "31": "1", "32": "1", @@ -77,6 +493,7 @@ "37": "1", "38": "1", "39": "1", + "4": "4", "40": "1", "41": "1", "42": "1", @@ -87,6 +504,7 @@ "47": "1", "48": "6", "49": "1", + "5": "2", "50": "1", "51": "1", "52": "1", @@ -97,53 +515,91 @@ "57": "1", "58": "1", "59": "1", + "6": "1", "60": "1", "61": "1", "62": "1", - "63": "1" + "63": "1", + "7": "1", + "8": "0", + "9": "1" + } + }, + "MAP_PFC_PRIORITY_TO_QUEUE": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" } }, "MPLS_TC_TO_TC_MAP": { "AZURE": { - "0" : "1", - "1" : "1", - "2" : "1", - "3" : "3", - "4" : "4", - "5" : "2", - "6" : "1", - "7" : "1" + "0": "1", + "1": "1", + "2": "1", + "3": "3", + "4": "4", + "5": "2", + "6": "1", + "7": "1" } }, + "PORT_QOS_MAP": {}, + "QUEUE": {}, "SCHEDULER": { "scheduler.0": { - "type" : "DWRR", + "type": "DWRR", "weight": "14" }, "scheduler.1": { - "type" : "DWRR", + "type": "DWRR", "weight": "15" } }, - 
"PORT_QOS_MAP": { + "TC_TO_PRIORITY_GROUP_MAP": { + "AZURE": { + "0": "0", + "1": "0", + "2": "0", + "3": "3", + "4": "4", + "5": "0", + "6": "0", + "7": "7" + } + }, + "TC_TO_QUEUE_MAP": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" + } }, "WRED_PROFILE": { - "AZURE_LOSSLESS" : { - "wred_green_enable" : "true", - "wred_yellow_enable" : "true", - "wred_red_enable" : "true", - "ecn" : "ecn_all", - "green_max_threshold" : "2097152", - "green_min_threshold" : "1048576", - "yellow_max_threshold" : "2097152", - "yellow_min_threshold" : "1048576", - "red_max_threshold" : "2097152", - "red_min_threshold" : "1048576", - "green_drop_probability" : "5", + "AZURE_LOSSLESS": { + "ecn": "ecn_all", + "green_drop_probability": "5", + "green_max_threshold": "2097152", + "green_min_threshold": "1048576", + "red_drop_probability": "5", + "red_max_threshold": "2097152", + "red_min_threshold": "1048576", + "wred_green_enable": "true", + "wred_red_enable": "true", + "wred_yellow_enable": "true", "yellow_drop_probability": "5", - "red_drop_probability" : "5" + "yellow_max_threshold": "2097152", + "yellow_min_threshold": "1048576" } - }, - "QUEUE": { } -} +} \ No newline at end of file From 9357c45f6513a4eddebfece358925d68dab7c159 Mon Sep 17 00:00:00 2001 From: "Marty Y. 
Lok" <76118573+mlok-nokia@users.noreply.github.com> Date: Tue, 3 Sep 2024 13:51:04 -0400 Subject: [PATCH 37/67] [chassis][cli] Fix config chassis module startup/shutdown command for fabric module (#3483) * [chassis][cli] Fix config chassis module starup/shutdown command for fabric module Signed-off-by: mlok * Address review comment: modify UT to test if the run_command parameter type is list Signed-off-by: mlok --------- Signed-off-by: mlok --- config/chassis_modules.py | 8 ++++---- tests/chassis_modules_test.py | 8 +++++++- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/config/chassis_modules.py b/config/chassis_modules.py index 4e7fd8096b..5f70ef404a 100755 --- a/config/chassis_modules.py +++ b/config/chassis_modules.py @@ -72,7 +72,7 @@ def fabric_module_set_admin_status(db, chassis_module_name, state): if state == "down": for asic in asic_list: click.echo("Stop swss@{} and peer services".format(asic)) - clicommon.run_command('sudo systemctl stop swss@{}.service'.format(asic)) + clicommon.run_command(['sudo', 'systemctl', 'stop', 'swss@{}.service'.format(asic)]) is_active = subprocess.call(["systemctl", "is-active", "--quiet", "swss@{}.service".format(asic)]) @@ -89,13 +89,13 @@ def fabric_module_set_admin_status(db, chassis_module_name, state): # without bring down the hardware for asic in asic_list: # To address systemd service restart limit by resetting the count - clicommon.run_command('sudo systemctl reset-failed swss@{}.service'.format(asic)) + clicommon.run_command(['sudo', 'systemctl', 'reset-failed', 'swss@{}.service'.format(asic)]) click.echo("Start swss@{} and peer services".format(asic)) - clicommon.run_command('sudo systemctl start swss@{}.service'.format(asic)) + clicommon.run_command(['sudo', 'systemctl', 'start', 'swss@{}.service'.format(asic)]) elif state == "up": for asic in asic_list: click.echo("Start swss@{} and peer services".format(asic)) - clicommon.run_command('sudo systemctl start swss@{}.service'.format(asic)) + 
clicommon.run_command(['sudo', 'systemctl', 'start', 'swss@{}.service'.format(asic)]) # # 'shutdown' subcommand ('config chassis_modules shutdown ...') diff --git a/tests/chassis_modules_test.py b/tests/chassis_modules_test.py index 681e3d2c13..f59341a487 100755 --- a/tests/chassis_modules_test.py +++ b/tests/chassis_modules_test.py @@ -126,7 +126,13 @@ def mock_run_command_side_effect(*args, **kwargs): - return '', 0 + print("command: {}".format(*args)) + if isinstance(*args, list): + return '', 0 + else: + print("Expected type of command is list. Actual type is {}".format(*args)) + assert 0 + return '', 0 class TestChassisModules(object): From d29b8241abdec0c90cafd2fa5bd6b64d698fd98a Mon Sep 17 00:00:00 2001 From: Wenda Chu <32250288+w1nda@users.noreply.github.com> Date: Wed, 4 Sep 2024 08:35:59 +0800 Subject: [PATCH 38/67] Revert "[wol] Implement wol command line utility" (#3515) * Revert "[wol] Implement wol command line utility (#3048)" This reverts commit 1e8131050a5d49aadcfe9dafbc10fadba3e61752. 
* Save doc --- setup.py | 2 - tests/wol_test.py | 229 ---------------------------------------------- wol/__init__.py | 0 wol/main.py | 202 ---------------------------------------- 4 files changed, 433 deletions(-) delete mode 100644 tests/wol_test.py delete mode 100644 wol/__init__.py delete mode 100644 wol/main.py diff --git a/setup.py b/setup.py index 6a66f012f9..5d0dc0ea35 100644 --- a/setup.py +++ b/setup.py @@ -88,7 +88,6 @@ 'utilities_common', 'watchdogutil', 'sonic_cli_gen', - 'wol', ], package_data={ 'generic_config_updater': ['gcu_services_validator.conf.json', 'gcu_field_operation_validators.conf.json'], @@ -223,7 +222,6 @@ 'undebug = undebug.main:cli', 'watchdogutil = watchdogutil.main:watchdogutil', 'sonic-cli-gen = sonic_cli_gen.main:cli', - 'wol = wol.main:wol', ] }, install_requires=[ diff --git a/tests/wol_test.py b/tests/wol_test.py deleted file mode 100644 index 011676eeac..0000000000 --- a/tests/wol_test.py +++ /dev/null @@ -1,229 +0,0 @@ -import click -import io -import pytest -import wol.main as wol -from click.testing import CliRunner -from unittest.mock import patch, MagicMock - -ETHER_TYPE_WOL = b'\x08\x42' -BROADCAST_MAC = wol.MacAddress('ff:ff:ff:ff:ff:ff') - -SAMPLE_INTERFACE_ETH0 = "Ethernet0" -SAMPLE_INTERFACE_VLAN1000 = "Vlan1000" -SAMPLE_INTERFACE_PO100 = "PortChannel100" - -SAMPLE_ETH0_MAC = wol.MacAddress('11:33:55:77:99:bb') -SAMPLE_VLAN1000_MAC = wol.MacAddress('22:44:66:88:aa:cc') -SAMPLE_PO100_MAC = wol.MacAddress('33:55:77:99:bb:dd') -SAMPLE_TARGET_MAC = wol.MacAddress('44:66:88:aa:cc:ee') -SAMPLE_TARGET_MAC_LIST = [wol.MacAddress('44:66:88:aa:cc:ee'), wol.MacAddress('55:77:99:bb:dd:ff')] - -SAMPLE_MAGIC_PACKET_UNICAST = SAMPLE_TARGET_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 -SAMPLE_MAGIC_PACKET_BROADCAST = BROADCAST_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 - - -class TestMacAddress(): - 
def test_init(self): - # Test Case 1: Test with a valid MAC address - assert wol.MacAddress('00:11:22:33:44:55').address == b'\x00\x11\x22\x33\x44\x55' - # Test Case 2: Test with an invalid MAC address - with pytest.raises(ValueError) as exc_info: - wol.MacAddress('INVALID_MAC_ADDRESS') - assert exc_info.value.message == "invalid MAC address" - with pytest.raises(ValueError) as exc_info: - wol.MacAddress('00:11:22:33:44') - assert exc_info.value.message == "invalid MAC address" - - def test_str(self): - assert str(wol.MacAddress('00:01:0a:a0:aa:ee')) == '00:01:0a:a0:aa:ee' - assert str(wol.MacAddress('ff:ff:ff:ff:ff:ff')) == 'ff:ff:ff:ff:ff:ff' - - def test_eq(self): - # Test Case 1: Test with two equal MAC addresses - assert wol.MacAddress('00:11:22:33:44:55') == wol.MacAddress('00:11:22:33:44:55') - # Test Case 2: Test with two unequal MAC addresses - assert wol.MacAddress('00:11:22:33:44:55') != wol.MacAddress('55:44:33:22:11:00') - - def test_to_bytes(self): - assert wol.MacAddress('00:11:22:33:44:55').to_bytes() == b'\x00\x11\x22\x33\x44\x55' - - -@patch('wol.main.get_interface_mac', MagicMock(return_value=SAMPLE_ETH0_MAC)) -def test_build_magic_packet(): - # Test Case 1: Test build magic packet basic - expected_output = SAMPLE_TARGET_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL \ - + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 - assert wol.build_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, broadcast=False, password=b'') == expected_output - # Test Case 2: Test build magic packet with broadcast flag - expected_output = BROADCAST_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL \ - + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 - assert wol.build_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, broadcast=True, password=b'') == expected_output - # Test Case 3: Test build magic packet with 4-byte password - password = b'\x12\x34' - expected_output = SAMPLE_TARGET_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + 
ETHER_TYPE_WOL \ - + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 + password - assert wol.build_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, broadcast=False, password=password) == expected_output - # Test Case 4: Test build magic packet with 6-byte password - password = b'\x12\x34\x56\x78\x9a\xbc' - expected_output = SAMPLE_TARGET_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL \ - + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 + password - assert wol.build_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, broadcast=False, password=password) == expected_output - - -def test_send_magic_packet(): - # Test Case 1: Test send magic packet with count is 1 - with patch('socket.socket') as mock_socket: - wol.send_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, count=1, interval=0, verbose=False) - mock_socket.return_value.bind.assert_called_once_with((SAMPLE_INTERFACE_ETH0, 0)) - mock_socket.return_value.send.assert_called_once_with(SAMPLE_MAGIC_PACKET_UNICAST) - # Test Case 2: Test send magic packet with count is 3 - with patch('socket.socket') as mock_socket: - wol.send_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, count=3, interval=0, verbose=False) - assert mock_socket.return_value.bind.call_count == 1 - assert mock_socket.return_value.send.call_count == 3 - # Test Case 3: Test send magic packet with interval is 1000 - with patch('socket.socket') as mock_socket, \ - patch('time.sleep') as mock_sleep: - wol.send_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, count=3, interval=1000, verbose=False) - assert mock_socket.return_value.bind.call_count == 1 - assert mock_socket.return_value.send.call_count == 3 - assert mock_sleep.call_count == 2 # sleep twice between 3 packets - mock_sleep.assert_called_with(1) - # Test Case 4: Test send magic packet with verbose is True - expected_verbose_output = f"Sending 5 magic packet to 
{SAMPLE_TARGET_MAC} via interface {SAMPLE_INTERFACE_ETH0}\n" + \ - f"1st magic packet sent to {SAMPLE_TARGET_MAC}\n" + \ - f"2nd magic packet sent to {SAMPLE_TARGET_MAC}\n" + \ - f"3rd magic packet sent to {SAMPLE_TARGET_MAC}\n" + \ - f"4th magic packet sent to {SAMPLE_TARGET_MAC}\n" + \ - f"5th magic packet sent to {SAMPLE_TARGET_MAC}\n" - with patch('socket.socket') as mock_socket, patch('time.sleep'), patch('sys.stdout', new_callable=io.StringIO) as mock_stdout: - wol.send_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, count=5, interval=1000, verbose=True) - assert mock_socket.return_value.bind.call_count == 1 - assert mock_socket.return_value.send.call_count == 5 - assert mock_stdout.getvalue() == expected_verbose_output - - -@patch('netifaces.interfaces', MagicMock(return_value=[SAMPLE_INTERFACE_ETH0])) -@patch('wol.main.get_interface_operstate', MagicMock(return_value="up")) -def test_validate_interface(): - # Test Case 1: Test with a valid SONiC interface name - assert wol.validate_interface(None, None, SAMPLE_INTERFACE_ETH0) == SAMPLE_INTERFACE_ETH0 - # Test Case 2: Test with an invalid SONiC interface name - with pytest.raises(click.BadParameter) as exc_info: - wol.validate_interface(None, None, "INVALID_SONIC_INTERFACE") - assert exc_info.value.message == "invalid SONiC interface name INVALID_SONIC_INTERFACE" - # Test Case 3: Test with an valid SONiC interface name, but the interface operstat is down - with patch('wol.main.get_interface_operstate', MagicMock(return_value="down")): - with pytest.raises(click.BadParameter) as exc_info: - wol.validate_interface(None, None, SAMPLE_INTERFACE_ETH0) - assert exc_info.value.message == f"interface {SAMPLE_INTERFACE_ETH0} is not up" - - -def test_parse_target_mac(): - # Test Case 1: Test with a single valid target MAC address - wol.parse_target_mac(None, None, str(SAMPLE_TARGET_MAC)) == [SAMPLE_TARGET_MAC] - # Test Case 2: Test with a list of valid target MAC addresses - 
mac_list = [SAMPLE_ETH0_MAC, SAMPLE_VLAN1000_MAC, SAMPLE_PO100_MAC] - assert wol.parse_target_mac(None, None, ",".join([str(x) for x in mac_list])) == mac_list - # Test Case 3: Test with a single invalid target MAC address - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_target_mac(None, None, "INVALID_MAC_ADDRESS") - assert exc_info.value.message == "invalid MAC address INVALID_MAC_ADDRESS" - # Test Case 4: Test with a list of target MAC addresses, one of them is invalid - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_target_mac(None, None, ",".join([str(SAMPLE_ETH0_MAC), "INVALID_MAC_ADDRESS"])) - assert exc_info.value.message == "invalid MAC address INVALID_MAC_ADDRESS" - - -def test_parse_password(): - # Test Case 1: Test with an empty password - assert wol.parse_password(None, None, "") == b'' - # Test Case 2: Test with a valid 4-byte password - assert wol.parse_password(None, None, "1.2.3.4") == b'\x01\x02\x03\x04' - # Test Case 3: Test with an invalid 4-byte password - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_password(None, None, "1.2.3.999") - assert exc_info.value.message == "invalid password 1.2.3.999" - # Test Case 4: Test with a valid 6-byte password - assert wol.parse_password(None, None, str(SAMPLE_TARGET_MAC)) == SAMPLE_TARGET_MAC.to_bytes() - # Test Case 5: Test with an invalid 6-byte password - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_password(None, None, "11:22:33:44:55:999") - assert exc_info.value.message == "invalid password 11:22:33:44:55:999" - # Test Case 6: Test with an invalid password string - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_password(None, None, "INVALID_PASSWORD") - assert exc_info.value.message == "invalid password INVALID_PASSWORD" - - -def test_validate_count_interval(): - # Test Case 1: input valid count and interval - assert wol.validate_count_interval(1, 1000) == (1, 1000) - # Test Case 2: Test with both count and 
interval are not provided - assert wol.validate_count_interval(None, None) == (1, 0) - # Test Case 3: Test count and interval not provided together - with pytest.raises(click.BadParameter) as exc_info: - wol.validate_count_interval(3, None) - assert exc_info.value.message == "count and interval must be used together" - with pytest.raises(click.BadParameter) as exc_info: - wol.validate_count_interval(None, 1000) - assert exc_info.value.message == "count and interval must be used together" - # Test Case 4: Test with count or interval not in valid range - # This restriction is validated by click.IntRange(), so no need to call the command line function - runner = CliRunner() - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC), '-c', '100', '-i', '1000']) - assert 'Invalid value for "-c": 100 is not in the valid range of 1 to 5.' in result.stdout - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC), '-c', '3', '-i', '100000']) - assert 'Invalid value for "-i": 100000 is not in the valid range of 0 to 2000.' in result.stdout - - -@patch('netifaces.interfaces', MagicMock(return_value=[SAMPLE_INTERFACE_ETH0])) -@patch('wol.main.is_root', MagicMock(return_value=True)) -@patch('wol.main.get_interface_operstate', MagicMock(return_value="up")) -@patch('wol.main.get_interface_mac', MagicMock(return_value=SAMPLE_ETH0_MAC)) -def test_wol_send_magic_packet_call_count(): - """ - Test the count of send_magic_packet() function call in wol is correct. 
- """ - runner = CliRunner() - # Test Case 1: Test with only required arguments - # 1.1 Single Target Mac - with patch('wol.main.send_magic_packet') as mock_send_magic_packet: - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC)]) - assert result.exit_code == 0 - mock_send_magic_packet.assert_called_once_with(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, 1, 0, False) - # 1.2 Multiple Target Mac - with patch('wol.main.send_magic_packet') as mock_send_magic_packet: - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, ','.join([str(v) for v in SAMPLE_TARGET_MAC_LIST])]) - assert result.exit_code == 0 - assert mock_send_magic_packet.call_count == 2 - # Test Case 2: Test with specified count and interval - # 2.1 Single Target Mac - with patch('wol.main.send_magic_packet') as mock_send_magic_packet: - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC), '-c', '5', '-i', '1000']) - assert result.exit_code == 0 - mock_send_magic_packet.assert_called_once_with(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, 5, 1000, False) - # 2.2 Multiple Target Mac - with patch('wol.main.send_magic_packet') as mock_send_magic_packet: - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, ','.join([str(v) for v in SAMPLE_TARGET_MAC_LIST]), '-c', '5', '-i', '1000']) - assert result.exit_code == 0 - assert mock_send_magic_packet.call_count == 2 - - -@patch('netifaces.interfaces', MagicMock(return_value=[SAMPLE_INTERFACE_ETH0])) -@patch('wol.main.is_root', MagicMock(return_value=True)) -@patch('wol.main.get_interface_operstate', MagicMock(return_value="up")) -@patch('wol.main.get_interface_mac', MagicMock(return_value=SAMPLE_ETH0_MAC)) -def test_wol_send_magic_packet_throw_exception(): - """ - Test the exception handling of send_magic_packet() function in wol. 
- """ - runner = CliRunner() - # Test Case 1: Test with OSError exception (interface flap) - with patch('wol.main.send_magic_packet', MagicMock(side_effect=OSError("[Errno 100] Network is down"))): - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC)]) - assert "Exception: [Errno 100] Network is down" in result.stdout - # Test Case 2: Test with other exception - with patch('wol.main.send_magic_packet', MagicMock(side_effect=Exception("Exception message"))): - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC)]) - assert "Exception: Exception message" in result.stdout diff --git a/wol/__init__.py b/wol/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/wol/main.py b/wol/main.py deleted file mode 100644 index 3b569a3a4f..0000000000 --- a/wol/main.py +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/env python3 - -""" -use wol to generate and send Wake-On-LAN (WOL) "Magic Packet" to specific interface - -Usage: wol_click [OPTIONS] INTERFACE TARGET_MAC - - Generate and send Wake-On-LAN (WOL) "Magic Packet" to specific interface - -Options: - -b Use broadcast MAC address instead of target device's MAC - address as Destination MAC Address in Ethernet Frame Header. - [default: False] - -p password An optional 4 or 6 byte password, in ethernet hex format or - quad-dotted decimal [default: ] - -c count For each target MAC address, the count of magic packets to - send. count must between 1 and 5. This param must use with -i. - [default: 1] - -i interval Wait interval milliseconds between sending each magic packet. - interval must between 0 and 2000. This param must use with -c. - [default: 0] - -v Verbose output [default: False] - -h, --help Show this message and exit. 
- -Examples: - wol Ethernet10 00:11:22:33:44:55 - wol Ethernet10 00:11:22:33:44:55 -b - wol Vlan1000 00:11:22:33:44:55,11:33:55:77:99:bb -p 00:22:44:66:88:aa - wol Vlan1000 00:11:22:33:44:55,11:33:55:77:99:bb -p 192.168.1.1 -c 3 -i 2000 -""" - -import binascii -import click -import copy -import netifaces -import os -import socket -import time - -CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) -EPILOG = """\b -Examples: - wol Ethernet10 00:11:22:33:44:55 - wol Ethernet10 00:11:22:33:44:55 -b - wol Vlan1000 00:11:22:33:44:55,11:33:55:77:99:bb -p 00:22:44:66:88:aa - wol Vlan1000 00:11:22:33:44:55,11:33:55:77:99:bb -p 192.168.1.1 -c 3 -i 2000 -""" -ORDINAL_NUMBER = ["0", "1st", "2nd", "3rd", "4th", "5th"] -ETHER_TYPE_WOL = b'\x08\x42' - - -class MacAddress(object): - """ - Class to handle MAC addresses and perform operations on them. - - Attributes: - - address: bytes - """ - - def __init__(self, address: str): - """ - Constructor to instantiate the MacAddress class. - - Parameters: - - address: str - The MAC address in the format '01:23:45:67:89:AB' or '01-23-45-67-89-AB'. - - Raises: - - ValueError: - Throws an error if the provided address is not in the correct format. 
- """ - try: - self.address = binascii.unhexlify(address.replace(':', '').replace('-', '')) - except binascii.Error: - raise ValueError("invalid MAC address") - if len(self.address) != 6: - raise ValueError("invalid MAC address") - - def __str__(self): - return ":".join(["%02x" % v for v in self.address]) - - def __eq__(self, other): - return self.address == other.address - - def to_bytes(self): - return copy.copy(self.address) - - -BROADCAST_MAC = MacAddress('ff:ff:ff:ff:ff:ff') - - -def is_root(): - return os.geteuid() == 0 - - -def get_interface_operstate(interface): - with open('/sys/class/net/{}/operstate'.format(interface), 'r') as f: - return f.read().strip().lower() - - -def get_interface_mac(interface): - return MacAddress(netifaces.ifaddresses(interface)[netifaces.AF_LINK][0].get('addr')) - - -def build_magic_packet(interface, target_mac, broadcast, password): - dst_mac = BROADCAST_MAC if broadcast else target_mac - src_mac = get_interface_mac(interface) - return dst_mac.to_bytes() + src_mac.to_bytes() + ETHER_TYPE_WOL \ - + b'\xff' * 6 + target_mac.to_bytes() * 16 + password - - -def send_magic_packet(interface, target_mac, pkt, count, interval, verbose): - if verbose: - print("Sending {} magic packet to {} via interface {}".format(count, target_mac, interface)) - sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW) - sock.bind((interface, 0)) - for i in range(count): - sock.send(pkt) - if verbose: - print("{} magic packet sent to {}".format(ORDINAL_NUMBER[i + 1], target_mac)) - if i + 1 != count: - time.sleep(interval / 1000) - sock.close() - - -def validate_interface(ctx, param, value): - if value not in netifaces.interfaces(): - raise click.BadParameter("invalid SONiC interface name {}".format(value)) - if get_interface_operstate(value) != 'up': - raise click.BadParameter("interface {} is not up".format(value)) - return value - - -def parse_target_mac(ctx, param, value): - mac_list = [] - for mac in value.split(','): - try: - 
mac_list.append(MacAddress(mac)) - except ValueError: - raise click.BadParameter("invalid MAC address {}".format(mac)) - return mac_list - - -def parse_password(ctx, param, value): - if len(value) == 0: - return b'' # Empty password is valid. - elif len(value) <= 15: # The length of a valid IPv4 address is less or equal to 15. - try: - password = socket.inet_aton(value) - except OSError: - raise click.BadParameter("invalid password format") - else: # The length of a valid MAC address is 17. - try: - password = MacAddress(value).to_bytes() - except ValueError: - raise click.BadParameter("invalid password format") - if len(password) not in [4, 6]: - raise click.BadParameter("password must be 4 or 6 bytes or empty") - return password - - -def validate_count_interval(count, interval): - if count is None and interval is None: - return 1, 0 # By default, count=1 and interval=0. - if count is None or interval is None: - raise click.BadParameter("count and interval must be used together") - # The values are confirmed in valid range by click.IntRange(). - return count, interval - - -@click.command(context_settings=CONTEXT_SETTINGS, epilog=EPILOG) -@click.argument('interface', type=click.STRING, callback=validate_interface) -@click.argument('target_mac', type=click.STRING, callback=parse_target_mac) -@click.option('-b', 'broadcast', is_flag=True, show_default=True, default=False, - help="Use broadcast MAC address instead of target device's MAC address as Destination MAC Address in Ethernet Frame Header.") -@click.option('-p', 'password', type=click.STRING, show_default=True, default='', callback=parse_password, metavar='password', - help='An optional 4 or 6 byte password, in ethernet hex format or quad-dotted decimal') -@click.option('-c', 'count', type=click.IntRange(1, 5), metavar='count', show_default=True, # default=1, - help='For each target MAC address, the count of magic packets to send. count must between 1 and 5. 
This param must use with -i.') -@click.option('-i', 'interval', type=click.IntRange(0, 2000), metavar='interval', # show_default=True, default=0, - help="Wait interval milliseconds between sending each magic packet. interval must between 0 and 2000. This param must use with -c.") -@click.option('-v', 'verbose', is_flag=True, show_default=True, default=False, - help='Verbose output') -def wol(interface, target_mac, broadcast, password, count, interval, verbose): - """ - Generate and send Wake-On-LAN (WOL) "Magic Packet" to specific interface - """ - count, interval = validate_count_interval(count, interval) - - if not is_root(): - raise click.ClickException("root priviledge is required to run this script") - - for mac in target_mac: - pkt = build_magic_packet(interface, mac, broadcast, password) - try: - send_magic_packet(interface, mac, pkt, count, interval, verbose) - except Exception as e: - raise click.ClickException(f'Exception: {e}') - - -if __name__ == '__main__': - wol() From 40026f98cf23ef529ea44116fe4ac64f0b16c104 Mon Sep 17 00:00:00 2001 From: HP Date: Wed, 4 Sep 2024 13:25:58 -0700 Subject: [PATCH 39/67] Remove redundant mmuconfig file (#3446) --- mmuconfig | 199 ------------------------------------------------------ 1 file changed, 199 deletions(-) delete mode 100755 mmuconfig diff --git a/mmuconfig b/mmuconfig deleted file mode 100755 index f9dc178625..0000000000 --- a/mmuconfig +++ /dev/null @@ -1,199 +0,0 @@ -#!/usr/bin/python3 - -""" -mmuconfig is the utility to show and change mmu configuration - -usage: mmuconfig [-h] [-v] [-l] [-p PROFILE] [-a ALPHA] [-s staticth] [-vv] - -optional arguments: - -h --help show this help message and exit - -v --version show program's version number and exit - -vv --verbose verbose output - -l --list show mmu configuration - -p --profile specify buffer profile name - -a --alpha set n for dyanmic threshold alpha 2^(n) - -s --staticth set static threshold - -""" - -import os -import sys -import argparse -import 
tabulate -import traceback - -BUFFER_POOL_TABLE_NAME = "BUFFER_POOL" -BUFFER_PROFILE_TABLE_NAME = "BUFFER_PROFILE" -DEFAULT_LOSSLESS_BUFFER_PARAMETER_NAME = "DEFAULT_LOSSLESS_BUFFER_PARAMETER" - -DYNAMIC_THRESHOLD = "dynamic_th" -STATIC_THRESHOLD = "static_th" -BUFFER_PROFILE_FIELDS = { - "alpha": DYNAMIC_THRESHOLD, - "staticth": STATIC_THRESHOLD -} - -# mock the redis for unit test purposes # -try: - if os.environ["UTILITIES_UNIT_TESTING"] == "2": - modules_path = os.path.join(os.path.dirname(__file__), "..") - tests_path = os.path.join(modules_path, "tests") - sys.path.insert(0, modules_path) - sys.path.insert(0, tests_path) - import mock_tables.dbconnector - -except KeyError: - pass - -from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector - -BUFFER_POOL_TABLE_NAME = "BUFFER_POOL" -BUFFER_PROFILE_TABLE_NAME = "BUFFER_PROFILE" - -''' -DYNAMIC_THRESHOLD = "dynamic_th" -BUFFER_PROFILE_FIELDS = { - "alpha": DYNAMIC_THRESHOLD -} -''' - -class MmuConfig(object): - def __init__(self, verbose, config): - self.verbose = verbose - self.config = config - - # Set up db connections - if self.config: - self.db = ConfigDBConnector() - self.db.connect() - else: - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.STATE_DB, False) - - def get_table(self, tablename): - if self.config: - return self.db.get_table(tablename) - - entries = {} - keys = self.db.keys(self.db.STATE_DB, tablename + '*') - - if not keys: - return None - - for key in keys: - entries[key.split('|')[1]] = self.db.get_all(self.db.STATE_DB, key) - - return entries - - def list(self): - lossless_traffic_pattern = self.get_table(DEFAULT_LOSSLESS_BUFFER_PARAMETER_NAME) - if lossless_traffic_pattern: - for _, pattern in lossless_traffic_pattern.items(): - config = [] - - print("Lossless traffic pattern:") - for field, value in pattern.items(): - config.append([field, value]) - print(tabulate.tabulate(config) + "\n") - - buf_pools = self.get_table(BUFFER_POOL_TABLE_NAME) 
- if buf_pools: - for pool_name, pool_data in buf_pools.items(): - config = [] - - print("Pool: " + pool_name) - for field, value in pool_data.items(): - config.append([field, value]) - print(tabulate.tabulate(config) + "\n") - if self.verbose: - print("Total pools: %d\n\n" % len(buf_pools)) - else: - print("No buffer pool information available") - - buf_profs = self.get_table(BUFFER_PROFILE_TABLE_NAME) - if buf_profs: - for prof_name, prof_data in buf_profs.items(): - config = [] - - print("Profile: " + prof_name) - for field, value in prof_data.items(): - config.append([field, value]) - print(tabulate.tabulate(config) + "\n") - if self.verbose: - print("Total profiles: %d" % len(buf_profs)) - else: - print("No buffer profile information available") - - def set(self, profile, field_alias, value): - if os.geteuid() != 0: - sys.exit("Root privileges required for this operation") - - field = BUFFER_PROFILE_FIELDS[field_alias] - buf_profs = self.db.get_table(BUFFER_PROFILE_TABLE_NAME) - v = int(value) - if field == DYNAMIC_THRESHOLD: - if v < -8 or v > 8: - sys.exit("Invalid alpha value: 2^(%s)" % (value)) - - if profile in buf_profs and DYNAMIC_THRESHOLD not in buf_profs[profile]: - sys.exit("%s not using dynamic thresholding" % (profile)) - elif field == STATIC_THRESHOLD: - if v < 0: - sys.exit("Invalid static threshold value: (%s)" % (value)) - - buf_profs = self.db.get_table(BUFFER_PROFILE_TABLE_NAME) - if profile in buf_profs and STATIC_THRESHOLD not in buf_profs[profile]: - sys.exit("%s not using static threshold" % (profile)) - else: - sys.exit("Set field %s not supported" % (field)) - - if self.verbose: - print("Setting %s %s value to %s" % (profile, field, value)) - self.db.mod_entry(BUFFER_PROFILE_TABLE_NAME, profile, {field: value}) - - -def main(config): - if config: - parser = argparse.ArgumentParser(description='Show and change: mmu configuration', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', 
action='store_true', help='show mmu configuration') - parser.add_argument('-p', '--profile', type=str, help='specify buffer profile name', default=None) - parser.add_argument('-a', '--alpha', type=str, help='set n for dyanmic threshold alpha 2^(n)', default=None) - parser.add_argument('-s', '--staticth', type=str, help='set n for static threshold', default=None) - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - else: - parser = argparse.ArgumentParser(description='Show buffer state', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show buffer state') - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - - parser.add_argument('-vv', '--verbose', action='store_true', help='verbose output', default=False) - - args = parser.parse_args() - - try: - mmu_cfg = MmuConfig(args.verbose, config) - if args.list: - mmu_cfg.list() - elif config and args.profile: - import pdb; pdb.set_trace() - if args.alpha: - mmu_cfg.set(args.profile, "alpha", args.alpha) - elif args.staticth: - mmu_cfg.set(args.profile, "staticth", args.staticth) - else: - parser.print_help() - sys.exit(1) - - except Exception as e: - print("Exception caught: ", str(e), file=sys.stderr) - traceback.print_exc() - sys.exit(1) - -if __name__ == "__main__": - if sys.argv[0].split('/')[-1] == "mmuconfig": - main(True) - else: - main(False) From c019c48b0988099ec8ee726ca44021c2dba6c1ee Mon Sep 17 00:00:00 2001 From: jingwenxie Date: Thu, 5 Sep 2024 07:52:48 +0800 Subject: [PATCH 40/67] Exit early if YANG validation fails in Golden Config (#3490) What I did Exit early if golden config fails YANG validation How I did it Check before stop service in load_minigraph How to verify it Unit test Previous command output (if the output of --- config/main.py | 21 ++++- .../golden_input_yang_failure.json | 89 ------------------- .../partial_config_override.json | 24 +++++ 
tests/config_override_test.py | 14 +-- tests/config_test.py | 3 +- 5 files changed, 44 insertions(+), 107 deletions(-) delete mode 100644 tests/config_override_input/golden_input_yang_failure.json diff --git a/config/main.py b/config/main.py index 80f08bebd9..a19f1bf486 100644 --- a/config/main.py +++ b/config/main.py @@ -17,6 +17,7 @@ import itertools import copy import tempfile +import sonic_yang from jsonpatch import JsonPatchConflict from jsonpointer import JsonPointerException @@ -59,7 +60,7 @@ from . import vlan from . import vxlan from . import plugins -from .config_mgmt import ConfigMgmtDPB, ConfigMgmt +from .config_mgmt import ConfigMgmtDPB, ConfigMgmt, YANG_DIR from . import mclag from . import syslog from . import switchport @@ -1994,8 +1995,22 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, fg='magenta') raise click.Abort() - # Dependency check golden config json config_to_check = read_json_file(golden_config_path) + if multi_asic.is_multi_asic(): + # Multiasic has not 100% fully validated. Thus pass here. + pass + else: + sy = sonic_yang.SonicYang(YANG_DIR) + sy.loadYangModel() + try: + sy.loadData(configdbJson=config_to_check) + sy.validate_data_tree() + except sonic_yang.SonicYangException as e: + click.secho("{} fails YANG validation! 
Error: {}".format(golden_config_path, str(e)), + fg='magenta') + raise click.Abort() + + # Dependency check golden config json if multi_asic.is_multi_asic(): host_config = config_to_check.get('localhost', {}) else: @@ -2322,7 +2337,7 @@ def aaa_table_hard_dependency_check(config_json): tacacs_enable = "tacacs+" in aaa_authentication_login.split(",") tacplus_passkey = TACPLUS_TABLE.get("global", {}).get("passkey", "") if tacacs_enable and len(tacplus_passkey) == 0: - click.secho("Authentication with 'tacacs+' is not allowed when passkey not exits.", fg="magenta") + click.secho("Authentication with 'tacacs+' is not allowed when passkey not exists.", fg="magenta") sys.exit(1) diff --git a/tests/config_override_input/golden_input_yang_failure.json b/tests/config_override_input/golden_input_yang_failure.json deleted file mode 100644 index 4b533e1598..0000000000 --- a/tests/config_override_input/golden_input_yang_failure.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "running_config": { - "ACL_TABLE": { - "DATAACL": { - "policy_desc": "DATAACL", - "ports": [ - "Ethernet4" - ], - "stage": "ingress", - "type": "L3" - }, - "NTP_ACL": { - "policy_desc": "NTP_ACL", - "services": [ - "NTP" - ], - "stage": "ingress", - "type": "CTRLPLANE" - } - }, - "AUTO_TECHSUPPORT_FEATURE": { - "bgp": { - "rate_limit_interval": "600", - "state": "enabled" - }, - "database": { - "rate_limit_interval": "600", - "state": "enabled" - } - }, - "PORT": { - "Ethernet4": { - "admin_status": "up", - "alias": "fortyGigE0/4", - "description": "Servers0:eth0", - "index": "1", - "lanes": "29,30,31,32", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000", - "tpid": "0x8100" - }, - "Ethernet8": { - "admin_status": "up", - "alias": "fortyGigE0/8", - "description": "Servers1:eth0", - "index": "2", - "lanes": "33,34,35,36", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000", - "tpid": "0x8100" - } - } - }, - "golden_config": { - "ACL_TABLE": { - "EVERFLOWV6": { - "policy_desc": "EVERFLOWV6", - "ports": [ 
- "Ethernet0" - ], - "stage": "ingress", - "type": "MIRRORV6" - } - }, - "AUTO_TECHSUPPORT_FEATURE": { - "bgp": { - "state": "disabled" - }, - "database": { - "state": "disabled" - } - }, - "PORT": { - "Ethernet12": { - "admin_status": "up", - "alias": "fortyGigE0/12", - "description": "Servers2:eth0", - "index": "3", - "lanes": "37,38,39,40", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000", - "tpid": "0x8100" - } - } - } -} diff --git a/tests/config_override_input/partial_config_override.json b/tests/config_override_input/partial_config_override.json index 2021ea282b..f28a8ed7ae 100644 --- a/tests/config_override_input/partial_config_override.json +++ b/tests/config_override_input/partial_config_override.json @@ -71,6 +71,30 @@ "stage": "ingress", "type": "CTRLPLANE" } + }, + "PORT": { + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + } } }, "expected_config": { diff --git a/tests/config_override_test.py b/tests/config_override_test.py index a46be5ef60..5137585832 100644 --- a/tests/config_override_test.py +++ b/tests/config_override_test.py @@ -20,7 +20,6 @@ EMPTY_TABLE_REMOVAL = os.path.join(DATA_DIR, "empty_table_removal.json") AAA_YANG_HARD_CHECK = os.path.join(DATA_DIR, "aaa_yang_hard_check.json") RUNNING_CONFIG_YANG_FAILURE = os.path.join(DATA_DIR, "running_config_yang_failure.json") -GOLDEN_INPUT_YANG_FAILURE = os.path.join(DATA_DIR, "golden_input_yang_failure.json") FINAL_CONFIG_YANG_FAILURE = os.path.join(DATA_DIR, "final_config_yang_failure.json") MULTI_ASIC_MACSEC_OV = os.path.join(DATA_DIR, "multi_asic_macsec_ov.json") MULTI_ASIC_FEATURE_RM = 
os.path.join(DATA_DIR, "multi_asic_feature_rm.json") @@ -179,7 +178,7 @@ def read_json_file_side_effect(filename): ['golden_config_db.json'], obj=db) assert result.exit_code != 0 - assert "Authentication with 'tacacs+' is not allowed when passkey not exits." in result.output + assert "Authentication with 'tacacs+' is not allowed when passkey not exists." in result.output def check_override_config_table(self, db, config, running_config, golden_config, expected_config): @@ -233,17 +232,6 @@ def is_yang_config_validation_enabled_side_effect(filename): self.check_yang_verification_failure( db, config, read_data['running_config'], read_data['golden_config'], "running config") - def test_golden_input_yang_failure(self): - def is_yang_config_validation_enabled_side_effect(filename): - return True - db = Db() - with open(GOLDEN_INPUT_YANG_FAILURE, "r") as f: - read_data = json.load(f) - with mock.patch('config.main.device_info.is_yang_config_validation_enabled', - mock.MagicMock(side_effect=is_yang_config_validation_enabled_side_effect)): - self.check_yang_verification_failure( - db, config, read_data['running_config'], read_data['golden_config'], "config_input") - def test_final_config_yang_failure(self): def is_yang_config_validation_enabled_side_effect(filename): return True diff --git a/tests/config_test.py b/tests/config_test.py index 74bc0e1093..21eb095789 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -1128,7 +1128,6 @@ def read_json_file_side_effect(filename): }, "TACPLUS": { "global": { - "passkey": "" } } } @@ -1140,7 +1139,7 @@ def read_json_file_side_effect(filename): runner = CliRunner() result = runner.invoke(config.config.commands["load_minigraph"], ["--override_config", "-y"]) assert result.exit_code != 0 - assert "Authentication with 'tacacs+' is not allowed when passkey not exits." in result.output + assert "Authentication with 'tacacs+' is not allowed when passkey not exists." 
in result.output @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=("dummy_path", None))) def test_load_minigraph_with_traffic_shift_away(self, get_cmd_module): From 8f5e4b6fb85402592e2a44e27ac92385c9bcbeac Mon Sep 17 00:00:00 2001 From: bktsim <144830673+bktsim-arista@users.noreply.github.com> Date: Thu, 5 Sep 2024 09:43:26 -0700 Subject: [PATCH 41/67] Fix multi-asic behaviour for mmuconfig (#3061) * Fixes multi-asic behaviour for mmuconfig Previously, mmuconfig did not function correctly on multi-asic devices as it did not traverse through the correct namespaces, as required for multi-asic devices. This change adds multi-asic support to mmuconfig with the use of multi-asic helper libraries, ensuring that mmuconfig searches throuugh the correct namespaces when used on multi-asic devices, and adds the '-n' optional argument for users to specify namespaces on multi-asic devices. * Fixes for linter * More linter fixes * Enhanced multi-asic support for mmuconfig - Resolve pre-commit errors - Remove use_unix_socket_path argument from DB connectors - Support multiple namespace when none specified - Refactor tests to use the testData dict - Delete single_asic_mmuconfig_test.py - Replace argparse with click in mmuconfig - Add support for namespace in show and config - Modified multi-asic tests to use show/config cli --------- Co-authored-by: rdjeric Co-authored-by: arista-hpandya --- config/main.py | 17 +- scripts/mmuconfig | 128 ++++----- show/main.py | 33 ++- .../mmuconfig_input/mmuconfig_test_vectors.py | 265 +++++++++++++++++- tests/mmuconfig_test.py | 38 +-- tests/mock_tables/asic0/config_db.json | 19 ++ tests/mock_tables/asic1/config_db.json | 35 +++ tests/multi_asic_mmuconfig_test.py | 49 ++++ 8 files changed, 483 insertions(+), 101 deletions(-) create mode 100644 tests/multi_asic_mmuconfig_test.py diff --git a/config/main.py b/config/main.py index a19f1bf486..80d64b98eb 100644 --- a/config/main.py +++ 
b/config/main.py @@ -6470,13 +6470,26 @@ def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbos @config.command() @click.option('-p', metavar='', type=str, required=True, help="Profile name") @click.option('-a', metavar='', type=click.IntRange(-8,8), help="Set alpha for profile type dynamic") -@click.option('-s', metavar='', type=int, help="Set staticth for profile type static") -def mmu(p, a, s): +@click.option('-s', metavar='', type=click.IntRange(min=0), help="Set staticth for profile type static") +@click.option('--verbose', '-vv', is_flag=True, help="Enable verbose output") +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def mmu(p, a, s, namespace, verbose): """mmuconfig configuration tasks""" log.log_info("'mmuconfig -p {}' executing...".format(p)) command = ['mmuconfig', '-p', str(p)] if a is not None: command += ['-a', str(a)] if s is not None: command += ['-s', str(s)] + if namespace is not None: + command += ['-n', str(namespace)] + if verbose: + command += ['-vv'] clicommon.run_command(command) diff --git a/scripts/mmuconfig b/scripts/mmuconfig index ebeb74fdaf..3986f3ba1b 100755 --- a/scripts/mmuconfig +++ b/scripts/mmuconfig @@ -18,17 +18,23 @@ optional arguments: import os import sys -import argparse +import click import tabulate import traceback import json +from utilities_common.general import load_db_config +from sonic_py_common import multi_asic +from utilities_common import multi_asic as multi_asic_util BUFFER_POOL_TABLE_NAME = "BUFFER_POOL" BUFFER_PROFILE_TABLE_NAME = "BUFFER_PROFILE" DEFAULT_LOSSLESS_BUFFER_PARAMETER_NAME = "DEFAULT_LOSSLESS_BUFFER_PARAMETER" DYNAMIC_THRESHOLD = "dynamic_th" +DYNAMIC_THRESHOLD_MIN = -8 +DYNAMIC_THRESHOLD_MAX = 8 STATIC_THRESHOLD = "static_th" +STATIC_THRESHOLD_MIN = 0 BUFFER_PROFILE_FIELDS = { "alpha": DYNAMIC_THRESHOLD, "staticth" 
: STATIC_THRESHOLD @@ -42,6 +48,11 @@ try: sys.path.insert(0, modules_path) sys.path.insert(0, tests_path) import mock_tables.dbconnector + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import mock_tables.mock_multi_asic + mock_tables.dbconnector.load_namespace_config() + else: + mock_tables.dbconnector.load_database_config() except KeyError: pass @@ -49,22 +60,21 @@ except KeyError: from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector class MmuConfig(object): - def __init__(self, verbose, config, filename): + def __init__(self, verbose, config, filename, namespace): self.verbose = verbose self.config = config self.filename = filename + self.namespace = namespace + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.config_db = None + self.db = None - # Set up db connections - if self.config: - self.db = ConfigDBConnector() - self.db.connect() - else: - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.STATE_DB, False) + # For unit testing + self.updated_profile_table = {} def get_table(self, tablename): if self.config: - return self.db.get_table(tablename) + return self.config_db.get_table(tablename) entries = {} keys = self.db.keys(self.db.STATE_DB, tablename + '*') @@ -77,13 +87,15 @@ class MmuConfig(object): return entries + @multi_asic_util.run_on_multi_asic def list(self): + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' lossless_traffic_pattern = self.get_table(DEFAULT_LOSSLESS_BUFFER_PARAMETER_NAME) if lossless_traffic_pattern: for _, pattern in lossless_traffic_pattern.items(): config = [] - print("Lossless traffic pattern:") + print(f"Lossless traffic pattern{namespace_str}:") for field, value in pattern.items(): config.append([field, value]) print(tabulate.tabulate(config) + "\n") @@ -93,97 +105,88 @@ class MmuConfig(object): for pool_name, pool_data in buf_pools.items(): config = [] - print("Pool: 
" + pool_name) + print(f"Pool{namespace_str}: " + pool_name) for field, value in pool_data.items(): config.append([field, value]) print(tabulate.tabulate(config) + "\n") if self.verbose: print("Total pools: %d\n\n" % len(buf_pools)) else: - print("No buffer pool information available") + print(f"No buffer pool information available{namespace_str}") buf_profs = self.get_table(BUFFER_PROFILE_TABLE_NAME) if buf_profs: for prof_name, prof_data in buf_profs.items(): config = [] - print("Profile: " + prof_name) + print(f"Profile{namespace_str}: " + prof_name) for field, value in prof_data.items(): config.append([field, value]) print(tabulate.tabulate(config) + "\n") if self.verbose: print("Total profiles: %d" % len(buf_profs)) else: - print("No buffer profile information available") + print(f"No buffer profile information available{namespace_str}") + @multi_asic_util.run_on_multi_asic def set(self, profile, field_alias, value): + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' if os.geteuid() != 0 and os.environ.get("UTILITIES_UNIT_TESTING", "0") != "2": sys.exit("Root privileges required for this operation") field = BUFFER_PROFILE_FIELDS[field_alias] - buf_profs = self.db.get_table(BUFFER_PROFILE_TABLE_NAME) - v = int(value) + buf_profs = self.config_db.get_table(BUFFER_PROFILE_TABLE_NAME) if field == DYNAMIC_THRESHOLD: - if v < -8 or v > 8: - sys.exit("Invalid alpha value: 2^(%s)" % (value)) - if profile in buf_profs and DYNAMIC_THRESHOLD not in buf_profs[profile]: sys.exit("%s not using dynamic thresholding" % (profile)) elif field == STATIC_THRESHOLD: - if v < 0: - sys.exit("Invalid static threshold value: (%s)" % (value)) - if profile in buf_profs and STATIC_THRESHOLD not in buf_profs[profile]: sys.exit("%s not using static threshold" % (profile)) else: sys.exit("Set field %s not supported" % (field)) if self.verbose: - print("Setting %s %s value to %s" % (profile, field, value)) - 
self.db.mod_entry(BUFFER_PROFILE_TABLE_NAME, profile, {field: value}) + print("Setting %s %s value to %s%s" % (profile, field, value, namespace_str)) + self.config_db.mod_entry(BUFFER_PROFILE_TABLE_NAME, profile, {field: value}) if self.filename is not None: - prof_table = self.db.get_table(BUFFER_PROFILE_TABLE_NAME) + self.updated_profile_table[self.multi_asic.current_namespace] = self.config_db.get_table(BUFFER_PROFILE_TABLE_NAME) with open(self.filename, "w") as fd: - json.dump(prof_table, fd) - - -def main(config): - if config: - parser = argparse.ArgumentParser(description='Show and change: mmu configuration', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show mmu configuration') - parser.add_argument('-p', '--profile', type=str, help='specify buffer profile name', default=None) - parser.add_argument('-a', '--alpha', type=str, help='set n for dyanmic threshold alpha 2^(n)', default=None) - parser.add_argument('-s', '--staticth', type=str, help='set static threshold', default=None) - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - else: - parser = argparse.ArgumentParser(description='Show buffer state', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show buffer state') - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - - parser.add_argument('-vv', '--verbose', action='store_true', help='verbose output', default=False) - parser.add_argument('-f', '--filename', help='file used by mock tests', type=str, default=None) - + json.dump(self.updated_profile_table, fd) + +@click.command(help='Show and change: mmu configuration') +@click.option('-l', '--list', 'show_config', is_flag=True, help='show mmu configuration') +@click.option('-p', '--profile', type=str, help='specify buffer profile name', default=None) +@click.option('-a', '--alpha', 
type=click.IntRange(DYNAMIC_THRESHOLD_MIN, DYNAMIC_THRESHOLD_MAX), help='set n for dyanmic threshold alpha 2^(n)', default=None) +@click.option('-s', '--staticth', type=click.IntRange(min=STATIC_THRESHOLD_MIN), help='set static threshold', default=None) +@click.option('-n', '--namespace', type=click.Choice(multi_asic.get_namespace_list()), help='Namespace name or skip for all', default=None) +@click.option('-vv', '--verbose', is_flag=True, help='verbose output', default=False) +@click.version_option(version='1.0') +def main(show_config, profile, alpha, staticth, namespace, verbose): + # A test file created for unit test purposes + filename=None if os.environ.get("UTILITIES_UNIT_TESTING", "0") == "2": - sys.argv.extend(['-f', '/tmp/mmuconfig']) + filename = '/tmp/mmuconfig' - - args = parser.parse_args() + # Buffershow and mmuconfig cmds share this script + # Buffershow cmd cannot modify configs hence config is set to False + config = True if sys.argv[0].split('/')[-1] == "mmuconfig" else False try: - mmu_cfg = MmuConfig(args.verbose, config, args.filename) - if args.list: + load_db_config() + mmu_cfg = MmuConfig(verbose, config, filename, namespace) + + # Both mmuconfig and buffershow have access to show_config option + if show_config: mmu_cfg.list() - elif config and args.profile: - if args.alpha: - mmu_cfg.set(args.profile, "alpha", args.alpha) - elif args.staticth: - mmu_cfg.set(args.profile, "staticth", args.staticth) + # Buffershow cannot modify profiles + elif config and profile: + if alpha: + mmu_cfg.set(profile, "alpha", alpha) + elif staticth: + mmu_cfg.set(profile, "staticth", staticth) else: - parser.print_help() + ctx = click.get_current_context() + click.echo(ctx.get_help()) sys.exit(1) except Exception as e: @@ -192,7 +195,4 @@ def main(config): sys.exit(1) if __name__ == "__main__": - if sys.argv[0].split('/')[-1] == "mmuconfig": - main(True) - else: - main(False) + main() diff --git a/show/main.py b/show/main.py index 8d3f117b2f..1275c9e28a 100755 
--- a/show/main.py +++ b/show/main.py @@ -291,7 +291,6 @@ def cli(ctx): load_db_config() ctx.obj = Db() - # Add groups from other modules cli.add_command(acl.acl) cli.add_command(chassis_modules.chassis) @@ -2033,9 +2032,22 @@ def boot(): # 'mmu' command ("show mmu") # @cli.command('mmu') -def mmu(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +@click.option('--verbose', '-vv', is_flag=True, help="Enable verbose output") +def mmu(namespace, verbose): """Show mmu configuration""" cmd = ['mmuconfig', '-l'] + if namespace is not None: + cmd += ['-n', str(namespace)] + if verbose: + cmd += ['-vv'] run_command(cmd) # @@ -2049,10 +2061,25 @@ def buffer(): # # 'configuration' command ("show buffer command") # + + @buffer.command() -def configuration(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +@click.option('--verbose', '-vv', is_flag=True, help="Enable verbose output") +def configuration(namespace, verbose): """show buffer configuration""" cmd = ['mmuconfig', '-l'] + if namespace is not None: + cmd += ['-n', str(namespace)] + if verbose: + cmd += ['-vv'] run_command(cmd) # diff --git a/tests/mmuconfig_input/mmuconfig_test_vectors.py b/tests/mmuconfig_input/mmuconfig_test_vectors.py index c20a964516..1d72ed6725 100644 --- a/tests/mmuconfig_input/mmuconfig_test_vectors.py +++ b/tests/mmuconfig_input/mmuconfig_test_vectors.py @@ -83,30 +83,267 @@ """ +show_mmu_config_asic0 = """\ +Pool for namespace asic0: ingress_lossy_pool +---- ------- +mode dynamic +type ingress +---- ------- + +Pool for namespace asic0: ingress_lossless_pool_hbm +---- --------- +mode static +size 139458240 +type ingress +---- --------- + +Profile for namespace asic0: 
ingress_lossy_profile +---------- ------------------ +dynamic_th 3 +pool ingress_lossy_pool +size 0 +---------- ------------------ + +Profile for namespace asic0: ingress_lossless_profile_hbm +--------- ------------------------- +static_th 12121212 +pool ingress_lossless_pool_hbm +size 0 +--------- ------------------------- + +""" + +show_mmu_config_asic1_verbose = """\ +Pool for namespace asic1: ingress_lossless_pool +---- ------- +mode dynamic +type ingress +---- ------- + +Pool for namespace asic1: egress_lossless_pool +---- -------- +mode dynamic +size 13945824 +type egress +---- -------- + +Pool for namespace asic1: egress_lossy_pool +---- ------- +mode dynamic +type egress +---- ------- + +Total pools: 3 + + +Profile for namespace asic1: alpha_profile +------------- --------------------- +dynamic_th 0 +pool ingress_lossless_pool +headroom_type dynamic +------------- --------------------- + +Profile for namespace asic1: headroom_profile +---------- --------------------- +dynamic_th 0 +pool ingress_lossless_pool +xon 18432 +xoff 32768 +size 51200 +---------- --------------------- + +Profile for namespace asic1: egress_lossless_profile +---------- -------------------- +dynamic_th 0 +pool egress_lossless_pool +size 0 +---------- -------------------- + +Profile for namespace asic1: egress_lossy_profile +---------- ----------------- +dynamic_th 0 +pool egress_lossy_pool +size 0 +---------- ----------------- + +Total profiles: 4 +""" + +show_mmu_config_all_masic = """\ +Pool for namespace asic0: ingress_lossy_pool +---- ------- +mode dynamic +type ingress +---- ------- + +Pool for namespace asic0: ingress_lossless_pool_hbm +---- --------- +mode static +size 139458240 +type ingress +---- --------- + +Profile for namespace asic0: ingress_lossy_profile +---------- ------------------ +dynamic_th 3 +pool ingress_lossy_pool +size 0 +---------- ------------------ + +Profile for namespace asic0: ingress_lossless_profile_hbm +--------- ------------------------- +static_th 
12121212 +pool ingress_lossless_pool_hbm +size 0 +--------- ------------------------- + +Pool for namespace asic1: ingress_lossless_pool +---- ------- +mode dynamic +type ingress +---- ------- + +Pool for namespace asic1: egress_lossless_pool +---- -------- +mode dynamic +size 13945824 +type egress +---- -------- + +Pool for namespace asic1: egress_lossy_pool +---- ------- +mode dynamic +type egress +---- ------- + +Profile for namespace asic1: alpha_profile +------------- --------------------- +dynamic_th 0 +pool ingress_lossless_pool +headroom_type dynamic +------------- --------------------- + +Profile for namespace asic1: headroom_profile +---------- --------------------- +dynamic_th 0 +pool ingress_lossless_pool +xon 18432 +xoff 32768 +size 51200 +---------- --------------------- + +Profile for namespace asic1: egress_lossless_profile +---------- -------------------- +dynamic_th 0 +pool egress_lossless_pool +size 0 +---------- -------------------- + +Profile for namespace asic1: egress_lossy_profile +---------- ----------------- +dynamic_th 0 +pool egress_lossy_pool +size 0 +---------- ----------------- + +""" + testData = { 'mmuconfig_list' : {'cmd' : ['show'], 'args' : [], 'rc' : 0, 'rc_output': show_mmu_config }, - 'mmu_cfg_static_th' : {'cmd' : ['config'], - 'args' : ['-p', 'ingress_lossless_profile_hbm', '-s', '12121213'], - 'rc' : 0, - 'db_table' : 'BUFFER_PROFILE', - 'cmp_args' : ['ingress_lossless_profile_hbm,static_th,12121213'], - 'rc_msg' : '' - }, + 'mmu_cfg_static_th': {'cmd': ['config'], + 'args': ['-p', 'ingress_lossless_profile_hbm', '-s', '12121213'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': [',ingress_lossless_profile_hbm,static_th,12121213'], + 'rc_msg': '' + }, 'mmu_cfg_alpha' : {'cmd' : ['config'], 'args' : ['-p', 'alpha_profile', '-a', '2'], 'rc' : 0, 'db_table' : 'BUFFER_PROFILE', - 'cmp_args' : ['alpha_profile,dynamic_th,2'], + 'cmp_args': [',alpha_profile,dynamic_th,2'], 'rc_msg' : '' }, - 'mmu_cfg_alpha_invalid' : 
{'cmd' : ['config'], - 'args' : ['-p', 'alpha_profile', '-a', '12'], - 'rc' : 2, - 'rc_msg' : 'Usage: mmu [OPTIONS]\nTry "mmu --help" for help.\n\nError: Invalid value for "-a": 12 is not in the valid range of -8 to 8.\n' - } - + 'mmu_cfg_alpha_invalid': {'cmd': ['config'], + 'args': ['-p', 'alpha_profile', '-a', '12'], + 'rc': 2, + 'rc_msg': ('Usage: mmu [OPTIONS]\nTry "mmu --help" for help.\n' + '\nError: Invalid value for "-a": 12 is not in the ' + 'valid range of -8 to 8.\n') + }, + 'mmu_cfg_list_one_masic': {'cmd': ['show'], + 'args': ['-n', 'asic0'], + 'rc': 0, + 'rc_output': show_mmu_config_asic0 + }, + 'mmu_cfg_list_one_verbose_masic': {'cmd': ['show'], + 'args': ['-n', 'asic1', '-vv'], + 'rc': 0, + 'rc_output': show_mmu_config_asic1_verbose + }, + 'mmu_cfg_list_all_masic': {'cmd': ['show'], + 'args': [], + 'rc': 0, + 'rc_output': show_mmu_config_all_masic + }, + 'mmu_cfg_alpha_one_masic': {'cmd': ['config'], + 'args': ['-p', 'alpha_profile', '-a', '2', '-n', 'asic0'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': ['asic0,alpha_profile,dynamic_th,2'], + 'rc_msg': '' + }, + 'mmu_cfg_alpha_all_verbose_masic': {'cmd': ['config'], + 'args': ['-p', 'alpha_profile', '-a', '2', '-vv'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': ['asic0,alpha_profile,dynamic_th,2', + 'asic1,alpha_profile,dynamic_th,2'], + 'rc_msg': ('Setting alpha_profile dynamic_th value ' + 'to 2 for namespace asic0\n' + 'Setting alpha_profile dynamic_th value ' + 'to 2 for namespace asic1\n') + }, + 'mmu_cfg_static_th_one_masic': {'cmd': ['config'], + 'args': ['-p', 'ingress_lossless_profile_hbm', + '-s', '12121215', '-n', 'asic0'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': ['asic0,ingress_lossless_profile_hbm,static_th,12121215'], + 'rc_msg': '' + }, + 'mmu_cfg_static_th_all_verbose_masic': {'cmd': ['config'], + 'args': ['-p', 'ingress_lossless_profile_hbm', + '-s', '12121214', '-vv'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': 
[('asic0,ingress_lossless_profile_hbm,' + 'static_th,12121214'), + ('asic1,ingress_lossless_profile_hbm,' + 'static_th,12121214')], + 'rc_msg': ('Setting ingress_lossless_profile_hbm static_th ' + 'value to 12121214 for namespace asic0\n' + 'Setting ingress_lossless_profile_hbm static_th ' + 'value to 12121214 for namespace asic1\n') + }, + 'mmu_cfg_alpha_invalid_masic': {'cmd': ['config'], + 'args': ['-p', 'alpha_profile', '-a', '12'], + 'rc': 2, + 'rc_msg': ('Usage: mmu [OPTIONS]\n' + 'Try "mmu --help" for help.\n\n' + 'Error: Invalid value for "-a": 12 ' + 'is not in the valid range of -8 to 8.\n') + }, + 'mmu_cfg_static_th_invalid_masic': {'cmd': ['config'], + 'args': ['-p', 'ingress_lossless_profile_hbm', '-s', '-1'], + 'rc': 2, + 'rc_msg': ('Usage: mmu [OPTIONS]\n' + 'Try "mmu --help" for help.\n\n' + 'Error: Invalid value for "-s": ' + '-1 is smaller than the minimum valid value 0.\n') + } } diff --git a/tests/mmuconfig_test.py b/tests/mmuconfig_test.py index 7218270e36..03a849eed5 100644 --- a/tests/mmuconfig_test.py +++ b/tests/mmuconfig_test.py @@ -7,7 +7,7 @@ import config.main as config import show.main as show from utilities_common.db import Db -from .mmuconfig_input.mmuconfig_test_vectors import * +from .mmuconfig_input.mmuconfig_test_vectors import testData test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -16,24 +16,12 @@ sys.path.insert(0, modules_path) -class Testmmuconfig(object): +class TestMmuConfigBase(object): @classmethod def setup_class(cls): + print('SETUP') os.environ["PATH"] += os.pathsep + scripts_path os.environ['UTILITIES_UNIT_TESTING'] = "2" - print("SETUP") - - def test_mmu_show_config(self): - self.executor(testData['mmuconfig_list']) - - def test_mmu_alpha_config(self): - self.executor(testData['mmu_cfg_alpha']) - - def test_mmu_alpha_invalid_config(self): - self.executor(testData['mmu_cfg_alpha_invalid']) - - def test_mmu_staticth_config(self): - 
self.executor(testData['mmu_cfg_static_th']) def executor(self, input): runner = CliRunner() @@ -48,6 +36,7 @@ def executor(self, input): result = runner.invoke(exec_cmd, input['args']) exit_code = result.exit_code output = result.output + elif 'config' in input['cmd']: exec_cmd = config.config.commands["mmu"] result = runner.invoke(exec_cmd, input['args'], catch_exceptions=False) @@ -66,8 +55,8 @@ def executor(self, input): fd = open('/tmp/mmuconfig', 'r') cmp_data = json.load(fd) for args in input['cmp_args']: - profile, name, value = args.split(',') - assert(cmp_data[profile][name] == value) + namespace, profile, name, value = args.split(',') + assert(cmp_data[namespace][profile][name] == value) fd.close() if 'rc_msg' in input: @@ -76,7 +65,6 @@ def executor(self, input): if 'rc_output' in input: assert output == input['rc_output'] - @classmethod def teardown_class(cls): os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) @@ -84,3 +72,17 @@ def teardown_class(cls): if os.path.isfile('/tmp/mmuconfig'): os.remove('/tmp/mmuconfig') print("TEARDOWN") + + +class TestMmuConfig(TestMmuConfigBase): + def test_mmu_show_config(self): + self.executor(testData['mmuconfig_list']) + + def test_mmu_alpha_config(self): + self.executor(testData['mmu_cfg_alpha']) + + def test_mmu_alpha_invalid_config(self): + self.executor(testData['mmu_cfg_alpha_invalid']) + + def test_mmu_staticth_config(self): + self.executor(testData['mmu_cfg_static_th']) diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json index da38af13dd..593170630f 100644 --- a/tests/mock_tables/asic0/config_db.json +++ b/tests/mock_tables/asic0/config_db.json @@ -326,5 +326,24 @@ "QUEUE|Ethernet4|1": { "scheduler": "[SCHEDULAR|scheduler.0]", "wred_profile": "AZURE_LOSSLESS" + }, + "BUFFER_POOL|ingress_lossy_pool": { + "mode": "dynamic", + "type": "ingress" + }, + "BUFFER_POOL|ingress_lossless_pool_hbm": { + "mode": "static", + "size": "139458240", + 
"type": "ingress" + }, + "BUFFER_PROFILE|ingress_lossy_profile": { + "dynamic_th": "3", + "pool": "ingress_lossy_pool", + "size": "0" + }, + "BUFFER_PROFILE|ingress_lossless_profile_hbm": { + "static_th": "12121212", + "pool": "ingress_lossless_pool_hbm", + "size": "0" } } diff --git a/tests/mock_tables/asic1/config_db.json b/tests/mock_tables/asic1/config_db.json index 1bcd812ef2..5c1d9f344c 100644 --- a/tests/mock_tables/asic1/config_db.json +++ b/tests/mock_tables/asic1/config_db.json @@ -262,5 +262,40 @@ }, "QUEUE|Ethernet0|1": { "scheduler": "[SCHEDULAR|scheduler.0]" + }, + "BUFFER_POOL|ingress_lossless_pool": { + "mode": "dynamic", + "type": "ingress" + }, + "BUFFER_PROFILE|alpha_profile": { + "dynamic_th": "0", + "pool": "ingress_lossless_pool", + "headroom_type": "dynamic" + }, + "BUFFER_PROFILE|headroom_profile": { + "dynamic_th": "0", + "pool": "ingress_lossless_pool", + "xon": "18432", + "xoff": "32768", + "size": "51200" + }, + "BUFFER_POOL|egress_lossless_pool": { + "mode": "dynamic", + "size": "13945824", + "type": "egress" + }, + "BUFFER_PROFILE|egress_lossless_profile": { + "dynamic_th": "0", + "pool": "egress_lossless_pool", + "size": "0" + }, + "BUFFER_POOL|egress_lossy_pool": { + "mode": "dynamic", + "type": "egress" + }, + "BUFFER_PROFILE|egress_lossy_profile": { + "dynamic_th": "0", + "pool": "egress_lossy_pool", + "size": "0" } } diff --git a/tests/multi_asic_mmuconfig_test.py b/tests/multi_asic_mmuconfig_test.py new file mode 100644 index 0000000000..1590d3f38f --- /dev/null +++ b/tests/multi_asic_mmuconfig_test.py @@ -0,0 +1,49 @@ +import os +import sys +from .mmuconfig_test import TestMmuConfigBase +from .mmuconfig_input.mmuconfig_test_vectors import testData + +root_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(root_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, root_path) +sys.path.insert(0, modules_path) + + +class TestMmuConfigMultiAsic(TestMmuConfigBase): + 
@classmethod + def setup_class(cls): + super().setup_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + + def test_mmu_show_config_one_masic(self): + self.executor(testData['mmu_cfg_list_one_masic']) + + def test_mmu_show_config_one_verbose_masic(self): + self.executor(testData['mmu_cfg_list_one_verbose_masic']) + + def test_mmu_show_config_all_masic(self): + self.executor(testData['mmu_cfg_list_all_masic']) + + def test_mmu_alpha_config_one_masic(self): + self.executor(testData['mmu_cfg_alpha_one_masic']) + + def test_mmu_alpha_config_all_verbose_masic(self): + self.executor(testData['mmu_cfg_alpha_all_verbose_masic']) + + def test_mmu_staticth_config_one_masic(self): + self.executor(testData['mmu_cfg_static_th_one_masic']) + + def test_mmu_staticth_config_all_verbose_masic(self): + self.executor(testData['mmu_cfg_static_th_all_verbose_masic']) + + def test_mmu_alpha_config_invalid_masic(self): + self.executor(testData['mmu_cfg_alpha_invalid_masic']) + + def test_mmu_staticth_config_invalid_masic(self): + self.executor(testData['mmu_cfg_static_th_invalid_masic']) + + @classmethod + def teardown_class(cls): + super().teardown_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" From 785d7bd5a1d20d427deedbf290eed4f7cdfcf3be Mon Sep 17 00:00:00 2001 From: HP Date: Thu, 5 Sep 2024 13:56:01 -0700 Subject: [PATCH 42/67] Fix multi-asic support to PFC config/show (#3521) * Add multi-asic support to pfc - Add namespace arg for show and config cmds for pfc - Replace test DB with JSON to support verification of multiple namespaces in unit test - Add unit tests for multi-asic behaviour - Added a test vector file for better test organization * Fix linter errors --- config/main.py | 18 +- pfc/main.py | 132 +++++++------ show/main.py | 10 +- tests/multi_asic_pfc_test.py | 133 +++++++++++++ tests/pfc_input/pfc_test_vectors.py | 286 ++++++++++++++++++++++++++++ tests/pfc_test.py | 48 +++-- 6 files changed, 554 insertions(+), 73 deletions(-) create 
mode 100644 tests/multi_asic_pfc_test.py create mode 100644 tests/pfc_input/pfc_test_vectors.py diff --git a/config/main.py b/config/main.py index 80d64b98eb..f4ea93e53f 100644 --- a/config/main.py +++ b/config/main.py @@ -6511,8 +6511,9 @@ def pfc(ctx): @pfc.command() @click.argument('interface_name', metavar='', required=True) @click.argument('status', type=click.Choice(['on', 'off'])) +@multi_asic_util.multi_asic_click_option_namespace @click.pass_context -def asymmetric(ctx, interface_name, status): +def asymmetric(ctx, interface_name, status, namespace): """Set asymmetric PFC configuration.""" # Get the config_db connector config_db = ctx.obj['config_db'] @@ -6522,7 +6523,11 @@ def asymmetric(ctx, interface_name, status): if interface_name is None: ctx.fail("'interface_name' is None!") - clicommon.run_command(['pfc', 'config', 'asymmetric', str(status), str(interface_name)]) + cmd = ['pfc', 'config', 'asymmetric', str(status), str(interface_name)] + if namespace is not None: + cmd += ['-n', str(namespace)] + + clicommon.run_command(cmd) # # 'pfc priority' command ('config interface pfc priority ...') @@ -6532,8 +6537,9 @@ def asymmetric(ctx, interface_name, status): @click.argument('interface_name', metavar='', required=True) @click.argument('priority', type=click.Choice([str(x) for x in range(8)])) @click.argument('status', type=click.Choice(['on', 'off'])) +@multi_asic_util.multi_asic_click_option_namespace @click.pass_context -def priority(ctx, interface_name, priority, status): +def priority(ctx, interface_name, priority, status, namespace): """Set PFC priority configuration.""" # Get the config_db connector config_db = ctx.obj['config_db'] @@ -6543,7 +6549,11 @@ def priority(ctx, interface_name, priority, status): if interface_name is None: ctx.fail("'interface_name' is None!") - clicommon.run_command(['pfc', 'config', 'priority', str(status), str(interface_name), str(priority)]) + cmd = ['pfc', 'config', 'priority', str(status), str(interface_name), 
str(priority)] + if namespace is not None: + cmd += ['-n', str(namespace)] + + clicommon.run_command(cmd) # # 'buffer' group ('config buffer ...') diff --git a/pfc/main.py b/pfc/main.py index f0b376e242..071b4a304e 100644 --- a/pfc/main.py +++ b/pfc/main.py @@ -1,39 +1,63 @@ #!/usr/bin/env python3 +import os import click -from swsscommon.swsscommon import ConfigDBConnector +import json +from sonic_py_common import multi_asic from tabulate import tabulate from natsort import natsorted +from utilities_common import multi_asic as multi_asic_util +# Constants ALL_PRIORITIES = [str(x) for x in range(8)] PRIORITY_STATUS = ['on', 'off'] +PORT_TABLE_NAME = "PORT" +PORT_QOS_MAP_TABLE_NAME = "PORT_QOS_MAP" class Pfc(object): - def __init__(self, cfgdb=None): - self.cfgdb = cfgdb + def __init__(self, namespace=None): + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.config_db = None + # For unit testing + self.updated_port_tables = {} + self.test_filename = '/tmp/pfc_testdata.json' + + def dump_config_to_json(self, table_name, namespace): + """ + This function dumps the current config in a JSON file for unit testing. + """ + # Only dump files in unit testing mode + if os.environ["UTILITIES_UNIT_TESTING"] != "2": + return + + if namespace not in self.updated_port_tables.keys(): + self.updated_port_tables[namespace] = {} + + self.updated_port_tables[namespace][table_name] = self.config_db.get_table(table_name) + with open(self.test_filename, "w") as fd: + json.dump(self.updated_port_tables, fd) + + @multi_asic_util.run_on_multi_asic def configPfcAsym(self, interface, pfc_asym): """ PFC handler to configure asymmetric PFC. 
""" - configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb - configdb.connect() - - configdb.mod_entry("PORT", interface, {'pfc_asym': pfc_asym}) + self.config_db.mod_entry(PORT_TABLE_NAME, interface, {'pfc_asym': pfc_asym}) + self.dump_config_to_json(PORT_TABLE_NAME, self.multi_asic.current_namespace) + @multi_asic_util.run_on_multi_asic def showPfcAsym(self, interface): """ PFC handler to display asymmetric PFC information. """ + namespace_str = f"Namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' header = ('Interface', 'Asymmetric') - configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb - configdb.connect() - if interface: - db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|{0}'.format(interface)) + db_keys = self.config_db.keys(self.config_db.CONFIG_DB, 'PORT|{0}'.format(interface)) else: - db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|*') + db_keys = self.config_db.keys(self.config_db.CONFIG_DB, 'PORT|*') table = [] @@ -43,36 +67,35 @@ def showPfcAsym(self, interface): key = i.split('|')[-1] if key and key.startswith('Ethernet'): - entry = configdb.get_entry('PORT', key) + entry = self.config_db.get_entry(PORT_TABLE_NAME, key) table.append([key, entry.get('pfc_asym', 'N/A')]) sorted_table = natsorted(table) - click.echo() + click.echo(namespace_str) click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) click.echo() + @multi_asic_util.run_on_multi_asic def configPfcPrio(self, status, interface, priority): - configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb - configdb.connect() - - if interface not in configdb.get_keys('PORT_QOS_MAP'): + if interface not in self.config_db.get_keys(PORT_QOS_MAP_TABLE_NAME): click.echo('Cannot find interface {0}'.format(interface)) return """Current lossless priorities on the interface""" - entry = configdb.get_entry('PORT_QOS_MAP', interface) + entry = self.config_db.get_entry('PORT_QOS_MAP', interface) 
enable_prio = entry.get('pfc_enable').split(',') """Avoid '' in enable_prio""" enable_prio = [x.strip() for x in enable_prio if x.strip()] + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' if status == 'on' and priority in enable_prio: - click.echo('Priority {0} has already been enabled on {1}'.format(priority, interface)) + click.echo('Priority {0} has already been enabled on {1}{2}'.format(priority, interface, namespace_str)) return if status == 'off' and priority not in enable_prio: - click.echo('Priority {0} is not enabled on {1}'.format(priority, interface)) + click.echo('Priority {0} is not enabled on {1}{2}'.format(priority, interface, namespace_str)) return if status == 'on': @@ -82,11 +105,10 @@ def configPfcPrio(self, status, interface, priority): enable_prio.remove(priority) enable_prio.sort() - configdb.mod_entry("PORT_QOS_MAP", interface, {'pfc_enable': ','.join(enable_prio)}) + self.config_db.mod_entry(PORT_QOS_MAP_TABLE_NAME, interface, {'pfc_enable': ','.join(enable_prio)}) + self.dump_config_to_json(PORT_QOS_MAP_TABLE_NAME, self.multi_asic.current_namespace) - """Show the latest PFC configuration""" - self.showPfcPrio(interface) - + @multi_asic_util.run_on_multi_asic def showPfcPrio(self, interface): """ PFC handler to display PFC enabled priority information. 
@@ -94,80 +116,82 @@ def showPfcPrio(self, interface): header = ('Interface', 'Lossless priorities') table = [] - configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb - configdb.connect() - """Get all the interfaces with QoS map information""" - intfs = configdb.get_keys('PORT_QOS_MAP') + intfs = self.config_db.get_keys('PORT_QOS_MAP') """The user specifies an interface but we cannot find it""" + namespace_str = f"Namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' if interface and interface not in intfs: - click.echo('Cannot find interface {0}'.format(interface)) + if multi_asic.is_multi_asic(): + click.echo('Cannot find interface {0} for {1}'.format(interface, namespace_str)) + else: + click.echo('Cannot find interface {0}'.format(interface)) return if interface: intfs = [interface] for intf in intfs: - entry = configdb.get_entry('PORT_QOS_MAP', intf) + entry = self.config_db.get_entry('PORT_QOS_MAP', intf) table.append([intf, entry.get('pfc_enable', 'N/A')]) sorted_table = natsorted(table) - click.echo() + click.echo(namespace_str) click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) click.echo() - + + @click.group() -@click.pass_context -def cli(ctx): +def cli(): """PFC Command Line""" - # Use the cfgdb object if given as input. 
- cfgdb = None if ctx.obj is None else ctx.obj.cfgdb - ctx.obj = {'pfc': Pfc(cfgdb)} @cli.group() -@click.pass_context -def config(ctx): +def config(): """Config PFC""" pass + @cli.group() -@click.pass_context -def show(ctx): +def show(): """Show PFC information""" pass + @click.command() @click.argument('status', type=click.Choice(PRIORITY_STATUS)) @click.argument('interface', type=click.STRING) -@click.pass_context -def configAsym(ctx, status, interface): +@multi_asic_util.multi_asic_click_option_namespace +def configAsym(status, interface, namespace): """Configure asymmetric PFC on a given port.""" - ctx.obj['pfc'].configPfcAsym(interface, status) + Pfc(namespace).configPfcAsym(interface, status) + @click.command() @click.argument('status', type=click.Choice(PRIORITY_STATUS)) @click.argument('interface', type=click.STRING) @click.argument('priority', type=click.Choice(ALL_PRIORITIES)) -@click.pass_context -def configPrio(ctx, status, interface, priority): +@multi_asic_util.multi_asic_click_option_namespace +def configPrio(status, interface, priority, namespace): """Configure PFC on a given priority.""" - ctx.obj['pfc'].configPfcPrio(status, interface, priority) + Pfc(namespace).configPfcPrio(status, interface, priority) + @click.command() @click.argument('interface', type=click.STRING, required=False) -@click.pass_context -def showAsym(ctx, interface): +@multi_asic_util.multi_asic_click_option_namespace +def showAsym(interface, namespace): """Show asymmetric PFC information""" - ctx.obj['pfc'].showPfcAsym(interface) + Pfc(namespace).showPfcAsym(interface) + @click.command() @click.argument('interface', type=click.STRING, required=False) -@click.pass_context -def showPrio(ctx, interface): +@multi_asic_util.multi_asic_click_option_namespace +def showPrio(interface, namespace): """Show PFC priority information""" - ctx.obj['pfc'].showPfcPrio(interface) + Pfc(namespace).showPfcPrio(interface) + config.add_command(configAsym, "asymmetric") 
config.add_command(configPrio, "priority") diff --git a/show/main.py b/show/main.py index 1275c9e28a..bf8e491798 100755 --- a/show/main.py +++ b/show/main.py @@ -647,7 +647,8 @@ def counters(namespace, display, verbose): @pfc.command() @click.argument('interface', type=click.STRING, required=False) -def priority(interface): +@multi_asic_util.multi_asic_click_option_namespace +def priority(interface, namespace): """Show pfc priority""" cmd = ['pfc', 'show', 'priority'] if interface is not None and clicommon.get_interface_naming_mode() == "alias": @@ -655,12 +656,15 @@ def priority(interface): if interface is not None: cmd += [str(interface)] + if namespace is not None: + cmd += ['-n', str(namespace)] run_command(cmd) @pfc.command() @click.argument('interface', type=click.STRING, required=False) -def asymmetric(interface): +@multi_asic_util.multi_asic_click_option_namespace +def asymmetric(interface, namespace): """Show asymmetric pfc""" cmd = ['pfc', 'show', 'asymmetric'] if interface is not None and clicommon.get_interface_naming_mode() == "alias": @@ -668,6 +672,8 @@ def asymmetric(interface): if interface is not None: cmd += [str(interface)] + if namespace is not None: + cmd += ['-n', str(namespace)] run_command(cmd) diff --git a/tests/multi_asic_pfc_test.py b/tests/multi_asic_pfc_test.py new file mode 100644 index 0000000000..52bfcf4982 --- /dev/null +++ b/tests/multi_asic_pfc_test.py @@ -0,0 +1,133 @@ +import os +import sys +import json +import importlib +import pfc.main as pfc +from .pfc_test import TestPfcBase +from click.testing import CliRunner +from .pfc_input.pfc_test_vectors import testData + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "pfc") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + + +class TestPfcMultiAsic(TestPfcBase): + @classmethod + def setup_class(cls): + super().setup_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = 
"multi_asic" + + # Multi-asic utils rely on the database that is loaded + # We reload the multi_asic database and update the multi-asic utils + # Pfc uses click cmds that use multi_asic utils, hence we reload pfc too + + import mock_tables.mock_multi_asic + importlib.reload(mock_tables.mock_multi_asic) + mock_tables.dbconnector.load_namespace_config() + + import utilities_common + importlib.reload(utilities_common.multi_asic) + importlib.reload(pfc) + + def executor(self, input): + runner = CliRunner() + result = runner.invoke(pfc.cli, input['cmd']) + exit_code = result.exit_code + output = result.output + + print(exit_code) + print(output) + + assert exit_code == input['rc'] + + # For config commands we dump modified value in a tmp JSON file for testing + if 'cmp_args' in input: + fd = open('/tmp/pfc_testdata.json', 'r') + cmp_data = json.load(fd) + + # Verify assignments + for args in input['cmp_args']: + namespace, table, key, field, expected_val = args + assert(cmp_data[namespace][table][key][field] == expected_val) + fd.close() + + if 'rc_msg' in input: + assert input['rc_msg'] in output + + if 'rc_output' in input: + assert output == input['rc_output'] + + def test_pfc_show_asymmetric_all_asic0_masic(self): + self.executor(testData['pfc_show_asymmetric_all_asic0_masic']) + + def test_pfc_show_asymmetric_all_asic1_masic(self): + self.executor(testData['pfc_show_asymmetric_all_asic1_masic']) + + def test_pfc_show_asymmetric_all_masic(self): + self.executor(testData['pfc_show_asymmetric_all_masic']) + + def test_pfc_show_asymmetric_intf_one_masic(self): + self.executor(testData['pfc_show_asymmetric_intf_one_masic']) + + def test_pfc_show_asymmetric_intf_all_masic(self): + self.executor(testData['pfc_show_asymmetric_intf_all_masic']) + + def test_pfc_show_asymmetric_intf_fake_one_masic(self): + self.executor(testData['pfc_show_asymmetric_intf_fake_one_masic']) + + def test_pfc_show_priority_all_asic0_masic(self): + 
self.executor(testData['pfc_show_priority_all_asic0_masic']) + + def test_pfc_show_priority_all_asic1_masic(self): + self.executor(testData['pfc_show_priority_all_asic1_masic']) + + def test_pfc_show_priority_all_masic(self): + self.executor(testData['pfc_show_priority_all_masic']) + + def test_pfc_show_priority_intf_one_masic(self): + self.executor(testData['pfc_show_priority_intf_one_masic']) + + def test_pfc_show_priority_intf_all_masic(self): + self.executor(testData['pfc_show_priority_intf_all_masic']) + + def test_pfc_show_priority_intf_fake_one_masic(self): + self.executor(testData['pfc_show_priority_intf_fake_one_masic']) + + def test_pfc_show_priority_intf_fake_all_masic(self): + self.executor(testData['pfc_show_priority_intf_fake_all_masic']) + + def test_pfc_config_asymmetric_one_masic(self): + self.executor(testData['pfc_config_asymmetric_one_masic']) + + def test_pfc_config_asymmetric_invalid_one_masic(self): + self.executor(testData['pfc_config_asymmetric_invalid_one_masic']) + + def test_pfc_config_asymmetric_all_masic(self): + self.executor(testData['pfc_config_asymmetric_all_masic']) + + def test_pfc_config_asymmetric_invalid_all_masic(self): + self.executor(testData['pfc_config_asymmetric_invalid_all_masic']) + + def test_pfc_config_priority_one_masic(self): + self.executor(testData['pfc_config_priority_one_masic']) + + def test_pfc_config_priority_invalid_one_masic(self): + self.executor(testData['pfc_config_priority_invalid_one_masic']) + + def test_pfc_config_priority_all_masic(self): + self.executor(testData['pfc_config_priority_all_masic']) + + def test_pfc_config_priority_invalid_all_masic(self): + self.executor(testData['pfc_config_priority_invalid_all_masic']) + + @classmethod + def teardown_class(cls): + # Reset the database to mock single-asic state + import mock_tables.mock_single_asic + mock_tables.dbconnector.load_database_config() + + super().teardown_class() + os.environ.pop("UTILITIES_UNIT_TESTING_TOPOLOGY") diff --git 
a/tests/pfc_input/pfc_test_vectors.py b/tests/pfc_input/pfc_test_vectors.py new file mode 100644 index 0000000000..20d6b59af3 --- /dev/null +++ b/tests/pfc_input/pfc_test_vectors.py @@ -0,0 +1,286 @@ +# Golden outputs +show_asym_all_asic0_masic = """\ +Namespace asic0 +Interface Asymmetric +------------ ------------ +Ethernet0 off +Ethernet4 off +Ethernet16 off +Ethernet-BP0 off +Ethernet-BP4 off + +""" + +show_asym_all_asic1_masic = """\ +Namespace asic1 +Interface Asymmetric +-------------- ------------ +Ethernet64 off +Ethernet-BP256 off +Ethernet-BP260 off + +""" + +show_asym_all_masic = """\ +Namespace asic0 +Interface Asymmetric +------------ ------------ +Ethernet0 off +Ethernet4 off +Ethernet16 off +Ethernet-BP0 off +Ethernet-BP4 off + +Namespace asic1 +Interface Asymmetric +-------------- ------------ +Ethernet64 off +Ethernet-BP256 off +Ethernet-BP260 off + +""" + +show_asym_intf_one_masic = """\ +Namespace asic0 +Interface Asymmetric +----------- ------------ +Ethernet0 off + +""" + +show_asym_intf_all_masic = """\ +Namespace asic0 +Interface Asymmetric +----------- ------------ +Ethernet0 off + +Namespace asic1 +Interface Asymmetric +----------- ------------ + +""" + +show_asym_intf_fake_one_masic = """\ +Namespace asic0 +Interface Asymmetric +----------- ------------ + +""" + +show_prio_all_asic0_masic = """\ +Namespace asic0 +Interface Lossless priorities +-------------- --------------------- +Ethernet0 3,4 +Ethernet4 3,4 +Ethernet8 3,4 +Ethernet-BP0 3,4 +Ethernet-BP4 3,4 +Ethernet-BP256 3,4 +Ethernet-BP260 3,4 + +""" + +show_prio_all_asic1_masic = """\ +Namespace asic1 +Interface Lossless priorities +-------------- --------------------- +Ethernet0 3,4 +Ethernet4 3,4 +Ethernet8 3,4 +Ethernet-BP0 3,4 +Ethernet-BP4 3,4 +Ethernet-BP256 3,4 + +""" + +show_prio_all_masic = """\ +Namespace asic0 +Interface Lossless priorities +-------------- --------------------- +Ethernet0 3,4 +Ethernet4 3,4 +Ethernet8 3,4 +Ethernet-BP0 3,4 +Ethernet-BP4 3,4 
+Ethernet-BP256 3,4 +Ethernet-BP260 3,4 + +Namespace asic1 +Interface Lossless priorities +-------------- --------------------- +Ethernet0 3,4 +Ethernet4 3,4 +Ethernet8 3,4 +Ethernet-BP0 3,4 +Ethernet-BP4 3,4 +Ethernet-BP256 3,4 + +""" + +show_prio_intf_one_masic = """\ +Namespace asic0 +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 + +""" + +show_prio_intf_all_masic = """\ +Namespace asic0 +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 + +Namespace asic1 +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 + +""" + +show_prio_intf_fake_one_masic = """\ +Cannot find interface Ethernet1234 for Namespace asic0 +""" + +show_prio_intf_fake_all_masic = """\ +Cannot find interface Ethernet1234 for Namespace asic0 +Cannot find interface Ethernet1234 for Namespace asic1 +""" + +testData = { + 'pfc_show_asymmetric_all_asic0_masic': {'cmd': ['show', 'asymmetric', + '--namespace', 'asic0'], + 'rc': 0, + 'rc_output': show_asym_all_asic0_masic + }, + 'pfc_show_asymmetric_all_asic1_masic': {'cmd': ['show', 'asymmetric', + '--namespace', 'asic1'], + 'rc': 0, + 'rc_output': show_asym_all_asic1_masic + }, + 'pfc_show_asymmetric_all_masic': {'cmd': ['show', 'asymmetric'], + 'rc': 0, + 'rc_output': show_asym_all_masic + }, + 'pfc_show_asymmetric_intf_one_masic': {'cmd': ['show', 'asymmetric', + 'Ethernet0', '--namespace', + 'asic0'], + 'rc': 0, + 'rc_output': show_asym_intf_one_masic + }, + 'pfc_show_asymmetric_intf_all_masic': {'cmd': ['show', 'asymmetric', + 'Ethernet0'], + 'rc': 0, + 'rc_output': show_asym_intf_all_masic + }, + 'pfc_show_asymmetric_intf_fake_one_masic': {'cmd': ['show', 'asymmetric', + 'Ethernet1234', '--namespace', + 'asic0'], + 'rc': 0, + 'rc_output': show_asym_intf_fake_one_masic + }, + 'pfc_show_priority_all_asic0_masic': {'cmd': ['show', 'priority', + '--namespace', 'asic0'], + 'rc': 0, + 'rc_output': show_prio_all_asic0_masic + }, + 
'pfc_show_priority_all_asic1_masic': {'cmd': ['show', 'priority', + '--namespace', 'asic1'], + 'rc': 0, + 'rc_output': show_prio_all_asic1_masic + }, + 'pfc_show_priority_all_masic': {'cmd': ['show', 'priority'], + 'rc': 0, + 'rc_output': show_prio_all_masic + }, + 'pfc_show_priority_intf_one_masic': {'cmd': ['show', 'priority', + 'Ethernet0', '--namespace', + 'asic0'], + 'rc': 0, + 'rc_output': show_prio_intf_one_masic + }, + 'pfc_show_priority_intf_all_masic': {'cmd': ['show', 'priority', + 'Ethernet0'], + 'rc': 0, + 'rc_output': show_prio_intf_all_masic + }, + 'pfc_show_priority_intf_fake_one_masic': {'cmd': ['show', 'priority', + 'Ethernet1234', '--namespace', + 'asic0'], + 'rc': 0, + 'rc_output': show_prio_intf_fake_one_masic + }, + 'pfc_show_priority_intf_fake_all_masic': {'cmd': ['show', 'priority', + 'Ethernet1234'], + 'rc': 0, + 'rc_output': show_prio_intf_fake_all_masic + }, + 'pfc_config_asymmetric_one_masic': {'cmd': ['config', 'asymmetric', + 'on', 'Ethernet0', '--namespace', + 'asic0'], + 'rc': 0, + 'cmp_args': [['asic0', 'PORT', 'Ethernet0', 'pfc_asym', 'on']] + }, + 'pfc_config_asymmetric_invalid_one_masic': {'cmd': ['config', 'asymmetric', + 'onn', 'Ethernet0', '--namespace', + 'asic0'], + 'rc': 2, + 'rc_msg': ('Usage: cli config asymmetric [OPTIONS] ' + '[on|off] INTERFACE\nTry "cli config ' + 'asymmetric --help" for help.\n\n' + 'Error: Invalid value for "[on|off]": ' + 'invalid choice: onn. (choose from on, off)') + }, + 'pfc_config_asymmetric_all_masic': {'cmd': ['config', 'asymmetric', + 'on', 'Ethernet0'], + 'rc': 0, + 'cmp_args': [['asic0', 'PORT', 'Ethernet0', 'pfc_asym', 'on'], + ['asic1', 'PORT', 'Ethernet0', 'pfc_asym', 'on']] + }, + 'pfc_config_asymmetric_invalid_all_masic': {'cmd': ['config', 'asymmetric', + 'onn', 'Ethernet0'], + 'rc': 2, + 'rc_msg': ('Usage: cli config asymmetric [OPTIONS] ' + '[on|off] INTERFACE\nTry "cli config ' + 'asymmetric --help" for help.\n\n' + 'Error: Invalid value for "[on|off]": ' + 'invalid choice: onn. 
(choose from on, off)') + }, + 'pfc_config_priority_one_masic': {'cmd': ['config', 'priority', + 'on', 'Ethernet0', '5', + '--namespace', 'asic0'], + 'rc': 0, + 'cmp_args': [['asic0', 'PORT_QOS_MAP', 'Ethernet0', + 'pfc_enable', '3,4,5']] + }, + 'pfc_config_priority_invalid_one_masic': {'cmd': ['config', 'priority', + 'onn', 'Ethernet0', '5', + '--namespace', 'asic0'], + 'rc': 2, + 'rc_msg': ('Usage: cli config priority [OPTIONS] ' + '[on|off] INTERFACE [0|1|2|3|4|5|6|7]\n' + 'Try "cli config priority --help" for ' + 'help.\n\nError: Invalid value for ' + '"[on|off]": invalid choice: onn. ' + '(choose from on, off)') + }, + 'pfc_config_priority_all_masic': {'cmd': ['config', 'priority', + 'on', 'Ethernet0', '5'], + 'rc': 0, + 'cmp_args': [['asic0', 'PORT_QOS_MAP', 'Ethernet0', + 'pfc_enable', '3,4,5'], + ['asic1', 'PORT_QOS_MAP', 'Ethernet0', + 'pfc_enable', '3,4,5']] + }, + 'pfc_config_priority_invalid_all_masic': {'cmd': ['config', 'priority', + 'onn', 'Ethernet0', '5'], + 'rc': 2, + 'rc_msg': ('Usage: cli config priority [OPTIONS] ' + '[on|off] INTERFACE [0|1|2|3|4|5|6|7]\n' + 'Try "cli config priority --help" for ' + 'help.\n\nError: Invalid value for ' + '"[on|off]": invalid choice: onn. 
' + '(choose from on, off)') + }, +} diff --git a/tests/pfc_test.py b/tests/pfc_test.py index 101aa476cc..136dab2623 100644 --- a/tests/pfc_test.py +++ b/tests/pfc_test.py @@ -1,10 +1,10 @@ import os import sys +import json import pfc.main as pfc from .pfc_input.assert_show_output import pfc_cannot_find_intf, pfc_show_asymmetric_all, \ pfc_show_asymmetric_intf, pfc_show_priority_all, pfc_show_priority_intf, \ pfc_config_priority_on, pfc_asym_cannot_find_intf -from utilities_common.db import Db from click.testing import CliRunner from importlib import reload @@ -17,11 +17,15 @@ class TestPfcBase(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" - def executor(self, cliobj, command, expected_rc=0, expected_output=None, expected_cfgdb_entry=None, + def executor(self, cliobj, command, expected_rc=0, expected_output=None, expected_cfgdb_entries=None, runner=CliRunner()): - db = Db() - result = runner.invoke(cliobj, command, obj=db) + result = runner.invoke(cliobj, command) print(result.exit_code) print(result.output) @@ -32,21 +36,37 @@ def executor(self, cliobj, command, expected_rc=0, expected_output=None, expecte if expected_output: assert result.output == expected_output - if expected_cfgdb_entry: - (table, key, field, expected_val) = expected_cfgdb_entry - configdb = db.cfgdb - entry = configdb.get_entry(table, key) - assert entry.get(field) == expected_val + if expected_cfgdb_entries: + fd = open('/tmp/pfc_testdata.json', 'r') + cmp_data = json.load(fd) + for expected_cfgdb_entry in expected_cfgdb_entries: + (namespace, table, key, field, expected_val) = expected_cfgdb_entry + entry = cmp_data[namespace][table][key][field] + assert entry == expected_val + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1] + ) + os.environ.pop("UTILITIES_UNIT_TESTING") + if 
os.path.isfile('/tmp/pfc_testdata.json'): + os.remove('/tmp/pfc_testdata.json') class TestPfc(TestPfcBase): - @classmethod def setup_class(cls): + super().setup_class() + from mock_tables import dbconnector from mock_tables import mock_single_asic reload(mock_single_asic) - dbconnector.load_namespace_config() + dbconnector.load_database_config() + + import utilities_common + reload(utilities_common.multi_asic) + reload(pfc) def test_pfc_show_asymmetric_all(self): self.executor(pfc.cli, ['show', 'asymmetric'], @@ -74,8 +94,10 @@ def test_pfc_show_priority_intf_fake(self): def test_pfc_config_asymmetric(self): self.executor(pfc.cli, ['config', 'asymmetric', 'on', 'Ethernet0'], - expected_cfgdb_entry=('PORT', 'Ethernet0', 'pfc_asym', 'on')) + # namespace, table, key, field, expected_val + expected_cfgdb_entries=[('', 'PORT', 'Ethernet0', 'pfc_asym', 'on')]) def test_pfc_config_priority(self): self.executor(pfc.cli, ['config', 'priority', 'on', 'Ethernet0', '5'], - expected_output=pfc_config_priority_on) + # namespace, table, key, field, expected_val + expected_cfgdb_entries=[('', 'PORT_QOS_MAP', 'Ethernet0', 'pfc_enable', '3,4,5')]) From b82115c941869f13999c8f7abe93f32711a9472a Mon Sep 17 00:00:00 2001 From: Feng-msft Date: Fri, 6 Sep 2024 10:19:01 +0800 Subject: [PATCH 43/67] Add show CLI for bmp related dataset. (#3289) * Add CLI for bmp state_db dataset show. * Add CLI for bmp state_db dataset show. 
* Update db instance * Update db instance * update test --- show/main.py | 133 ++++++++++++++ tests/mock_tables/asic0/database_config.json | 10 ++ tests/mock_tables/asic1/database_config.json | 10 ++ tests/mock_tables/asic2/database_config.json | 10 ++ tests/mock_tables/database_config.json | 10 ++ tests/show_bmp_test.py | 178 +++++++++++++++++++ 6 files changed, 351 insertions(+) create mode 100644 tests/show_bmp_test.py diff --git a/show/main.py b/show/main.py index bf8e491798..ac9557a6ef 100755 --- a/show/main.py +++ b/show/main.py @@ -72,6 +72,7 @@ PLATFORM_JSON = 'platform.json' HWSKU_JSON = 'hwsku.json' PORT_STR = "Ethernet" +BMP_STATE_DB = 'BMP_STATE_DB' VLAN_SUB_INTERFACE_SEPARATOR = '.' @@ -2128,6 +2129,138 @@ def ztp(status, verbose): run_command(cmd, display_cmd=verbose) +# +# 'bmp' group ("show bmp ...") +# +@cli.group(cls=clicommon.AliasedGroup) +def bmp(): + """Show details of the bmp dataset""" + pass + + +# 'bgp-neighbor-table' subcommand ("show bmp bgp-neighbor-table") +@bmp.command('bgp-neighbor-table') +@clicommon.pass_db +def bmp_neighbor_table(db): + """Show bmp bgp-neighbor-table information""" + bmp_headers = ["Neighbor_Address", "Peer_Address", "Peer_ASN", "Peer_RD", "Peer_Port", + "Local_Address", "Local_ASN", "Local_Port", "Advertised_Capabilities", "Received_Capabilities"] + + # BGP_NEIGHBOR_TABLE|10.0.1.2 + bmp_keys = db.db.keys(BMP_STATE_DB, "BGP_NEIGHBOR_TABLE|*") + + click.echo("Total number of bmp neighbors: {}".format(0 if bmp_keys is None else len(bmp_keys))) + + bmp_body = [] + if bmp_keys is not None: + for key in bmp_keys: + values = db.db.get_all(BMP_STATE_DB, key) + bmp_body.append([ + values["peer_addr"], # Neighbor_Address + values["peer_addr"], + values["peer_asn"], + values["peer_rd"], + values["peer_port"], + values["local_addr"], + values["local_asn"], + values["local_port"], + values["sent_cap"], + values["recv_cap"] + ]) + + click.echo(tabulate(bmp_body, bmp_headers)) + + +# 'bmp-rib-out-table' subcommand ("show bmp 
bgp-rib-out-table") +@bmp.command('bgp-rib-out-table') +@clicommon.pass_db +def bmp_rib_out_table(db): + """Show bmp bgp-rib-out-table information""" + bmp_headers = ["Neighbor_Address", "NLRI", "Origin", "AS_Path", "Origin_AS", "Next_Hop", "Local_Pref", + "Originator_ID", "Community_List", "Ext_Community_List"] + + # BGP_RIB_OUT_TABLE|192.181.168.0/25|10.0.0.59 + bmp_keys = db.db.keys(BMP_STATE_DB, "BGP_RIB_OUT_TABLE|*") + delimiter = db.db.get_db_separator(BMP_STATE_DB) + + click.echo("Total number of bmp bgp-rib-out-table: {}".format(0 if bmp_keys is None else len(bmp_keys))) + + bmp_body = [] + if bmp_keys is not None: + for key in bmp_keys: + key_values = key.split(delimiter) + if len(key_values) < 3: + continue + values = db.db.get_all(BMP_STATE_DB, key) + bmp_body.append([ + key_values[2], # Neighbor_Address + key_values[1], # NLRI + values["origin"], + values["as_path"], + values["origin_as"], + values["next_hop"], + values["local_pref"], + values["originator_id"], + values["community_list"], + values["ext_community_list"] + ]) + + click.echo(tabulate(bmp_body, bmp_headers)) + + +# 'bgp-rib-in-table' subcommand ("show bmp bgp-rib-in-table") +@bmp.command('bgp-rib-in-table') +@clicommon.pass_db +def bmp_rib_in_table(db): + """Show bmp bgp-rib-in-table information""" + bmp_headers = ["Neighbor_Address", "NLRI", "Origin", "AS_Path", "Origin_AS", "Next_Hop", "Local_Pref", + "Originator_ID", "Community_List", "Ext_Community_List"] + + # BGP_RIB_IN_TABLE|20c0:ef50::/64|10.0.0.57 + bmp_keys = db.db.keys(BMP_STATE_DB, "BGP_RIB_IN_TABLE|*") + delimiter = db.db.get_db_separator(BMP_STATE_DB) + + click.echo("Total number of bmp bgp-rib-in-table: {}".format(0 if bmp_keys is None else len(bmp_keys))) + + bmp_body = [] + if bmp_keys is not None: + for key in bmp_keys: + key_values = key.split(delimiter) + if len(key_values) < 3: + continue + values = db.db.get_all(BMP_STATE_DB, key) + bmp_body.append([ + key_values[2], # Neighbor_Address + key_values[1], # NLRI + 
values["origin"], + values["as_path"], + values["origin_as"], + values["next_hop"], + values["local_pref"], + values["originator_id"], + values["community_list"], + values["ext_community_list"] + ]) + + click.echo(tabulate(bmp_body, bmp_headers)) + + +# 'tables' subcommand ("show bmp tables") +@bmp.command('tables') +@clicommon.pass_db +def tables(db): + """Show bmp table status information""" + bmp_headers = ["Table_Name", "Enabled"] + bmp_body = [] + click.echo("BMP tables: ") + bmp_keys = db.cfgdb.get_table('BMP') + if bmp_keys['table']: + bmp_body.append(['bgp_neighbor_table', bmp_keys['table']['bgp_neighbor_table']]) + bmp_body.append(['bgp_rib_in_table', bmp_keys['table']['bgp_rib_in_table']]) + bmp_body.append(['bgp_rib_out_table', bmp_keys['table']['bgp_rib_out_table']]) + click.echo(tabulate(bmp_body, bmp_headers)) + + # # 'bfd' group ("show bfd ...") # diff --git a/tests/mock_tables/asic0/database_config.json b/tests/mock_tables/asic0/database_config.json index d3028b0b45..5fca7834f6 100644 --- a/tests/mock_tables/asic0/database_config.json +++ b/tests/mock_tables/asic0/database_config.json @@ -4,6 +4,11 @@ "hostname" : "127.0.0.1", "port" : 6379, "unix_socket_path" : "/var/run/redis/redis.sock" + }, + "redis_bmp": { + "hostname" : "127.0.0.1", + "port" : 6400, + "unix_socket_path" : "/var/run/redis/redis_bmp.sock" } }, "DATABASES" : { @@ -51,6 +56,11 @@ "id" : 7, "separator": "|", "instance" : "redis" + }, + "BMP_STATE_DB" : { + "id" : 20, + "separator": "|", + "instance" : "redis_bmp" } }, "VERSION" : "1.1" diff --git a/tests/mock_tables/asic1/database_config.json b/tests/mock_tables/asic1/database_config.json index d3028b0b45..5fca7834f6 100644 --- a/tests/mock_tables/asic1/database_config.json +++ b/tests/mock_tables/asic1/database_config.json @@ -4,6 +4,11 @@ "hostname" : "127.0.0.1", "port" : 6379, "unix_socket_path" : "/var/run/redis/redis.sock" + }, + "redis_bmp": { + "hostname" : "127.0.0.1", + "port" : 6400, + "unix_socket_path" : 
"/var/run/redis/redis_bmp.sock" } }, "DATABASES" : { @@ -51,6 +56,11 @@ "id" : 7, "separator": "|", "instance" : "redis" + }, + "BMP_STATE_DB" : { + "id" : 20, + "separator": "|", + "instance" : "redis_bmp" } }, "VERSION" : "1.1" diff --git a/tests/mock_tables/asic2/database_config.json b/tests/mock_tables/asic2/database_config.json index d3028b0b45..5fca7834f6 100644 --- a/tests/mock_tables/asic2/database_config.json +++ b/tests/mock_tables/asic2/database_config.json @@ -4,6 +4,11 @@ "hostname" : "127.0.0.1", "port" : 6379, "unix_socket_path" : "/var/run/redis/redis.sock" + }, + "redis_bmp": { + "hostname" : "127.0.0.1", + "port" : 6400, + "unix_socket_path" : "/var/run/redis/redis_bmp.sock" } }, "DATABASES" : { @@ -51,6 +56,11 @@ "id" : 7, "separator": "|", "instance" : "redis" + }, + "BMP_STATE_DB" : { + "id" : 20, + "separator": "|", + "instance" : "redis_bmp" } }, "VERSION" : "1.1" diff --git a/tests/mock_tables/database_config.json b/tests/mock_tables/database_config.json index f55c0734c2..9d6125fc74 100644 --- a/tests/mock_tables/database_config.json +++ b/tests/mock_tables/database_config.json @@ -4,6 +4,11 @@ "hostname" : "227.0.0.1", "port" : 6379, "unix_socket_path" : "/var/run/redis/redis.sock" + }, + "redis_bmp": { + "hostname" : "127.0.0.1", + "port" : 6400, + "unix_socket_path" : "/var/run/redis/redis_bmp.sock" } }, "DATABASES" : { @@ -61,6 +66,11 @@ "id" : 13, "separator": "|", "instance" : "redis" + }, + "BMP_STATE_DB" : { + "id" : 20, + "separator": "|", + "instance" : "redis_bmp" } }, "VERSION" : "1.1" diff --git a/tests/show_bmp_test.py b/tests/show_bmp_test.py new file mode 100644 index 0000000000..c0bc556d10 --- /dev/null +++ b/tests/show_bmp_test.py @@ -0,0 +1,178 @@ +import os +from click.testing import CliRunner +from utilities_common.db import Db + +import show.main as show + +test_path = os.path.dirname(os.path.abspath(__file__)) +mock_db_path = os.path.join(test_path, "bmp_input") + + +class TestShowBmp(object): + @classmethod + def 
setup_class(cls): + print("SETUP") + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + def set_db_values(self, db, key, kvs): + for field, value in kvs.items(): + db.set(db.BMP_STATE_DB, key, field, value) + + def test_show_bmp_neighbor_table(self): + runner = CliRunner() + db = Db() + dbconnector = db.db + self.set_db_values(dbconnector, + "BGP_NEIGHBOR_TABLE|10.0.1.1", + {"peer_addr": "10.0.0.61", + "peer_asn": "64915", + "peer_rd": "300", + "peer_port": "5000", + "local_addr": "10.1.0.32", + "local_asn": "65100", + "local_port": "6000", + "sent_cap": "supports-mpbgp,supports-graceful-restart", + "recv_cap": "supports-mpbgp,supports-graceful-restart"}) + self.set_db_values(dbconnector, + "BGP_NEIGHBOR_TABLE|10.0.1.2", + {"peer_addr": "10.0.0.62", + "peer_asn": "64915", + "peer_rd": "300", + "peer_port": "5000", + "local_addr": "10.1.0.32", + "local_asn": "65100", + "local_port": "6000", + "sent_cap": "supports-mpbgp,supports-graceful-restart", + "recv_cap": "supports-mpbgp,supports-graceful-restart"}) + + expected_output = """\ +Total number of bmp neighbors: 2 +Neighbor_Address Peer_Address Peer_ASN Peer_RD Peer_Port Local_Address Local_ASN \ +Local_Port Advertised_Capabilities Received_Capabilities +------------------ -------------- ---------- --------- ----------- --------------- ----------- \ +------------ ---------------------------------------- ---------------------------------------- +10.0.0.61 10.0.0.61 64915 300 5000 10.1.0.32 65100 6000 \ +supports-mpbgp,supports-graceful-restart supports-mpbgp,supports-graceful-restart +10.0.0.62 10.0.0.62 64915 300 5000 10.1.0.32 65100 6000 \ +supports-mpbgp,supports-graceful-restart supports-mpbgp,supports-graceful-restart +""" + result = runner.invoke(show.cli.commands['bmp'].commands['bgp-neighbor-table'], [], obj=db) + assert result.exit_code == 0 + resultA = result.output.strip().replace(' ', '').replace('\n', '') + resultB = expected_output.strip().replace(' ', '').replace('\n', '') + assert resultA == resultB + + 
def test_show_bmp_rib_out_table(self): + runner = CliRunner() + db = Db() + dbconnector = db.db + self.set_db_values(dbconnector, + "BGP_RIB_OUT_TABLE|20c0:ef50::/64|10.0.0.57", + {"origin": "igp", + "as_path": "65100 64600", + "origin_as": "64915", + "next_hop": "fc00::7e", + "local_pref": "0", + "originator_id": "0", + "community_list": "residential", + "ext_community_list": "traffic_engineering"}) + self.set_db_values(dbconnector, + "BGP_RIB_OUT_TABLE|192.181.168.0/25|10.0.0.59", + {"origin": "igp", + "as_path": "65100 64600", + "origin_as": "64915", + "next_hop": "10.0.0.63", + "local_pref": "0", + "originator_id": "0", + "community_list": "business", + "ext_community_list": "preferential_transit"}) + + expected_output = """\ +Total number of bmp bgp-rib-out-table: 2 +Neighbor_Address NLRI Origin AS_Path Origin_AS Next_Hop Local_Pref \ +Originator_ID Community_List Ext_Community_List +------------------ ---------------- -------- ----------- ----------- ---------- ------------ \ +--------------- ---------------- -------------------- +10.0.0.57 20c0:ef50::/64 igp 65100 64600 64915 fc00::7e 0 \ +0 residential traffic_engineering +10.0.0.59 192.181.168.0/25 igp 65100 64600 64915 10.0.0.63 0 \ +0 business preferential_transit +""" + result = runner.invoke(show.cli.commands['bmp'].commands['bgp-rib-out-table'], [], obj=db) + assert result.exit_code == 0 + resultA = result.output.strip().replace(' ', '').replace('\n', '') + resultB = expected_output.strip().replace(' ', '').replace('\n', '') + assert resultA == resultB + + def test_show_bmp_rib_in_table(self): + runner = CliRunner() + db = Db() + dbconnector = db.db + self.set_db_values(dbconnector, + "BGP_RIB_IN_TABLE|20c0:ef50::/64|10.0.0.57", + {"origin": "igp", + "as_path": "65100 64600", + "origin_as": "64915", + "next_hop": "fc00::7e", + "local_pref": "0", + "originator_id": "0", + "community_list": "residential", + "ext_community_list": "traffic_engineering"}) + self.set_db_values(dbconnector, + 
"BGP_RIB_IN_TABLE|192.181.168.0/25|10.0.0.59", + {"origin": "igp", + "as_path": "65100 64600", + "origin_as": "64915", + "next_hop": "10.0.0.63", + "local_pref": "0", + "originator_id": "0", + "community_list": "business", + "ext_community_list": "preferential_transit"}) + + expected_output = """\ +Total number of bmp bgp-rib-in-table: 2 +Neighbor_Address NLRI Origin AS_Path Origin_AS Next_Hop Local_Pref \ +Originator_ID Community_List Ext_Community_List +------------------ ---------------- -------- ----------- ----------- ---------- ------------ \ +--------------- ---------------- -------------------- +10.0.0.57 20c0:ef50::/64 igp 65100 64600 64915 fc00::7e 0 \ +0 residential traffic_engineering +10.0.0.59 192.181.168.0/25 igp 65100 64600 64915 10.0.0.63 0 \ +0 business preferential_transit +""" + result = runner.invoke(show.cli.commands['bmp'].commands['bgp-rib-in-table'], [], obj=db) + assert result.exit_code == 0 + resultA = result.output.strip().replace(' ', '').replace('\n', '') + resultB = expected_output.strip().replace(' ', '').replace('\n', '') + assert resultA == resultB + + def test_tables(self): + runner = CliRunner() + db = Db() + db.cfgdb.mod_entry("BMP", "table", {'bgp_neighbor_table': 'true'}) + db.cfgdb.mod_entry("BMP", "table", {'bgp_rib_in_table': 'false'}) + db.cfgdb.mod_entry("BMP", "table", {'bgp_rib_out_table': 'true'}) + + assert db.cfgdb.get_entry('BMP', 'table')['bgp_neighbor_table'] == 'true' + assert db.cfgdb.get_entry('BMP', 'table')['bgp_rib_in_table'] == 'false' + assert db.cfgdb.get_entry('BMP', 'table')['bgp_rib_out_table'] == 'true' + + expected_output = """\ +BMP tables: +Table_Name Enabled +------------------ --------- +bgp_neighbor_table true +bgp_rib_in_table false +bgp_rib_out_table true +""" + result = runner.invoke(show.cli.commands['bmp'].commands['tables'], [], obj=db) + assert result.exit_code == 0 + resultA = result.output.strip().replace(' ', '').replace('\n', '') + resultB = expected_output.strip().replace(' ', 
'').replace('\n', '') + assert resultA == resultB + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" From 5eb266acf4971d10712bc6b65856d348f29aaa54 Mon Sep 17 00:00:00 2001 From: Arvindsrinivasan Lakshmi Narasimhan <55814491+arlakshm@users.noreply.github.com> Date: Mon, 9 Sep 2024 09:40:31 -0700 Subject: [PATCH 44/67] [chassis] Modify the show ip route to hide the Ethernet-IB port in the output (#3537) * update show ip route for voq chassis Signed-off-by: Arvindsrinivasan Lakshmi Narasimhan * add UT * add more UT * Fix linter errors * fix UT * make linter happy --------- Signed-off-by: Arvindsrinivasan Lakshmi Narasimhan --- show/bgp_common.py | 25 ++-- tests/conftest.py | 15 ++- tests/ip_show_routes_multi_asic_test.py | 3 +- tests/ip_show_routes_voq_chassis_test.py | 112 ++++++++++++++++++ tests/mock_tables/asic0/ip_route_lc.json | 66 +++++++++++ tests/mock_tables/asic0/ip_route_lc_2.json | 56 +++++++++ .../mock_tables/asic0/ip_route_remote_lc.json | 106 +++++++++++++++++ tests/mock_tables/asic1/ip_route_lc.json | 106 +++++++++++++++++ tests/mock_tables/asic1/ip_route_lc_2.json | 56 +++++++++ .../mock_tables/asic1/ip_route_remote_lc.json | 106 +++++++++++++++++ tests/show_ip_route_common.py | 57 +++++++++ 11 files changed, 698 insertions(+), 10 deletions(-) create mode 100644 tests/ip_show_routes_voq_chassis_test.py create mode 100644 tests/mock_tables/asic0/ip_route_lc.json create mode 100644 tests/mock_tables/asic0/ip_route_lc_2.json create mode 100644 tests/mock_tables/asic0/ip_route_remote_lc.json create mode 100644 tests/mock_tables/asic1/ip_route_lc.json create mode 100644 tests/mock_tables/asic1/ip_route_lc_2.json create mode 100644 tests/mock_tables/asic1/ip_route_remote_lc.json diff --git a/show/bgp_common.py b/show/bgp_common.py index b51e9f1879..e9c0e12e8a 100644 --- a/show/bgp_common.py +++ b/show/bgp_common.py @@ 
-3,7 +3,7 @@ import json import utilities_common.multi_asic as multi_asic_util -from sonic_py_common import multi_asic +from sonic_py_common import device_info, multi_asic from utilities_common import constants ''' @@ -60,10 +60,12 @@ def get_nexthop_info_str(nxhp_info, filterByIp): else: str_2_return = " via {},".format(nxhp_info['ip']) if "interfaceName" in nxhp_info: + intfs = nxhp_info['interfaceName'] if filterByIp: - str_2_return += ", via {}".format(nxhp_info['interfaceName']) + str_2_return += ", via {}".format(intfs) else: - str_2_return += " {},".format(nxhp_info['interfaceName']) + str_2_return += " {},".format(intfs) + elif "directlyConnected" in nxhp_info: str_2_return = " is directly connected," if "interfaceName" in nxhp_info: @@ -80,10 +82,13 @@ def get_nexthop_info_str(nxhp_info, filterByIp): str_2_return += "(vrf {}, {},".format(nxhp_info['vrf'], nxhp_info['interfaceName']) if "active" not in nxhp_info: str_2_return += " inactive" + if "recursive" in nxhp_info: + if device_info.is_voq_chassis(): + str_2_return = " " + str_2_return + " recursive via iBGP" + else: + str_2_return += " (recursive)" if "onLink" in nxhp_info: str_2_return += " onlink" - if "recursive" in nxhp_info: - str_2_return += " (recursive)" if "source" in nxhp_info: str_2_return += ", src {}".format(nxhp_info['source']) if "labels" in nxhp_info: @@ -220,6 +225,12 @@ def merge_to_combined_route(combined_route, route, new_info_l): if nh['interfaceName'] == combined_route[route][j]['nexthops'][y]['interfaceName']: found = True break + if device_info.is_voq_chassis(): + if nh['ip'] == combined_route[route][j]['nexthops'][y]['ip']: + if 'interfaceName' not in combined_route[route][j]['nexthops'][y]: + combined_route[route][j]['nexthops'][y] = nh + found = True + break elif "active" not in nh and "active" not in combined_route[route][j]['nexthops'][y]: if nh['ip'] == combined_route[route][j]['nexthops'][y]['ip']: found = True @@ -253,7 +264,7 @@ def process_route_info(route_info, 
device, filter_back_end, print_ns_str, asic_c while len(new_info['nexthops']): nh = new_info['nexthops'].pop() if filter_back_end and back_end_intf_set != None and "interfaceName" in nh: - if nh['interfaceName'] in back_end_intf_set: + if nh['interfaceName'] in back_end_intf_set or nh['interfaceName'].startswith('Ethernet-IB'): del_cnt += 1 else: new_nhop_l.append(copy.deepcopy(nh)) @@ -327,6 +338,7 @@ def show_routes(args, namespace, display, verbose, ipver): if display not in ['frontend', 'all']: print("dislay option '{}' is not a valid option.".format(display)) return + device = multi_asic_util.MultiAsic(display, namespace) arg_strg = "" found_json = 0 @@ -376,7 +388,6 @@ def show_routes(args, namespace, display, verbose, ipver): # Need to add "ns" to form bgpX so it is sent to the correct bgpX docker to handle the request cmd = "show {} route {}".format(ipver, arg_strg) output = bgp_util.run_bgp_show_command(cmd, ns) - # in case no output or something went wrong with user specified cmd argument(s) error it out # error from FRR always start with character "%" if output == "": diff --git a/tests/conftest.py b/tests/conftest.py index 5dd31d523a..3874668a67 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -194,7 +194,11 @@ def setup_single_bgp_instance(request): elif request.param == 'ipv6_route': bgp_mocked_json = 'ipv6_route.json' elif request.param == 'ip_special_route': - bgp_mocked_json = 'ip_special_route.json' + bgp_mocked_json = 'ip_special_route.json' + elif request.param == 'ip_route_lc': + bgp_mocked_json = 'ip_route_lc.json' + elif request.param == 'ip_route_remote_lc': + bgp_mocked_json = 'ip_route_remote_lc.json' else: bgp_mocked_json = os.path.join( test_path, 'mock_tables', 'dummy.json') @@ -240,7 +244,8 @@ def mock_run_bgp_route_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constan _old_run_bgp_command = bgp_util.run_bgp_command if any([request.param == 'ip_route', request.param == 'ip_specific_route', request.param == 
'ip_special_route', - request.param == 'ipv6_route', request.param == 'ipv6_specific_route']): + request.param == 'ipv6_route', request.param == 'ipv6_specific_route', + request.param == 'ip_route_lc', request.param == 'ip_route_remote_lc']): bgp_util.run_bgp_command = mock.MagicMock( return_value=mock_run_bgp_route_command("", "")) elif request.param.startswith('ipv6_route_err'): @@ -303,6 +308,12 @@ def setup_multi_asic_bgp_instance(request): request.param.startswith('bgp_v4_neighbor') or \ request.param.startswith('bgp_v6_neighbor'): m_asic_json_file = request.param + elif request.param == 'ip_route_lc': + m_asic_json_file = 'ip_route_lc.json' + elif request.param == 'ip_route_remote_lc': + m_asic_json_file = 'ip_route_remote_lc.json' + elif request.param == 'ip_route_lc_2': + m_asic_json_file = 'ip_route_lc_2.json' else: m_asic_json_file = os.path.join( test_path, 'mock_tables', 'dummy.json') diff --git a/tests/ip_show_routes_multi_asic_test.py b/tests/ip_show_routes_multi_asic_test.py index bfce5e539d..08bea36910 100644 --- a/tests/ip_show_routes_multi_asic_test.py +++ b/tests/ip_show_routes_multi_asic_test.py @@ -1,10 +1,11 @@ import os from importlib import reload - import pytest + from . import show_ip_route_common from click.testing import CliRunner + test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) scripts_path = os.path.join(modules_path, "scripts") diff --git a/tests/ip_show_routes_voq_chassis_test.py b/tests/ip_show_routes_voq_chassis_test.py new file mode 100644 index 0000000000..de7f7ade8f --- /dev/null +++ b/tests/ip_show_routes_voq_chassis_test.py @@ -0,0 +1,112 @@ +import os +from importlib import reload +import pytest +from unittest import mock + +import show.main as show +from . 
import show_ip_route_common +import utilities_common.multi_asic as multi_asic_util +from click.testing import CliRunner + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") + + +class TestMultiAsicVoqLcShowIpRouteDisplayAllCommands(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + from .mock_tables import mock_multi_asic + reload(mock_multi_asic) + from .mock_tables import dbconnector + dbconnector.load_namespace_config() + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_lc'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_lc( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["-dfrontend"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_LC + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_remote_lc'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_remote_lc( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["-dfrontend"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_REMOTE_LC + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_lc'], indirect=['setup_multi_asic_bgp_instance']) + 
@mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_lc_def_route( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["0.0.0.0/0"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_LC_DEFAULT_ROUTE + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_remote_lc'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_remote_lc_default_route( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["0.0.0.0/0"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_REMOTE_LC_DEFAULT_ROUTE + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_lc_2'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + @mock.patch.object(multi_asic_util.MultiAsic, "get_ns_list_based_on_options", + mock.MagicMock(return_value=["asic0", "asic1"])) + def test_voq_chassis_lc_def_route_2( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["0.0.0.0/0"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_LC_DEFAULT_ROUTE_2 + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + 
os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + from .mock_tables import mock_single_asic + reload(mock_single_asic) diff --git a/tests/mock_tables/asic0/ip_route_lc.json b/tests/mock_tables/asic0/ip_route_lc.json new file mode 100644 index 0000000000..19cfd5e5f0 --- /dev/null +++ b/tests/mock_tables/asic0/ip_route_lc.json @@ -0,0 +1,66 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 4, + "internalNextHopActiveNum": 4, + "nexthopGroupId": 566, + "installedNexthopGroupId": 566, + "uptime": "04w0d11h", + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 2, + "interfaceName": "PortChannel1", + "active": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 4, + "interfaceName": "PortChannel5", + "active": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 5, + "interfaceName": "PortChannel9", + "active": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 3, + "interfaceName": "PortChannel13", + "active": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic0/ip_route_lc_2.json b/tests/mock_tables/asic0/ip_route_lc_2.json new file mode 100644 index 0000000000..8cadf1db22 --- /dev/null +++ b/tests/mock_tables/asic0/ip_route_lc_2.json @@ -0,0 +1,56 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, 
+ "internalFlags": 9, + "internalNextHopNum": 3, + "internalNextHopActiveNum": 3, + "nexthopGroupId": 2122, + "installedNexthopGroupId": 2122, + "uptime": "01:01:51", + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "10.0.0.1", + "afi": "ipv4", + "interfaceIndex": 29, + "interfaceName": "PortChannel102", + "active": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "10.0.0.7", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "10.0.0.7", + "afi": "ipv4", + "interfaceIndex": 52, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic0/ip_route_remote_lc.json b/tests/mock_tables/asic0/ip_route_remote_lc.json new file mode 100644 index 0000000000..0e8f4a56c7 --- /dev/null +++ b/tests/mock_tables/asic0/ip_route_remote_lc.json @@ -0,0 +1,106 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 200, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 13, + "internalNextHopNum": 8, + "internalNextHopActiveNum": 8, + "nexthopGroupId": 465, + "installedNexthopGroupId": 465, + "uptime": "04w0d12h", + "nexthops": [ + { + "flags": 5, + "ip": "20.1.0.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.8.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + 
"active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.16.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.24.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic1/ip_route_lc.json b/tests/mock_tables/asic1/ip_route_lc.json new file mode 100644 index 0000000000..e6c0063f90 --- /dev/null +++ b/tests/mock_tables/asic1/ip_route_lc.json @@ -0,0 +1,106 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 200, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 13, + "internalNextHopNum": 8, + "internalNextHopActiveNum": 8, + "nexthopGroupId": 465, + "installedNexthopGroupId": 465, + "uptime": "04w0d12h", + "nexthops": [ + { + "flags": 5, + "ip": "20.1.0.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.8.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": 
true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.16.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.24.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic1/ip_route_lc_2.json b/tests/mock_tables/asic1/ip_route_lc_2.json new file mode 100644 index 0000000000..f7dff5d51b --- /dev/null +++ b/tests/mock_tables/asic1/ip_route_lc_2.json @@ -0,0 +1,56 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 3, + "internalNextHopActiveNum": 3, + "nexthopGroupId": 2173, + "installedNexthopGroupId": 2173, + "uptime": "01:01:57", + "nexthops": [ + { + "flags": 5, + "ip": "10.0.0.1", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "10.0.0.1", + "afi": "ipv4", + "interfaceIndex": 52, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "10.0.0.7", + "afi": "ipv4", + "interfaceIndex": 29, + "interfaceName": "PortChannel106", + "active": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git 
a/tests/mock_tables/asic1/ip_route_remote_lc.json b/tests/mock_tables/asic1/ip_route_remote_lc.json new file mode 100644 index 0000000000..e6c0063f90 --- /dev/null +++ b/tests/mock_tables/asic1/ip_route_remote_lc.json @@ -0,0 +1,106 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 200, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 13, + "internalNextHopNum": 8, + "internalNextHopActiveNum": 8, + "nexthopGroupId": 465, + "installedNexthopGroupId": 465, + "uptime": "04w0d12h", + "nexthops": [ + { + "flags": 5, + "ip": "20.1.0.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.8.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.16.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.24.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No 
newline at end of file diff --git a/tests/show_ip_route_common.py b/tests/show_ip_route_common.py index 101b23309c..899915a1f4 100644 --- a/tests/show_ip_route_common.py +++ b/tests/show_ip_route_common.py @@ -875,3 +875,60 @@ Totals 6467 6466 """ + +SHOW_IP_ROUTE_REMOTE_LC = """\ +Codes: K - kernel route, C - connected, S - static, R - RIP, + O - OSPF, I - IS-IS, B - BGP, E - EIGRP, N - NHRP, + T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP, + F - PBR, f - OpenFabric, + > - selected route, * - FIB route, q - queued route, r - rejected route + +B> 0.0.0.0/0 [200/0] via 20.1.24.128, recursive via iBGP 04w0d12h + via 20.1.16.128, recursive via iBGP 04w0d12h + via 20.1.8.128, recursive via iBGP 04w0d12h + via 20.1.0.128, recursive via iBGP 04w0d12h +""" + +SHOW_IP_ROUTE_LC = """\ +Codes: K - kernel route, C - connected, S - static, R - RIP, + O - OSPF, I - IS-IS, B - BGP, E - EIGRP, N - NHRP, + T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP, + F - PBR, f - OpenFabric, + > - selected route, * - FIB route, q - queued route, r - rejected route + +B>*0.0.0.0/0 [20/0] via 20.1.24.128, PortChannel13, 04w0d11h + * via 20.1.16.128, PortChannel9, 04w0d11h + * via 20.1.8.128, PortChannel5, 04w0d11h + * via 20.1.0.128, PortChannel1, 04w0d11h +""" + +SHOW_IP_ROUTE_REMOTE_LC_DEFAULT_ROUTE = """\ +Routing entry for 0.0.0.0/0 + Known via "bgp", distance 200, metric 0, best + Last update 04w0d12h ago + * 20.1.24.128 recursive via iBGP + * 20.1.16.128 recursive via iBGP + * 20.1.8.128 recursive via iBGP + * 20.1.0.128 recursive via iBGP + +""" + +SHOW_IP_ROUTE_LC_DEFAULT_ROUTE = """\ +Routing entry for 0.0.0.0/0 + Known via "bgp", distance 20, metric 0, best + Last update 04w0d11h ago + * 20.1.24.128, via PortChannel13 + * 20.1.16.128, via PortChannel9 + * 20.1.8.128, via PortChannel5 + * 20.1.0.128, via PortChannel1 + +""" + +SHOW_IP_ROUTE_LC_DEFAULT_ROUTE_2 = """\ +Routing entry for 0.0.0.0/0 + Known via "bgp", distance 20, metric 0, best + Last update 01:01:51 
ago + * 10.0.0.7, via PortChannel106 + * 10.0.0.1, via PortChannel102 + +""" From d103bfd7247d90703d7dd6680d21440b34fbed34 Mon Sep 17 00:00:00 2001 From: Vivek Date: Mon, 9 Sep 2024 09:43:42 -0700 Subject: [PATCH 45/67] Fix ntp conf file path (#3525) --- show/main.py | 2 +- tests/ntp.conf | 37 +++++++++++++++++++++++++++++++++++++ tests/show_test.py | 14 ++++++++++++++ 3 files changed, 52 insertions(+), 1 deletion(-) create mode 100644 tests/ntp.conf diff --git a/show/main.py b/show/main.py index ac9557a6ef..25202e1e42 100755 --- a/show/main.py +++ b/show/main.py @@ -1545,7 +1545,7 @@ def ntp(verbose): """Show NTP running configuration""" ntp_servers = [] ntp_dict = {} - with open("/etc/ntp.conf") as ntp_file: + with open("/etc/ntpsec/ntp.conf") as ntp_file: data = ntp_file.readlines() for line in data: if line.startswith("server "): diff --git a/tests/ntp.conf b/tests/ntp.conf new file mode 100644 index 0000000000..58bf276dce --- /dev/null +++ b/tests/ntp.conf @@ -0,0 +1,37 @@ +############################################################################### +# This file was AUTOMATICALLY GENERATED. DO NOT MODIFY. +# Controlled by ntp-config.service +############################################################################### + +# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help + +# To avoid ntpd from panic and exit if the drift between new time and +# current system time is large. +tinker panic 0 + +driftfile /var/lib/ntpsec/ntp.drift +leapfile /usr/share/zoneinfo/leap-seconds.list + +server 10.1.1.1 iburst +restrict 10.1.1.1 kod limited nomodify noquery + +server 10.22.1.12 iburst +restrict 10.22.1.12 kod limited nomodify noquery + + +interface ignore wildcard + + +interface listen eth0 +interface listen 127.0.0.1 + + +# Access control configuration +# By default, exchange time with everybody, but don't allow configuration. 
+# NTPsec doesn't establish peer associations, and so nopeer has no effect, and +# has been removed from here +restrict default kod nomodify noquery limited + +# Local users may interrogate the ntp server more closely. +restrict 127.0.0.1 +restrict ::1 diff --git a/tests/show_test.py b/tests/show_test.py index 4cd29ac45e..d81192367a 100644 --- a/tests/show_test.py +++ b/tests/show_test.py @@ -1064,6 +1064,20 @@ def test_rc_syslog(self, mock_rc): assert result.exit_code == 0 assert '[1.1.1.1]' in result.output + @patch('builtins.open', mock_open( + read_data=open('tests/ntp.conf').read())) + def test_ntp(self): + runner = CliRunner() + + result = runner.invoke( + show.cli.commands['runningconfiguration'].commands['ntp']) + print(result.exit_code) + print(result.output) + + assert result.exit_code == 0 + assert '10.1.1.1' in result.output + assert '10.22.1.12' in result.output + @classmethod def teardown_class(cls): print('TEARDOWN') From 5fdc1b61c8c58582c8cbc1845eddc8840769e380 Mon Sep 17 00:00:00 2001 From: DavidZagury <32644413+DavidZagury@users.noreply.github.com> Date: Mon, 9 Sep 2024 20:09:42 +0300 Subject: [PATCH 46/67] [Mellanox] Add new SKU Mellanox-SN5600-C256 (#3431) - What I did Update sonic-utilities to support new SKU Mellanox-SN5600-C256 Add the SKU to the generic configuration updater Simplify the logic of the buffer migrator to support the new SKU - How I did it - How to verify it Manual and unit tests --- generic_config_updater/gcu_field_operation_validators.conf.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index a379e7282f..622d73a68f 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -22,7 +22,7 @@ "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", 
"Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40","Mellanox-SN4700-O32","Mellanox-SN4700-V64", "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], - "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "ACS-SN5400" ] + "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "Mellanox-SN5600-C256", "ACS-SN5400" ] }, "broadcom_asics": { "th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ], From a7897d1fd43603272f99dd7daaa08dc28f5bda7a Mon Sep 17 00:00:00 2001 From: vdahiya12 <67608553+vdahiya12@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:05:41 -0700 Subject: [PATCH 47/67] [show][interface][counters] Add proposal and changes for fec-histogram for interface counters fec-histogram subcommand (#3519) * [show][interface][counters] Add proposal and changes for fec-histogram for show int counters fec-histogram subcommand Signed-off-by: Vaibhav Dahiya * add implementation Signed-off-by: Vaibhav Dahiya * add changes Signed-off-by: Vaibhav Dahiya * add changes Signed-off-by: Vaibhav Dahiya * add UT Signed-off-by: Vaibhav Dahiya * fix test Signed-off-by: Vaibhav Dahiya * correct doc Signed-off-by: Vaibhav Dahiya * add changes Signed-off-by: Vaibhav Dahiya * add cosmetic fix Signed-off-by: Vaibhav Dahiya * add fixes Signed-off-by: Vaibhav Dahiya * pep 8 Signed-off-by: Vaibhav Dahiya * add indentation Signed-off-by: Vaibhav Dahiya --------- Signed-off-by: Vaibhav Dahiya --- doc/Command-Reference.md | 34 +++++++++++++++ show/interfaces/__init__.py | 70 
++++++++++++++++++++++++++++++ tests/mock_tables/counters_db.json | 18 +++++++- tests/portstat_test.py | 30 +++++++++++++ 4 files changed, 151 insertions(+), 1 deletion(-) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index cdc3f5644d..be0bd14fdd 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -4806,6 +4806,7 @@ Optional argument "-p" specify a period (in seconds) with which to gather counte show interfaces counters errors show interfaces counters rates show interfaces counters rif [-p|--period ] [-i ] + show interfaces counters fec-histogram [-i ] ``` - Example: @@ -4923,6 +4924,39 @@ Optionally, you can specify a period (in seconds) with which to gather counters admin@sonic:~$ sonic-clear rifcounters ``` +The "fec-histogram" subcommand is used to display the fec histogram for the port. + +When data is transmitted, it's broken down into units called codewords. FEC algorithms add extra data to each codeword that can be used to detect and correct errors in transmission. +In a FEC histogram, "bins" represent ranges of errors or specific categories of errors. For instance, Bin0 might represent codewords with no errors, while Bin1 could represent codewords with a single bit error, and so on. The histogram shows how many codewords fell into each bin. A high number in the higher bins might indicate a problem with the transmission link, such as signal degradation. + +- Example: + ``` + admin@str-s6000-acs-11:/usr/bin$ show interface counters fec-histogram -i + +Symbol Errors Per Codeword Codewords +-------------------------- --------- +BIN0: 1000000 +BIN1: 900000 +BIN2: 800000 +BIN3: 700000 +BIN4: 600000 +BIN5: 500000 +BIN6: 400000 +BIN7: 300000 +BIN8: 0 +BIN9: 0 +BIN10: 0 +BIN11: 0 +BIN12: 0 +BIN13: 0 +BIN14: 0 +BIN15: 0 + + ``` + + + + **show interfaces description** This command displays the key fields of the interfaces such as Operational Status, Administrative Status, Alias and Description. 
diff --git a/show/interfaces/__init__.py b/show/interfaces/__init__.py index 9287eb5af7..f8889e6c32 100644 --- a/show/interfaces/__init__.py +++ b/show/interfaces/__init__.py @@ -18,6 +18,8 @@ HWSKU_JSON = 'hwsku.json' +REDIS_HOSTIP = "127.0.0.1" + # Read given JSON file def readJsonFile(fileName): try: @@ -646,6 +648,74 @@ def fec_stats(verbose, period, namespace, display): clicommon.run_command(cmd, display_cmd=verbose) + +def get_port_oid_mapping(): + ''' Returns dictionary of all ports interfaces and their OIDs. ''' + db = SonicV2Connector(host=REDIS_HOSTIP) + db.connect(db.COUNTERS_DB) + + port_oid_map = db.get_all(db.COUNTERS_DB, 'COUNTERS_PORT_NAME_MAP') + + db.close(db.COUNTERS_DB) + + return port_oid_map + + +def fetch_fec_histogram(port_oid_map, target_port): + ''' Fetch and display FEC histogram for the given port. ''' + asic_db = SonicV2Connector(host=REDIS_HOSTIP) + asic_db.connect(asic_db.ASIC_DB) + + config_db = ConfigDBConnector() + config_db.connect() + + counter_db = SonicV2Connector(host=REDIS_HOSTIP) + counter_db.connect(counter_db.COUNTERS_DB) + + if target_port not in port_oid_map: + click.echo('Port {} not found in COUNTERS_PORT_NAME_MAP'.format(target_port), err=True) + raise click.Abort() + + port_oid = port_oid_map[target_port] + asic_db_kvp = counter_db.get_all(counter_db.COUNTERS_DB, 'COUNTERS:{}'.format(port_oid)) + + if asic_db_kvp is not None: + + fec_errors = {f'BIN{i}': asic_db_kvp.get + (f'SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S{i}', '0') for i in range(16)} + + # Prepare the data for tabulation + table_data = [(bin_label, error_value) for bin_label, error_value in fec_errors.items()] + + # Define headers + headers = ["Symbol Errors Per Codeword", "Codewords"] + + # Print FEC histogram using tabulate + click.echo(tabulate(table_data, headers=headers)) + else: + click.echo('No kvp found in ASIC DB for port {}, exiting'.format(target_port), err=True) + raise click.Abort() + + asic_db.close(asic_db.ASIC_DB) + 
config_db.close(config_db.CONFIG_DB) + counter_db.close(counter_db.COUNTERS_DB) + + +# 'fec-histogram' subcommand ("show interfaces counters fec-histogram") +@counters.command('fec-histogram') +@multi_asic_util.multi_asic_click_options +@click.argument('interfacename', required=True) +def fec_histogram(interfacename, namespace, display): + """Show interface counters fec-histogram""" + port_oid_map = get_port_oid_mapping() + + # Try to convert interface name from alias + interfacename = try_convert_interfacename_from_alias(click.get_current_context(), interfacename) + + # Fetch and display the FEC histogram + fetch_fec_histogram(port_oid_map, interfacename) + + # 'rates' subcommand ("show interfaces counters rates") @counters.command() @click.option('-p', '--period') diff --git a/tests/mock_tables/counters_db.json b/tests/mock_tables/counters_db.json index 2f16c7014d..9e553c2901 100644 --- a/tests/mock_tables/counters_db.json +++ b/tests/mock_tables/counters_db.json @@ -882,7 +882,23 @@ "SAI_PORT_STAT_ETHER_STATS_JABBERS": "0", "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "130402", "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "3", - "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "4" + "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "4", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S0": "1000000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S1": "900000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S2": "800000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S3": "700000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S4": "600000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S5": "500000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S6": "400000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S7": "300000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S8": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S9": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S10": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S11": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S12": "0", + 
"SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S13": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S14": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S15": "0" }, "COUNTERS:oid:0x1000000000013": { "SAI_PORT_STAT_IF_IN_UCAST_PKTS": "4", diff --git a/tests/portstat_test.py b/tests/portstat_test.py index 3af704e66e..af9814f812 100644 --- a/tests/portstat_test.py +++ b/tests/portstat_test.py @@ -42,6 +42,27 @@ Ethernet8 N/A 100,317 0 0 """ +intf_fec_counters_fec_hist = """\ +Symbol Errors Per Codeword Codewords +---------------------------- ----------- +BIN0 1000000 +BIN1 900000 +BIN2 800000 +BIN3 700000 +BIN4 600000 +BIN5 500000 +BIN6 400000 +BIN7 300000 +BIN8 0 +BIN9 0 +BIN10 0 +BIN11 0 +BIN12 0 +BIN13 0 +BIN14 0 +BIN15 0 +""" + intf_fec_counters_period = """\ The rates are calculated within 3 seconds period IFACE STATE FEC_CORR FEC_UNCORR FEC_SYMBOL_ERR @@ -337,6 +358,15 @@ def test_show_intf_fec_counters(self): assert return_code == 0 assert result == intf_fec_counters + def test_show_intf_counters_fec_histogram(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"].commands["fec-histogram"], ["Ethernet0"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == intf_fec_counters_fec_hist + def test_show_intf_fec_counters_period(self): runner = CliRunner() result = runner.invoke(show.cli.commands["interfaces"].commands["counters"].commands["fec-stats"], From 8fa076d2fe7871ab0e2f34e352128de4dda51bd1 Mon Sep 17 00:00:00 2001 From: Samuel Angebault Date: Wed, 11 Sep 2024 07:22:32 +0200 Subject: [PATCH 48/67] sonic-installer: enhance next image detection for Aboot (#3433) The Aboot bootloader relies of the SWI= keyword argument in the boot-config file to know which image to boot. This value is also used by sonic-installer to figure to extract the next image that will be executed. 
The current code has an issue as it only expects the next image to match the installation path of a SONiC image but not anything else. This means that `SWI=flash:sonic-aboot-broadcom.swi` is not valid and can therefore be a problem when trying to install a new image via cold reboot. Additionally a missing or empty boot-config would generate a python backtrace instead of gracefully recovering from this state. --- sonic_installer/bootloader/aboot.py | 7 ++++++- tests/installer_bootloader_aboot_test.py | 21 +++++++++++++++++---- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/sonic_installer/bootloader/aboot.py b/sonic_installer/bootloader/aboot.py index ac327feb4c..d6492171ab 100644 --- a/sonic_installer/bootloader/aboot.py +++ b/sonic_installer/bootloader/aboot.py @@ -71,6 +71,8 @@ class AbootBootloader(Bootloader): def _boot_config_read(self, path=BOOT_CONFIG_PATH): config = collections.OrderedDict() + if not os.path.exists(path): + return config with open(path) as f: for line in f.readlines(): line = line.strip() @@ -112,7 +114,10 @@ def get_installed_images(self): def get_next_image(self): config = self._boot_config_read() - match = re.search(r"flash:/*(\S+)/", config['SWI']) + swi = config.get('SWI', '') + match = re.search(r"flash:/*(\S+)/", swi) + if not match: + return swi.split(':', 1)[-1] return match.group(1).replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX, 1) def set_default_image(self, image): diff --git a/tests/installer_bootloader_aboot_test.py b/tests/installer_bootloader_aboot_test.py index fbe580a638..be09223b5f 100644 --- a/tests/installer_bootloader_aboot_test.py +++ b/tests/installer_bootloader_aboot_test.py @@ -8,6 +8,7 @@ # Constants image_dir = f'{aboot.IMAGE_DIR_PREFIX}expeliarmus-{aboot.IMAGE_DIR_PREFIX}abcde' +image_chainloader = f'{image_dir}/.sonic-boot.swi' exp_image = f'{aboot.IMAGE_PREFIX}expeliarmus-{aboot.IMAGE_DIR_PREFIX}abcde' image_dirs = [image_dir] @@ -45,15 +46,27 @@ def test_get_installed_images(): assert 
bootloader.get_installed_images() == [exp_image] -@patch("sonic_installer.bootloader.aboot.re.search") -def test_get_next_image(re_search_patch): +def test_get_next_image(): bootloader = aboot.AbootBootloader() - bootloader._boot_config_read = Mock(return_value={'SWI': None}) + + # Test missing boot-config + bootloader._boot_config_read() + + # Test missing SWI value + bootloader._boot_config_read = Mock(return_value={}) + assert bootloader.get_next_image() == '' # Test convertion image dir to image name - re_search_patch().group = Mock(return_value=image_dir) + swi = f'flash:{image_chainloader}' + bootloader._boot_config_read = Mock(return_value={'SWI': swi}) assert bootloader.get_next_image() == exp_image + # Test some other image + next_image = 'EOS.swi' + bootloader._boot_config_read = Mock(return_value={'SWI': f'flash:{next_image}'}) + assert bootloader.get_next_image() == next_image + + def test_install_image(): image_path = 'sonic' env = os.environ.copy() From ad5b0c0aae083156b0b7c0dac10ebbb3cd4c9e07 Mon Sep 17 00:00:00 2001 From: noaOrMlnx <58519608+noaOrMlnx@users.noreply.github.com> Date: Wed, 11 Sep 2024 15:06:52 +0300 Subject: [PATCH 49/67] [Mellanox] Add SPC5 to generic config updater file (#3542) --- .../gcu_field_operation_validators.conf.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index 622d73a68f..8b42812af0 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -22,7 +22,8 @@ "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", 
"Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40","Mellanox-SN4700-O32","Mellanox-SN4700-V64", "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], - "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "Mellanox-SN5600-C256", "ACS-SN5400" ] + "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "Mellanox-SN5600-C256", "ACS-SN5400" ], + "spc5": ["ACS-SN5640"] }, "broadcom_asics": { "th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ], From 1aac5e2c37c26ae5000820a117427438c5f511d4 Mon Sep 17 00:00:00 2001 From: Vineet Mittal <46945843+vmittal-msft@users.noreply.github.com> Date: Wed, 11 Sep 2024 09:38:07 -0700 Subject: [PATCH 50/67] [VoQ chassis] : Script to debug packet drops (#3536) * Script to debug packet loss on VoQ chassis * Updated setup.py with new script * Fixed the order of the script * Fixed error in pre-check --- scripts/debug_voq_chassis_packet_drops.sh | 371 ++++++++++++++++++++++ setup.py | 1 + 2 files changed, 372 insertions(+) create mode 100755 scripts/debug_voq_chassis_packet_drops.sh diff --git a/scripts/debug_voq_chassis_packet_drops.sh b/scripts/debug_voq_chassis_packet_drops.sh new file mode 100755 index 0000000000..53e21c6f09 --- /dev/null +++ b/scripts/debug_voq_chassis_packet_drops.sh @@ -0,0 +1,371 @@ +#!/usr/bin/bash +# defaults for env vars +sleep_period=${sleep_period:-0} +maxiter=${maxiter:-25} # all but 4 iterations will be polling Egress drops +log=${log:-/dev/stdout} +time_format="%D %T.%6N" +delim="END" +# options +ing_check_mc=${ing_check_mc:-1} +ing_check_macsec=${ing_check_macsec:-1} +egr_check_mc=${egr_check_mc:-1} +egr_check_pmf_hit_bits=${egr_check_pmf_hit_bits:-1} +egr_diag_counter_g=${egr_diag_counter_g:-1} + +declare 
-a cores=("0" "1") +declare -a asics=("0" "1") +queue_pair_mask_a=(0 0 0 0) +dsp_map_a=(0 0 0 0) + +timestamp(){ + curr_time=$(date +"$time_format") + echo "$curr_time $logmsg" >> $log +} + +print_pqp_reasons() { + disc_reasons=$((16#${disc_reasons})) + if [ $disc_reasons -eq 0 ]; then echo "none" >> $log ; fi + if [ $(($disc_reasons & 1)) -ne 0 ] ; then echo "0- Total PDs threshold violated" >> $log ; fi + if [ $(($disc_reasons & 2)) -ne 0 ] ; then echo "1- Total PDs UC pool size threshold violated" >> $log ; fi + if [ $(($disc_reasons & 4)) -ne 0 ] ; then echo "2- Per port UC PDs threshold" >> $log ; fi + if [ $(($disc_reasons & 8)) -ne 0 ] ; then echo "3- Per queue UC PDs thresholds">> $log ; fi + if [ $(($disc_reasons & 16)) -ne 0 ] ; then echo "4- Per port UC DBs threshold">> $log ; fi + if [ $(($disc_reasons & 32)) -ne 0 ] ; then echo "5- Per queue UC DBs threshold">> $log ; fi + if [ $(($disc_reasons & 64)) -ne 0 ] ; then echo "6- Per queue disable bit">> $log ; fi + if [ $(($disc_reasons & 128)) -ne 0 ] ; then echo "7- Undefined">> $log ; fi + if [ $(($disc_reasons & 256)) -ne 0 ] ; then echo "8- Total PDs MC pool size threshold">> $log ; fi + if [ $(($disc_reasons & 512)) -ne 0 ] ; then echo "9- Per interface PDs threhold">> $log; fi + if [ $(($disc_reasons & 1024)) -ne 0 ] ; then echo "10- MC SP threshold">> $log ; fi + if [ $(($disc_reasons & 2048)) -ne 0 ] ; then echo "11- per MC-TC threshold">> $log ; fi + if [ $(($disc_reasons & 4096)) -ne 0 ] ; then echo "12- MC PDs per port threshold">> $log ; fi + if [ $(($disc_reasons & 8192)) -ne 0 ] ; then echo "13- MC PDs per queue threshold">> $log ; fi + if [ $(($disc_reasons & 16384)) -ne 0 ] ; then echo "14- MC per port size (bytes) threshold">> $log ; fi + if [ $(($disc_reasons & 32768)) -ne 0 ] ; then echo "15- MC per queue size(bytes) thresholds">> $log ; fi +} +print_rqp_reasons(){ + disc_reasons=$((16#${disc_reasons})) + if [ $disc_reasons -eq 0 ]; then echo "none" >> $log ; fi + if [ $(($disc_reasons 
& 1)) -ne 0 ] ; then echo "0- Total DBs threshold violated" >> $log ; fi + if [ $(($disc_reasons & 2)) -ne 0 ] ; then echo "1- Total UC DBs pool size threshold violated" >> $log ; fi + if [ $(($disc_reasons & 4)) -ne 0 ] ; then echo "2- UC packet discarded in EMR because UC FIFO is full" >> $log ; fi + if [ $(($disc_reasons & 8)) -ne 0 ] ; then echo "3- MC HP packetd discarded in EMR because MC FIFO is full">> $log ; fi + if [ $(($disc_reasons & 16)) -ne 0 ] ; then echo "4- MC LP packetd discarded in EMR because MC FIFO is full">> $log ; fi + if [ $(($disc_reasons & 32)) -ne 0 ] ; then echo "5- Total MC DBs pool size threshold violated">> $log ; fi + if [ $(($disc_reasons & 64)) -ne 0 ] ; then echo "6- Packet-DP is not eligible to take from shared DBs resources">> $log ; fi + if [ $(($disc_reasons & 128)) -ne 0 ] ; then echo "7- USP DBs threshold violated">> $log ; fi + if [ $(($disc_reasons & 256)) -ne 0 ] ; then echo "8- Discrete-Partitioning method: MC-TC DBs threshold violated">> $log ; fi + if [ $(($disc_reasons & 512)) -ne 0 ] ; then echo "9- Strict-priority method: MC-TC mapped to SP0 DBs threshold violated">> $log; fi + if [ $(($disc_reasons & 1024)) -ne 0 ] ; then echo "10- Strict-Priority method: MC-TC mapped to SP1 DBs threshold violated">> $log ; fi +} + +# whenever port_disabled mask change, print the up ports +# (according to the queue-pair mask and DSP port mapping, which is what matters ) + +check_new_port_state() { + last_queue_pair_mask=${queue_pair_mask_a[$index]} + queue_pair_mask=$(bcmcmd -n $asic "g hex ECGM_CGM_QUEUE_PAIR_DISABLED.ECGM${core}" | head -n +2 | tail -1) + if [ "$queue_pair_mask" == "$last_queue_pair_mask" ] ; then + return + fi + queue_pair_mask_a[$index]=$queue_pair_mask + logmsg="EGRESS_QPAIR asic $asic core $core new disabled mask: $queue_pair_mask" + timestamp + + start_dsp=$core + let amt=255-$core + dsp_map_a[$index]=$(bcmcmd -n $asic "d SCH_DSP_2_PORT_MAP_DSPP.SCH${core} $start_dsp $amt") + + hr_num=0 + for pos in 
{-3..-129..-2}; do # todo + byte=${queue_pair_mask:pos:2} + if [ $hr_num -le 8 ] ; then + hr_num_hex="HR_NUM=${hr_num}" + else + hr_num_hex=$(printf "HR_NUM=0x%x" $hr_num) + fi + hr_num=$(( hr_num + 8)) + entry=$(echo ${dsp_map_a[$index]} | sed -e "s/\r/\r\n/g" | grep -m 1 "$hr_num_hex") + found=$? + if [ $found -eq 1 ] ; then + continue + fi + dsp_port=$(echo $entry |grep -o "\[.*\]"| tr -dc '[:alnum:]') + if [ "$byte" = "ff" ]; then + printf "DOWN %3d ${entry}\n" $dsp_port >> $log + else + printf "UP %3d ${entry}\n" $dsp_port >> $log + fi + done + echo >> $log +} + +decode_last_rqp_drop() { + rqp_disc=$(bcmcmd -n $asic "g hex ECGM_RQP_DISCARD_REASONS.ECGM${core}" | head -n -1 | tail -1) + prefix=${rqp_disc: 0: 2} + if [ "$prefix" != "0x" ]; then + return; # empty (0) or a failed read + fi + logmsg="EGRESS_DROP RQP_DISCARD_REASONS asic $asic core $core index $index: $rqp_disc" + timestamp + disc_reasons=${rqp_disc: -4: 3} + print_rqp_reasons +} + +decode_last_pqp_drop() { + pqp_disc=$(bcmcmd -n $asic "g hex ECGM_PQP_DISCARD_REASONS.ECGM${core}" | head -n -1 | tail -1 ) + prefix=${pqp_disc: 0: 2} + if [ "$prefix" != "0x" ]; then + return; # empty (0) or a failed read + fi + logmsg="EGRESS_DROP PQP_DISCARD_REASONS asic $asic core $core: $pqp_disc" + timestamp + check_new_port_state # in case the DSP map has changed + disc_reasons=${pqp_disc: -5: 4} + last_reason=${pqp_disc: -9: 4} + drop_cmd=${pqp_disc: -19: 10} + queue=${drop_cmd: -8: 3} + queue=$((16#${queue})) + queue=$(($queue / 4 )) + queue=$(($queue & 248)) + hr_num_hex=$(printf "%02x" $queue) + entry=$(echo ${dsp_map_a[$index]} | sed -e "s/\r/\r\n/g" | grep -m 1 "$hr_num_hex") + found=$? 
+ dsp_port=$(echo $entry |grep -o "\[.*\]"| tr -dc '[:alnum:]') + if [ $found -eq 1 ] ; then + echo "drop_reason 0x${disc_reasons} queue 0x${hr_num_hex} dsp_port not_found" >> $log + else + dsp_port=$(echo $entry |grep -o "\[.*\]"| tr -dc '[:alnum:]') + echo "drop_reason 0x${disc_reasons} queue 0x${hr_num_hex} dsp port $dsp_port" >> $log + fi + echo "pqp discard reasons (cumulative since last read):" >> $log + print_pqp_reasons + echo "pqp last packet discard reasons:" >> $log + disc_reasons=$last_reason + print_pqp_reasons + echo >> $log +} + + +clear_tcam_hit_bits() { + cint_filename="/tmp/hitbits" + cint=';print bcm_field_entry_hit_flush(0, BCM_FIELD_ENTRY_HIT_FLUSH_ALL, 0); exit;' + bcmcmd -n $asic "log off; rm $cint_filename;log file=$cint_filename quiet=yes; echo '$cint';log off;cint $cint_filename" >> /dev/null +} + +dump_tcam_drop_action_hits() { + echo "SAI_FG_TRAP hits:" >> $log + bcmcmd -n $asic "dbal table dump Table=SAI_FG_TRAP" | grep "CORE" | awk -F'|' '{print $2,$34}' >> $log + echo "EPMF_Cascade hits:" >> $log + # entries 51,52,53,54,55,56 have drop action + bcmcmd -n $asic "dbal table dump Table=EPMF_Cascade" | grep "CORE" | awk -F'|' '{print $2,$10}'>> $log + clear_tcam_hit_bits +} + +check_egress_drops() { + hit=0 + pqp_uc_discard=$(bcmcmd -n $asic "g hex PQP_PQP_DISCARD_UNICAST_PACKET_COUNTER.PQP${core}"| head -n -1 | tail -n +2 | tr -dc '[:alnum:]') + erpp_discard=$(bcmcmd -n $asic "g hex PQP_ERPP_DISCARDED_PACKET_COUNTER.PQP${core}"| head -n -1 | tail -n +2 | tr -dc '[:alnum:]') + rqp_debug_counters=$(bcmcmd -n $asic "g RQP_PRP_DEBUG_COUNTERS.RQP${core}" | head -n -1 | tail -n +2 | sed -e 's/=/ /g'| sed -e 's/,/ /g'|tr -dc "[:alnum:] =_" ) + + pqp_uc_discard=$(printf "%d" $pqp_uc_discard) + erpp_discard=$(printf "%d" $erpp_discard) + + if [ $pqp_uc_discard -ne 0 ]; then + logmsg="EGRESS_DROP UC_DROP on ASIC $asic CORE $core : PQP_DISCARD_UNICAST_PACKET_COUNTER = $pqp_uc_discard" + timestamp + hit=1; + fi + if [ $erpp_discard -ne 0 ]; then + 
logmsg="EGRESS_DROP ERPP_DROP on ASIC $asic CORE $core : PQP_ERPP_DISCARDED_PACKET_COUNTER = $erpp_discard" + timestamp + hit=1; + fi + + sop_discard_uc=$(echo $rqp_debug_counters | awk {'print $4'}) + prp_discard_uc=$(echo $rqp_debug_counters | awk {'print $14'}) + dbf_err_cnt=$(echo $rqp_debug_counters | awk {'print $18'}) + + sop_discard_uc=$(printf "%d" $sop_discard_uc) + prp_discard_uc=$(printf "%d" $prp_discard_uc) + dbf_err_cnt=$(printf "%d" $dbf_err_cnt) + + if [ $sop_discard_uc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_SOP_UC_DISCARD on ASIC $asic CORE $core : $sop_discard_uc" + timestamp + hit=1; + fi + if [ $prp_discard_uc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_PRP_UC_DISCARD on ASIC $asic CORE $core : $prp_discard_uc" + timestamp + hit=1; + fi + if [ $dbf_err_cnt -ne 0 ]; then + logmsg="EGRESS_DROP RQP_DBF_ERR on ASIC $asic CORE $core : $dbf_err_cnt" + timestamp + hit=1; + fi + if [ $egr_check_mc -ne 0 ]; then + sop_discard_mc=$(echo $rqp_debug_counters | awk {'print $6'}) + prp_discard_mc=$(echo $rqp_debug_counters | awk {'print $16'}) + sop_discard_mc=$(printf "%d" $sop_discard_mc) + prp_discard_mc=$(printf "%d" $prp_discard_mc) + + pqp_mc_discard=$(bcmcmd -n $asic "g hex PQP_PQP_DISCARD_MULTICAST_PACKET_COUNTER.PQP${core}" | head -n -1 | tail -n +2 | tr -dc '[:alnum:]') + pqp_mc_discard=$(printf "%d" $pqp_mc_discard) + if [ $pqp_mc_discard -ne 0 ]; then + logmsg="EGRESS_DROP MC_DROP ASIC $asic CORE $core : PQP_DISCARD_MULTICAST_PACKET_COUNTER = $pqp_mc_discard" + timestamp + hit=1; + fi + if [ $sop_discard_mc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_SOP_MC_DISCARD on ASIC $asic CORE $core : $sop_discard_mc" + timestamp + hit=1; + fi + if [ $prp_discard_mc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_PRP_MC_DISCARD on ASIC $asic CORE $core : $prp_discard_mc" + timestamp + hit=1; + fi + fi + if [ $hit -eq 0 ] ; then + return + fi + + decode_last_pqp_drop + # bcmcmd -n $asic "g chg ECGM_RQP_DISCARD_REASONS.ECGM${core}" | grep "=" >> $log + decode_last_rqp_drop 
+ bcmcmd -n $asic "g chg PQP_INTERRUPT_REGISTER.PQP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g chg RQP_INTERRUPT_REGISTER.RQP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s PQP_INTERRUPT_REGISTER.PQP${core} -1" > /dev/null + bcmcmd -n $asic "s RQP_INTERRUPT_REGISTER.RQP${core} -1" > /dev/null + + bcmcmd -n $asic "g chg RQP_PACKET_REASSEMBLY_INTERRUPT_REGISTER.RQP${core}"| tail -2 | head -n -1 >> $log + bcmcmd -n $asic "s RQP_PACKET_REASSEMBLY_INTERRUPT_REGISTER.RQP${core} -1" > /dev/null + + bcmcmd -n $asic "g chg FDR_INTERRUPT_REGISTER.FDR${core}"| head -n -1 | tail -n +2 >> $log + # FDA0 block is shared by both cores + bcmcmd -n $asic "g chg FDA_INTERRUPT_REGISTER.FDA0"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s FDR_INTERRUPT_REGISTER.FDR${core} -1" > /dev/null + bcmcmd -n $asic "s FDA_INTERRUPT_REGISTER.FDA0 -1" > /dev/null + + bcmcmd -n $asic "g chg ERPP_INTERRUPT_REGISTER.ERPP${core}"| head -n -1 | tail -n +2>> $log + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARD_INTERRUPT_REGISTER.ERPP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARD_INTERRUPT_REGISTER_2.ERPP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s ERPP_ERPP_DISCARD_INTERRUPT_REGISTER.ERPP${core} -1" > /dev/null + bcmcmd -n $asic "s ERPP_ERPP_DISCARD_INTERRUPT_REGISTER_2.ERPP${core} -1" > /dev/null + bcmcmd -n $asic "s ERPP_INTERRUPT_REGISTER.ERPP${core} -1" > /dev/null + + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARDS_INTERRUPT_REGISTER_MASK.ERPP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARDS_INTERRUPT_REGISTER_MASK_2.ERPP${core}"| head -n -1 | tail -n +2 >> $log + + #bcmcmd -n $asic "g chg IPT_FLOW_CONTROL_DEBUG.IPT${core}" >> $log + bcmcmd -n $asic "tm egr con"| head -n -1 | tail -n +2 >> $log + + if [ $egr_check_pmf_hit_bits -eq 1 ]; then + dump_tcam_drop_action_hits + fi + if [ $egr_diag_counter_g -eq 1 ]; then + bcmcmd -n $asic "diag counter g nz core=${core}"| head -n -1 | 
tail -n +2 >> $log + fi + echo "$delim" >> $log + echo >> $log +} + +dump_ingress_traps() { + bcmcmd -n $asic "g IPPB_DBG_FLP_DATA_PATH_TRAP.IPPB${core}" | head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s IPPB_DBG_FLP_DATA_PATH_TRAP.IPPB${core} -1"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g IPPE_DBG_LLR_TRAP_0.IPPE${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s IPPE_DBG_LLR_TRAP_0.IPPE${core} -1"| head -n -1 | tail -n +2 >> $log +} +dump_macsec() { + bcmcmd -n $asic "sec stat show; sec stat clear" >> $log +} + +rjct_filename=rjct_status.txt + +check_ingress_drops() { + hit=0 + bcmcmd -n $asic "getreg chg CGM_REJECT_STATUS_BITMAP.CGM${core}" | awk '{split($0,a,":"); print a[2]}' > $rjct_filename + while read -r line; do + [ -z $line ] && continue + res=$(echo $line | grep -v "," | grep "<>") + if [ -z $res ]; then + hit=1 + fi + done < "$rjct_filename" + + if [ $hit == 1 ]; then + logmsg="INGRESS_DROP asic $asic core $core" + timestamp + cat $rjct_filename >> $log + bcmcmd -n $asic "g CGM_MAX_VOQ_WORDS_QSIZE_TRACK.CGM${core}" | head -n -1 | tail -n +2 >> $log + #bcmcmd -n $asic "g chg IPT_FLOW_CONTROL_DEBUG.IPT${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "tm ing cong core=$core" >> $log + bcmcmd -n $asic "trap last info core=$core" >> $log + bcmcmd -n $asic "pp vis ppi core=$core" >> $log + bcmcmd -n $asic "pp vis fdt core=$core" >> $log + bcmcmd -n $asic "pp vis ikleap core=$core" >> $log + #bcmcmd -n $asic "pp vis last" >> $log + if [ $ing_check_mc -eq 1 ] ; then + bcmcmd -n $asic "dbal table dump table=mcdb" >> $log + bcmcmd -n $asic "g MTM_ING_MCDB_OFFSET" | head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g MTM_EGR_MCDB_OFFSET" | head -n -1 | tail -n +2 >> $log + fi + bcmcmd -n $asic "diag counter g nz core=${core}" >> $log + echo "" >> $log + dump_ingress_traps + echo "" >> $log + if [ $ing_check_macsec -eq 1 ] ; then + dump_macsec + fi + echo "$delim" >> $log + fi +} + +# clear stats +for asic in "${asics[@]}" 
+do + bcmcmd -n $asic "sec stat clear; clear counter; clear interrupt all" >> /dev/null +done + +iter_a=(0 0 0 0) +while true; +do + for asic in "${asics[@]}" + do + for core in "${cores[@]}" + do + index=$(($asic*2+$core)) + iter=$((${iter_a[$index]}+1)) + if [ $iter -eq $maxiter ] ; then + iter_a[$index]=0; + sleep $sleep_period + continue + fi + iter_a[$index]=$iter + # for majority of polling cycles, check the PQP drop reason and queue + if [ $iter -gt 4 ] ; then + decode_last_pqp_drop + continue + fi + # check for any change in pqp disabled port mask + if [ $iter -eq 1 ] ; then + check_new_port_state + continue + fi + if [ $iter -eq 2 ] ; then + check_egress_drops + continue + fi + if [ $iter -eq 3 ]; then + check_ingress_drops + continue + fi + if [ $iter -eq 4 ]; then + decode_last_rqp_drop + fi + done + done +done + diff --git a/setup.py b/setup.py index 5d0dc0ea35..520530b532 100644 --- a/setup.py +++ b/setup.py @@ -124,6 +124,7 @@ 'scripts/dropstat', 'scripts/dualtor_neighbor_check.py', 'scripts/dump_nat_entries.py', + 'scripts/debug_voq_chassis_packet_drops.sh', 'scripts/ecnconfig', 'scripts/fabricstat', 'scripts/fanshow', From 2cb8cc65b6dc57d9613ce271a681743aa4fa0f3c Mon Sep 17 00:00:00 2001 From: Xinyu Lin Date: Thu, 12 Sep 2024 01:52:56 +0800 Subject: [PATCH 51/67] [sfputil] Configure the debug loopback mode only on the relevant lanes of the logical port (#3485) * [sfputil] Configure the debug loopback mode only on the relevant lanes of the logical port Signed-off-by: xinyu --- doc/Command-Reference.md | 8 ++-- sfputil/main.py | 82 ++++++++++++++++++++++++++++++++++++---- tests/sfputil_test.py | 65 +++++++++++++++++++++++++++---- 3 files changed, 135 insertions(+), 20 deletions(-) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index be0bd14fdd..7697f235f7 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -3144,19 +3144,19 @@ This command is the standard CMIS diagnostic control used for troubleshooting li - 
Usage: ``` - sfputil debug loopback PORT_NAME LOOPBACK_MODE + sfputil debug loopback PORT_NAME LOOPBACK_MODE - Set the loopback mode + Valid values for loopback mode host-side-input: host side input loopback mode host-side-output: host side output loopback mode media-side-input: media side input loopback mode media-side-output: media side output loopback mode - none: disable loopback mode ``` - Example: ``` - admin@sonic:~$ sfputil debug loopback Ethernet88 host-side-input + admin@sonic:~$ sfputil debug loopback Ethernet88 host-side-input enable + admin@sonic:~$ sfputil debug loopback Ethernet88 media-side-output disable ``` ## DHCP Relay diff --git a/sfputil/main.py b/sfputil/main.py index 2c8f85d016..58c6855abe 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -18,7 +18,7 @@ import sonic_platform import sonic_platform_base.sonic_sfp.sfputilhelper from sonic_platform_base.sfp_base import SfpBase -from swsscommon.swsscommon import SonicV2Connector +from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector from natsort import natsorted from sonic_py_common import device_info, logger, multi_asic from utilities_common.sfp_helper import covert_application_advertisement_to_output_string @@ -1967,11 +1967,12 @@ def debug(): # 'loopback' subcommand @debug.command() -@click.argument('port_name', required=True, default=None) -@click.argument('loopback_mode', required=True, default="none", - type=click.Choice(["none", "host-side-input", "host-side-output", +@click.argument('port_name', required=True) +@click.argument('loopback_mode', required=True, + type=click.Choice(["host-side-input", "host-side-output", "media-side-input", "media-side-output"])) -def loopback(port_name, loopback_mode): +@click.argument('enable', required=True, type=click.Choice(["enable", "disable"])) +def loopback(port_name, loopback_mode, enable): """Set module diagnostic loopback mode """ physical_port = logical_port_to_physical_port_index(port_name) @@ -1991,17 +1992,82 @@ def 
loopback(port_name, loopback_mode): click.echo("{}: This functionality is not implemented".format(port_name)) sys.exit(ERROR_NOT_IMPLEMENTED) + namespace = multi_asic.get_namespace_for_port(port_name) + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + if config_db is not None: + config_db.connect() + try: + subport = int(config_db.get(config_db.CONFIG_DB, f'PORT|{port_name}', 'subport')) + except TypeError: + click.echo(f"{port_name}: subport is not present in CONFIG_DB") + sys.exit(EXIT_FAIL) + + # If subport is set to 0, assign a default value of 1 to ensure valid subport configuration + if subport == 0: + subport = 1 + else: + click.echo(f"{port_name}: Failed to connect to CONFIG_DB") + sys.exit(EXIT_FAIL) + + state_db = SonicV2Connector(use_unix_socket_path=False, namespace=namespace) + if state_db is not None: + state_db.connect(state_db.STATE_DB) + try: + host_lane_count = int(state_db.get(state_db.STATE_DB, + f'TRANSCEIVER_INFO|{port_name}', + 'host_lane_count')) + except TypeError: + click.echo(f"{port_name}: host_lane_count is not present in STATE_DB") + sys.exit(EXIT_FAIL) + + try: + media_lane_count = int(state_db.get(state_db.STATE_DB, + f'TRANSCEIVER_INFO|{port_name}', + 'media_lane_count')) + except TypeError: + click.echo(f"{port_name}: media_lane_count is not present in STATE_DB") + sys.exit(EXIT_FAIL) + else: + click.echo(f"{port_name}: Failed to connect to STATE_DB") + sys.exit(EXIT_FAIL) + + if 'host-side' in loopback_mode: + lane_mask = get_subport_lane_mask(subport, host_lane_count) + elif 'media-side' in loopback_mode: + lane_mask = get_subport_lane_mask(subport, media_lane_count) + else: + lane_mask = 0 + try: - status = api.set_loopback_mode(loopback_mode) + status = api.set_loopback_mode(loopback_mode, + lane_mask=lane_mask, + enable=enable == 'enable') except AttributeError: click.echo("{}: Set loopback mode is not applicable for this module".format(port_name)) sys.exit(ERROR_NOT_IMPLEMENTED) + except 
TypeError: + click.echo("{}: Set loopback mode failed. Parameter is not supported".format(port_name)) + sys.exit(EXIT_FAIL) if status: - click.echo("{}: Set {} loopback".format(port_name, loopback_mode)) + click.echo("{}: {} {} loopback".format(port_name, enable, loopback_mode)) else: - click.echo("{}: Set {} loopback failed".format(port_name, loopback_mode)) + click.echo("{}: {} {} loopback failed".format(port_name, enable, loopback_mode)) sys.exit(EXIT_FAIL) + +def get_subport_lane_mask(subport, lane_count): + """Get the lane mask for the given subport and lane count + + Args: + subport (int): Subport number + lane_count (int): Lane count for the subport + + Returns: + int: Lane mask for the given subport and lane count + """ + return ((1 << lane_count) - 1) << ((subport - 1) * lane_count) + + if __name__ == '__main__': cli() diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 0e58daa18e..d8d13df1c0 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -1631,11 +1631,16 @@ def test_load_port_config(self, mock_is_multi_asic): @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.ConfigDBConnector') + @patch('sfputil.main.SonicV2Connector') @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) - def test_debug_loopback(self, mock_chassis): + @patch('sonic_py_common.multi_asic.get_front_end_namespaces', MagicMock(return_value=[''])) + def test_debug_loopback(self, mock_sonic_v2_connector, mock_config_db_connector, mock_chassis): mock_sfp = MagicMock() mock_api = MagicMock() + mock_config_db_connector.return_value = MagicMock() + mock_sonic_v2_connector.return_value = MagicMock() mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) mock_sfp.get_presence.return_value = True mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) @@ 
-1643,31 +1648,75 @@ def test_debug_loopback(self, mock_chassis): runner = CliRunner() mock_sfp.get_presence.return_value = False result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "host-side-input"]) + ["Ethernet0", "host-side-input", "enable"]) assert result.output == 'Ethernet0: SFP EEPROM not detected\n' mock_sfp.get_presence.return_value = True mock_sfp.get_xcvr_api = MagicMock(side_effect=NotImplementedError) result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "host-side-input"]) + ["Ethernet0", "host-side-input", "enable"]) assert result.output == 'Ethernet0: This functionality is not implemented\n' assert result.exit_code == ERROR_NOT_IMPLEMENTED mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "host-side-input"]) - assert result.output == 'Ethernet0: Set host-side-input loopback\n' + ["Ethernet0", "host-side-input", "enable"]) + assert result.output == 'Ethernet0: enable host-side-input loopback\n' + assert result.exit_code != ERROR_NOT_IMPLEMENTED + + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: enable media-side-input loopback\n' assert result.exit_code != ERROR_NOT_IMPLEMENTED mock_api.set_loopback_mode.return_value = False result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "none"]) - assert result.output == 'Ethernet0: Set none loopback failed\n' + ["Ethernet0", "media-side-output", "enable"]) + assert result.output == 'Ethernet0: enable media-side-output loopback failed\n' assert result.exit_code == EXIT_FAIL mock_api.set_loopback_mode.return_value = True mock_api.set_loopback_mode.side_effect = AttributeError result = 
runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], - ["Ethernet0", "none"]) + ["Ethernet0", "host-side-input", "enable"]) assert result.output == 'Ethernet0: Set loopback mode is not applicable for this module\n' assert result.exit_code == ERROR_NOT_IMPLEMENTED + + mock_api.set_loopback_mode.side_effect = [TypeError, True] + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input", "enable"]) + assert result.output == 'Ethernet0: Set loopback mode failed. Parameter is not supported\n' + assert result.exit_code == EXIT_FAIL + + mock_config_db = MagicMock() + mock_config_db.get.side_effect = TypeError + mock_config_db_connector.return_value = mock_config_db + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: subport is not present in CONFIG_DB\n' + assert result.exit_code == EXIT_FAIL + + mock_config_db_connector.return_value = None + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: Failed to connect to CONFIG_DB\n' + assert result.exit_code == EXIT_FAIL + + mock_config_db_connector.return_value = MagicMock() + mock_sonic_v2_connector.return_value = None + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: Failed to connect to STATE_DB\n' + assert result.exit_code == EXIT_FAIL + + @pytest.mark.parametrize("subport, lane_count, expected_mask", [ + (1, 1, 0x1), + (1, 4, 0xf), + (2, 1, 0x2), + (2, 4, 0xf0), + (3, 2, 0x30), + (4, 1, 0x8), + ]) + def test_get_subport_lane_mask(self, subport, lane_count, expected_mask): + assert sfputil.get_subport_lane_mask(subport, lane_count) == expected_mask From 5fc0ee6c79951f5a8a0e939fd4fe10183b02e23c Mon Sep 17 00:00:00 2001 From: Nazarii 
Hnydyn Date: Mon, 16 Sep 2024 02:48:13 +0300 Subject: [PATCH 52/67] [spm]: Clean up timers auto generation logic. (#3523) Config Reload Enhancements PR https://github.com/sonic-net/SONiC/pull/1203 does not completely remove TIMERs from SONiC Package Manager infra. This PR is intended to complete the original changes. `Systemd` TIMERs infra was replaced by `hostcfgd` service management. That was done to improve reliability of service management. #### What I did * Removed redundant TIMERs infra #### How I did it * Updated SPM auto generation logic #### How to verify it 1. Install application extension ```bash spm install --from-tarball ``` 2. Make sure `delayed` flag is set ```bash docker image inspect | jq '.[].Config.Labels["com.azure.sonic.manifest"]' | python -c 'import sys,ast; print(ast.literal_eval(sys.stdin.read()))' | jq .service.delayed true ``` 3. Check no TIMERs were generated --- sonic-utilities-data/templates/timer.unit.j2 | 19 ---------- .../service_creator/creator.py | 20 +---------- .../service_creator/feature.py | 3 +- tests/sonic_package_manager/conftest.py | 1 - .../test_service_creator.py | 35 ------------------- 5 files changed, 2 insertions(+), 76 deletions(-) delete mode 100644 sonic-utilities-data/templates/timer.unit.j2 diff --git a/sonic-utilities-data/templates/timer.unit.j2 b/sonic-utilities-data/templates/timer.unit.j2 deleted file mode 100644 index 09989f2c51..0000000000 --- a/sonic-utilities-data/templates/timer.unit.j2 +++ /dev/null @@ -1,19 +0,0 @@ -# -# =============== Managed by SONiC Package Manager. DO NOT EDIT! 
=============== -# auto-generated from {{ source }} by sonic-package-manager -# -[Unit] -Description=Delays {{ manifest.service.name }} until SONiC has started -PartOf={{ manifest.service.name }}{% if multi_instance %}@%i{% endif %}.service - -[Timer] -OnUnitActiveSec=0 sec -OnBootSec=3min 30 sec -Unit={{ manifest.service.name }}{% if multi_instance %}@%i{% endif %}.service - -[Install] -WantedBy=timers.target sonic.target sonic-delayed.target -{%- for service in manifest.service["wanted-by"] %} -WantedBy={{ service }}{% if multi_instance and service in multi_instance_services %}@%i{% endif %}.service -{%- endfor %} - diff --git a/sonic_package_manager/service_creator/creator.py b/sonic_package_manager/service_creator/creator.py index 57f8ac4624..c88e96a44a 100644 --- a/sonic_package_manager/service_creator/creator.py +++ b/sonic_package_manager/service_creator/creator.py @@ -31,7 +31,6 @@ SERVICE_FILE_TEMPLATE = 'sonic.service.j2' -TIMER_UNIT_TEMPLATE = 'timer.unit.j2' SYSTEMD_LOCATION = '/usr/lib/systemd/system' ETC_SYSTEMD_LOCATION = '/etc/systemd/system' @@ -305,7 +304,7 @@ def generate_service_mgmt(self, package: Package): log.info(f'generated {script_path}') def generate_systemd_service(self, package: Package): - """ Generates systemd service(s) file and timer(s) (if needed) for package. + """ Generates systemd service(s) file for package. Args: package: Package object to generate service for. 
@@ -333,23 +332,6 @@ def generate_systemd_service(self, package: Package): render_template(template, output_file, template_vars) log.info(f'generated {output_file}') - if package.manifest['service']['delayed']: - template_vars = { - 'source': get_tmpl_path(TIMER_UNIT_TEMPLATE), - 'manifest': package.manifest.unmarshal(), - 'multi_instance': False, - } - output_file = os.path.join(SYSTEMD_LOCATION, f'{name}.timer') - template = os.path.join(TEMPLATES_PATH, TIMER_UNIT_TEMPLATE) - render_template(template, output_file, template_vars) - log.info(f'generated {output_file}') - - if package.manifest['service']['asic-service']: - output_file = os.path.join(SYSTEMD_LOCATION, f'{name}@.timer') - template_vars['multi_instance'] = True - render_template(template, output_file, template_vars) - log.info(f'generated {output_file}') - def update_generated_services_conf_file(self, package: Package, remove=False): """ Updates generated_services.conf file. diff --git a/sonic_package_manager/service_creator/feature.py b/sonic_package_manager/service_creator/feature.py index 43b6c309fe..32a155206c 100644 --- a/sonic_package_manager/service_creator/feature.py +++ b/sonic_package_manager/service_creator/feature.py @@ -105,8 +105,7 @@ def update(self, old_manifest: Manifest, new_manifest: Manifest): """ Migrate feature configuration. It can be that non-configurable - feature entries have to be updated. e.g: "delayed" for example if - the new feature introduces a service timer or name of the service has + feature entries have to be updated. e.g: name of the service has changed, but user configurable entries are not changed). 
Args: diff --git a/tests/sonic_package_manager/conftest.py b/tests/sonic_package_manager/conftest.py index 98db887941..3d6beae9ff 100644 --- a/tests/sonic_package_manager/conftest.py +++ b/tests/sonic_package_manager/conftest.py @@ -412,7 +412,6 @@ def sonic_fs(fs): fs.create_dir(SERVICE_MGMT_SCRIPT_LOCATION) fs.create_file(GENERATED_SERVICES_CONF_FILE) fs.create_file(os.path.join(TEMPLATES_PATH, SERVICE_FILE_TEMPLATE)) - fs.create_file(os.path.join(TEMPLATES_PATH, TIMER_UNIT_TEMPLATE)) fs.create_file(os.path.join(TEMPLATES_PATH, SERVICE_MGMT_SCRIPT_TEMPLATE)) fs.create_file(os.path.join(TEMPLATES_PATH, DOCKER_CTL_SCRIPT_TEMPLATE)) fs.create_file(os.path.join(TEMPLATES_PATH, DEBUG_DUMP_SCRIPT_TEMPLATE)) diff --git a/tests/sonic_package_manager/test_service_creator.py b/tests/sonic_package_manager/test_service_creator.py index 8278a8da2b..319dcf32ff 100644 --- a/tests/sonic_package_manager/test_service_creator.py +++ b/tests/sonic_package_manager/test_service_creator.py @@ -137,20 +137,6 @@ def read_file(name): assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service.d')) -def test_service_creator_with_timer_unit(sonic_fs, manifest, service_creator): - entry = PackageEntry('test', 'azure/sonic-test') - package = Package(entry, Metadata(manifest)) - service_creator.create(package) - - assert not sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) - - manifest['service']['delayed'] = True - package = Package(entry, Metadata(manifest)) - service_creator.create(package) - - assert sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) - - def test_service_creator_with_debug_dump(sonic_fs, manifest, service_creator): entry = PackageEntry('test', 'azure/sonic-test') package = Package(entry, Metadata(manifest)) @@ -396,27 +382,6 @@ def test_feature_update(mock_sonic_db, manifest): ], any_order=True) -def test_feature_registration_with_timer(mock_sonic_db, manifest): - manifest['service']['delayed'] = True - mock_connector = Mock() - 
mock_connector.get_entry = Mock(return_value={}) - mock_sonic_db.get_connectors = Mock(return_value=[mock_connector]) - mock_sonic_db.get_initial_db_connector = Mock(return_value=mock_connector) - feature_registry = FeatureRegistry(mock_sonic_db) - feature_registry.register(manifest) - mock_connector.set_entry.assert_called_with('FEATURE', 'test', { - 'state': 'disabled', - 'auto_restart': 'enabled', - 'high_mem_alert': 'disabled', - 'set_owner': 'local', - 'has_per_asic_scope': 'False', - 'has_global_scope': 'True', - 'delayed': 'True', - 'check_up_status': 'False', - 'support_syslog_rate_limit': 'False', - }) - - def test_feature_registration_with_non_default_owner(mock_sonic_db, manifest): mock_connector = Mock() mock_connector.get_entry = Mock(return_value={}) From c6637553fdd1d2fe7d1318383d2e68aa5cd46849 Mon Sep 17 00:00:00 2001 From: DavidZagury <32644413+DavidZagury@users.noreply.github.com> Date: Mon, 16 Sep 2024 02:57:02 +0300 Subject: [PATCH 53/67] Move from bootctl to mokutil when checking for Secure Boot status (#3486) #### What I did Moved to use mokutil instead of bootctl as bootctl is no longer available in Bookworm. This affected reboot scripts, and upgrade scenario. 
#### How I did it Replace calls to _bootctl status_ with _mokutil --sb-state_ #### How to verify it After fixing the scripts to check reboot: root@sn5600:/home/admin# soft-reboot SECURE_UPGRADE_ENABLED=1 [[ CHECK_SECURE_UPGRADE_ENABLED -ne 0 ]] load_kernel_secure invoke_kexec -s packet_write_wait: port 22: Broken pipe admin@sn5600:~$ show reboot-cause User issued 'soft-reboot' command [User: admin, Time: Tue Jul 23 11:06:43 PM UTC 2024] --- scripts/fast-reboot | 2 +- scripts/soft-reboot | 21 ++++++++++++++++++--- sonic_installer/bootloader/grub.py | 2 +- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index e183c34219..09f8f444ab 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -673,7 +673,7 @@ if is_secureboot && grep -q aboot_machine= /host/machine.conf; then else # check if secure boot is enable in UEFI CHECK_SECURE_UPGRADE_ENABLED=0 - SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled") || CHECK_SECURE_UPGRADE_ENABLED=$? + SECURE_UPGRADE_ENABLED=$(mokutil --sb-state 2>/dev/null | grep -c "enabled") || CHECK_SECURE_UPGRADE_ENABLED=$?
if [[ CHECK_SECURE_UPGRADE_ENABLED -ne 0 ]]; then debug "Loading kernel without secure boot" load_kernel diff --git a/scripts/soft-reboot b/scripts/soft-reboot index 0b9030a6f7..74d7051b1d 100755 --- a/scripts/soft-reboot +++ b/scripts/soft-reboot @@ -93,7 +93,7 @@ function clear_lingering_reboot_config() if [[ -f ${WARM_DIR}/${REDIS_FILE} ]]; then mv -f ${WARM_DIR}/${REDIS_FILE} ${WARM_DIR}/${REDIS_FILE}.${TIMESTAMP} || /bin/true fi - /sbin/kexec -u || /bin/true + /sbin/kexec -u -a || /bin/true } SCRIPT=$0 @@ -147,9 +147,17 @@ function setup_reboot_variables() fi } +function invoke_kexec() { + /sbin/kexec -l "$KERNEL_IMAGE" --initrd="$INITRD" --append="$BOOT_OPTIONS" $@ +} + function load_kernel() { # Load kernel into the memory - /sbin/kexec -l "$KERNEL_IMAGE" --initrd="$INITRD" --append="$BOOT_OPTIONS" + invoke_kexec -a +} + +function load_kernel_secure() { + invoke_kexec -s } function reboot_pre_check() @@ -215,7 +223,14 @@ stop_sonic_services clear_lingering_reboot_config -load_kernel +# check if secure boot is enabled +CHECK_SECURE_UPGRADE_ENABLED=0 +SECURE_UPGRADE_ENABLED=$(mokutil --sb-state 2>/dev/null | grep -c "enabled") || CHECK_SECURE_UPGRADE_ENABLED=$? +if [[ CHECK_SECURE_UPGRADE_ENABLED -ne 0 ]]; then + load_kernel +else + load_kernel_secure +fi # Update the reboot cause file to reflect that user issued 'reboot' command # Upon next boot, the contents of this file will be used to determine the diff --git a/sonic_installer/bootloader/grub.py b/sonic_installer/bootloader/grub.py index d76ddcc0c7..029ebf34f1 100644 --- a/sonic_installer/bootloader/grub.py +++ b/sonic_installer/bootloader/grub.py @@ -164,7 +164,7 @@ def is_secure_upgrade_image_verification_supported(self): if ! 
[ -n "$(ls -A /sys/firmware/efi/efivars 2>/dev/null)" ]; then mount -t efivarfs none /sys/firmware/efi/efivars 2>/dev/null fi - SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled") + SECURE_UPGRADE_ENABLED=$(mokutil --sb-state 2>/dev/null | grep -c "enabled") else echo "efi not supported - exiting without verification" exit 1 From ed624895f218b5c81fabc0c4655b317374e246d7 Mon Sep 17 00:00:00 2001 From: i-davydenko <41341620+i-davydenko@users.noreply.github.com> Date: Mon, 16 Sep 2024 03:57:20 +0300 Subject: [PATCH 54/67] SONIC CLI for CLI-Sessions feature (#3175) HLD: https://github.com/sonic-net/SONiC/pull/1367 | Module name | PR | state | context | | ------------- | ------------- | ----|-----| | [sonic-buildimage](https://github.com/sonic-net/sonic-buildimage) | [Dev cli sessions](https://github.com/sonic-net/sonic-buildimage/pull/17623) | ![GitHub issue/pull request detail](https://img.shields.io/github/pulls/detail/state/sonic-net/sonic-buildimage/17623) | ![GitHub pull request check contexts](https://img.shields.io/github/status/contexts/pulls/sonic-net/sonic-buildimage/17623) | | [sonic-host-services](https://github.com/sonic-net/sonic-host-services) | [cli-sessions](https://github.com/sonic-net/sonic-host-services/pull/99) | ![GitHub issue/pull request detail](https://img.shields.io/github/pulls/detail/state/sonic-net/sonic-host-services/99) | ![GitHub pull request check contexts](https://img.shields.io/github/status/contexts/pulls/sonic-net/sonic-host-services/99) | | [sonic-utilities](https://github.com/sonic-net/sonic-utilities) | [SONIC CLI for CLI-Sessions feature #3175](https://github.com/sonic-net/sonic-utilities/pull/3175) | ![GitHub issue/pull request detail](https://img.shields.io/github/pulls/detail/state/sonic-net/sonic-utilities/3175) | ![GitHub pull request check contexts](https://img.shields.io/github/status/contexts/pulls/sonic-net/sonic-utilities/3175) | #### What I did Implement next commands for CLI-sessions 
feature: - config serial-console inactivity-timeout - config serial-console sysrq-capabilities - show serial-console - config ssh max-sessions - config ssh inactivity-timeout - show ssh #### How I did it Write handlers in config/main.py for serial-console and ssh commands to cover configuration set; Write handlers in show/main.py for serial-console and ssh to cover show commands. #### How to verify it Manual tests --- config/main.py | 66 ++++++++++++++++++++++++++++++++++++++ show/main.py | 40 +++++++++++++++++++++++ tests/cli_sessions_test.py | 32 ++++++++++++++++++ 3 files changed, 138 insertions(+) create mode 100644 tests/cli_sessions_test.py diff --git a/config/main.py b/config/main.py index f4ea93e53f..bfa6dccadc 100644 --- a/config/main.py +++ b/config/main.py @@ -7987,5 +7987,71 @@ def notice(db, category_list, max_events, namespace): handle_asic_sdk_health_suppress(db, 'notice', category_list, max_events, namespace) +# +# 'serial_console' group ('config serial_console') +# +@config.group(cls=clicommon.AbbreviationGroup, name='serial_console') +def serial_console(): + """Configuring system serial-console behavior""" + pass + + +@serial_console.command('sysrq-capabilities') +@click.argument('sysrq_capabilities', metavar='', required=True, + type=click.Choice(['enabled', 'disabled'])) +def sysrq_capabilities(sysrq_capabilities): + """Set serial console sysrq-capabilities state""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SERIAL_CONSOLE", 'POLICIES', + {'sysrq_capabilities': sysrq_capabilities}) + + +@serial_console.command('inactivity-timeout') +@click.argument('inactivity_timeout', metavar='', required=True, + type=click.IntRange(0, 35000)) +def inactivity_timeout_serial(inactivity_timeout): + """Set serial console inactivity timeout""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SERIAL_CONSOLE", 'POLICIES', + {'inactivity_timeout': inactivity_timeout}) + + +# +# 'ssh' group ('config 
ssh') +# +@config.group(cls=clicommon.AbbreviationGroup, name='ssh') +def ssh(): + """Configuring system ssh behavior""" + pass + + +@ssh.command('inactivity-timeout') +@click.argument('inactivity_timeout', metavar='', required=True, + type=click.IntRange(0, 35000)) +def inactivity_timeout_ssh(inactivity_timeout): + """Set ssh inactivity timeout""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SSH_SERVER", 'POLICIES', + {'inactivity_timeout': inactivity_timeout}) + + +@ssh.command('max-sessions') +@click.argument('max-sessions', metavar='', required=True, + type=click.IntRange(0, 100)) +def max_sessions(max_sessions): + """Set max number of concurrent logins""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SSH_SERVER", 'POLICIES', + {'max_sessions': max_sessions}) + + if __name__ == '__main__': config() diff --git a/show/main.py b/show/main.py index 25202e1e42..5257b975db 100755 --- a/show/main.py +++ b/show/main.py @@ -2433,6 +2433,46 @@ def received(db, namespace): ctx.fail("ASIC/SDK health event is not supported on the platform") +# +# 'serial_console' command group ("show serial_console ...") +# +@cli.group('serial_console', invoke_without_command=True) +@clicommon.pass_db +def serial_console(db): + """Show serial_console configuration""" + + serial_console_table = db.cfgdb.get_entry('SERIAL_CONSOLE', 'POLICIES') + + hdrs = ['inactivity-timeout', 'sysrq-capabilities'] + data = [] + + data.append(serial_console_table.get('inactivity_timeout', '900 ')) + data.append(serial_console_table.get('sysrq_capabilities', 'disabled ')) + + configuration = [data] + click.echo(tabulate(configuration, headers=hdrs, tablefmt='simple', missingval='')) + + +# +# 'ssh' command group ("show ssh ...") +# +@cli.group('ssh', invoke_without_command=True) +@clicommon.pass_db +def ssh(db): + """Show ssh configuration""" + + serial_console_table = db.cfgdb.get_entry('SSH_SERVER', 'POLICIES') + + hdrs = 
['inactivity-timeout', 'max-sessions'] + data = [] + + data.append(serial_console_table.get('inactivity_timeout', '900 ')) + data.append(serial_console_table.get('max_session', '0 ')) + + configuration = [data] + click.echo(tabulate(configuration, headers=hdrs, tablefmt='simple', missingval='')) + + # Load plugins and register them helper = util_base.UtilHelper() helper.load_and_register_plugins(plugins, cli) diff --git a/tests/cli_sessions_test.py b/tests/cli_sessions_test.py new file mode 100644 index 0000000000..755b232708 --- /dev/null +++ b/tests/cli_sessions_test.py @@ -0,0 +1,32 @@ +from click.testing import CliRunner + +import config.main as config +import show.main as show +from utilities_common.db import Db + + +class TestCliSessionsCommands: + def test_config_command(self): + runner = CliRunner() + + db = Db() + + result = runner.invoke(config.config.commands['serial_console'].commands['sysrq-capabilities'], + ['enabled'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(config.config.commands['serial_console'].commands['inactivity-timeout'], + ['180'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands['serial_console'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(config.config.commands['ssh'].commands['inactivity-timeout'], ['190'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(config.config.commands['ssh'].commands['max-sessions'], ['60'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands['ssh'], obj=db) + assert result.exit_code == 0 From b4d27c4bbf528f6cc026d37254d820284192951d Mon Sep 17 00:00:00 2001 From: bktsim <144830673+bktsim-arista@users.noreply.github.com> Date: Tue, 17 Sep 2024 09:31:30 -0700 Subject: [PATCH 55/67] Fix multi-asic behaviour for watermarkstat (#3060) * Adds multi-asic support to watermarkstat, fixing watermark/persistent-watermark related commands. 
Previously, the following commands were not behaving correctly on multi-asic devices, as the '-n' namespace option was not available, and correct namespaces were not traversed on multi-asic devices. * show buffer_pool watermark/persistent-watermark * show headroom-pool watermark/persistent-watermark * show priority-group persistent-watermark/watermark * show queue persistent-watermark/watermark This change fixes multi-asic behaviour of CLI commands that rely on watermarkstat, as listed above. --- clear/main.py | 158 +++++- scripts/watermarkstat | 132 ++--- show/main.py | 173 ++++++- tests/mock_tables/asic0/counters_db.json | 313 +++++++++++- tests/mock_tables/asic1/counters_db.json | 413 ++++++++++++++++ tests/mock_tables/dbconnector.py | 28 +- tests/multi_asic_pgdropstat_test.py | 16 +- tests/multi_asic_queue_counter_test.py | 32 +- tests/multi_asic_watermarkstat_test.py | 145 ++++++ tests/watermarkstat_test.py | 10 +- tests/wm_input/wm_test_vectors.py | 602 +++++++++++++++++++++-- 11 files changed, 1853 insertions(+), 169 deletions(-) create mode 100644 tests/multi_asic_watermarkstat_test.py diff --git a/clear/main.py b/clear/main.py index 5ffcd2dba4..38dca2737f 100755 --- a/clear/main.py +++ b/clear/main.py @@ -229,16 +229,38 @@ def watermark(): if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") + +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) @watermark.command('headroom') -def clear_wm_pg_headroom(): +def clear_wm_pg_headroom(namespace): """Clear user headroom WM for pg""" command = ['watermarkstat', '-c', '-t', 'pg_headroom'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @watermark.command('shared') -def clear_wm_pg_shared(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace 
name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_wm_pg_shared(namespace): """Clear user shared WM for pg""" command = ['watermarkstat', '-c', '-t', 'pg_shared'] + if namespace: + command += ['-n', str(namespace)] run_command(command) @priority_group.group() @@ -261,16 +283,38 @@ def persistent_watermark(): if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") + @persistent_watermark.command('headroom') -def clear_pwm_pg_headroom(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_pg_headroom(namespace): """Clear persistent headroom WM for pg""" command = ['watermarkstat', '-c', '-p', '-t', 'pg_headroom'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @persistent_watermark.command('shared') -def clear_pwm_pg_shared(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_pg_shared(namespace): """Clear persistent shared WM for pg""" command = ['watermarkstat', '-c', '-p', '-t', 'pg_shared'] + if namespace: + command += ['-n', str(namespace)] run_command(command) @@ -285,69 +329,159 @@ def watermark(): if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") + @watermark.command('unicast') -def clear_wm_q_uni(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_wm_q_uni(namespace): """Clear user WM for unicast queues""" command = ['watermarkstat', '-c', '-t', 'q_shared_uni'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + 
@watermark.command('multicast') -def clear_wm_q_multi(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_wm_q_multi(namespace): """Clear user WM for multicast queues""" command = ['watermarkstat', '-c', '-t', 'q_shared_multi'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @watermark.command('all') -def clear_wm_q_all(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_wm_q_all(namespace): """Clear user WM for all queues""" command = ['watermarkstat', '-c', '-t', 'q_shared_all'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @queue.group(name='persistent-watermark') def persistent_watermark(): """Clear queue persistent WM. 
One does not simply clear WM, root is required""" if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") + @persistent_watermark.command('unicast') -def clear_pwm_q_uni(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_q_uni(namespace): """Clear persistent WM for persistent queues""" command = ['watermarkstat', '-c', '-p', '-t', 'q_shared_uni'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @persistent_watermark.command('multicast') -def clear_pwm_q_multi(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_q_multi(namespace): """Clear persistent WM for multicast queues""" command = ['watermarkstat', '-c', '-p', '-t', 'q_shared_multi'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @persistent_watermark.command('all') -def clear_pwm_q_all(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def clear_pwm_q_all(namespace): """Clear persistent WM for all queues""" command = ['watermarkstat', '-c', '-p', '-t', 'q_shared_all'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @cli.group(name='headroom-pool') def headroom_pool(): """Clear headroom pool WM""" pass + @headroom_pool.command('watermark') -def watermark(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def watermark(namespace): """Clear headroom pool user WM. 
One does not simply clear WM, root is required""" if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") command = ['watermarkstat', '-c', '-t', 'headroom_pool'] + if namespace: + command += ['-n', str(namespace)] run_command(command) + @headroom_pool.command('persistent-watermark') -def persistent_watermark(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def persistent_watermark(namespace): """Clear headroom pool persistent WM. One does not simply clear WM, root is required""" if os.geteuid() != 0: sys.exit("Root privileges are required for this operation") command = ['watermarkstat', '-c', '-p', '-t', 'headroom_pool'] + if namespace: + command += ['-n', str(namespace)] run_command(command) # diff --git a/scripts/watermarkstat b/scripts/watermarkstat index 99a46d5484..70ea853bc4 100755 --- a/scripts/watermarkstat +++ b/scripts/watermarkstat @@ -5,14 +5,15 @@ # watermarkstat is a tool for displaying watermarks. 
# ##################################################################### - -import argparse +import click import json import os import sys from natsort import natsorted from tabulate import tabulate +from sonic_py_common import multi_asic +import utilities_common.multi_asic as multi_asic_util # mock the redis for unit test purposes # try: @@ -23,6 +24,10 @@ try: sys.path.insert(0, tests_path) from mock_tables import dbconnector + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import tests.mock_tables.mock_multi_asic + dbconnector.load_namespace_config() + if os.environ["WATERMARKSTAT_UNIT_TESTING"] == "1": input_path = os.path.join(tests_path, "wm_input") mock_db_path = os.path.join(input_path, "mock_db") @@ -66,18 +71,33 @@ COUNTERS_PG_INDEX_MAP = "COUNTERS_PG_INDEX_MAP" COUNTERS_BUFFER_POOL_NAME_MAP = "COUNTERS_BUFFER_POOL_NAME_MAP" -class Watermarkstat(object): +class WatermarkstatWrapper(object): + """A wrapper to execute Watermarkstat over the correct namespaces""" + def __init__(self, namespace): + self.namespace = namespace - def __init__(self): - self.counters_db = SonicV2Connector(use_unix_socket_path=False) - self.counters_db.connect(self.counters_db.COUNTERS_DB) + # Initialize the multi_asic object + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.db = None + + @multi_asic_util.run_on_multi_asic + def run(self, clear, persistent, wm_type): + watermarkstat = Watermarkstat(self.db, self.multi_asic.current_namespace) + if clear: + watermarkstat.send_clear_notification(("PERSISTENT" if persistent else "USER", wm_type.upper())) + else: + table_prefix = PERSISTENT_TABLE_PREFIX if persistent else USER_TABLE_PREFIX + watermarkstat.print_all_stat(table_prefix, wm_type) - # connect APP DB for clear notifications - self.app_db = SonicV2Connector(use_unix_socket_path=False) - self.app_db.connect(self.counters_db.APPL_DB) + +class Watermarkstat(object): + + def __init__(self, db, namespace): + self.namespace = 
namespace + self.db = db def get_queue_type(table_id): - queue_type = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_TYPE_MAP, table_id) + queue_type = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_TYPE_MAP, table_id) if queue_type is None: print("Queue Type is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -92,7 +112,7 @@ class Watermarkstat(object): sys.exit(1) def get_queue_port(table_id): - port_table_id = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_PORT_MAP, table_id) + port_table_id = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_PORT_MAP, table_id) if port_table_id is None: print("Port is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -100,7 +120,7 @@ class Watermarkstat(object): return port_table_id def get_pg_port(table_id): - port_table_id = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_PORT_MAP, table_id) + port_table_id = self.db.get(self.db.COUNTERS_DB, COUNTERS_PG_PORT_MAP, table_id) if port_table_id is None: print("Port is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -108,7 +128,7 @@ class Watermarkstat(object): return port_table_id # Get all ports - self.counter_port_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + self.counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) if self.counter_port_name_map is None: print("COUNTERS_PORT_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ -127,7 +147,7 @@ class Watermarkstat(object): self.port_name_map[self.counter_port_name_map[port]] = port # Get Queues for each port - counter_queue_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_NAME_MAP) + counter_queue_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_QUEUE_NAME_MAP) if counter_queue_name_map is None: print("COUNTERS_QUEUE_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ 
-144,7 +164,7 @@ class Watermarkstat(object): self.port_all_queues_map[port][queue] = counter_queue_name_map[queue] # Get PGs for each port - counter_pg_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PG_NAME_MAP) + counter_pg_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PG_NAME_MAP) if counter_pg_name_map is None: print("COUNTERS_PG_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ -154,7 +174,7 @@ class Watermarkstat(object): self.port_pg_map[port][pg] = counter_pg_name_map[pg] # Get all buffer pools - self.buffer_pool_name_to_oid_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_BUFFER_POOL_NAME_MAP) + self.buffer_pool_name_to_oid_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_BUFFER_POOL_NAME_MAP) if self.buffer_pool_name_to_oid_map is None: print("COUNTERS_BUFFER_POOL_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ -194,7 +214,7 @@ class Watermarkstat(object): } def get_queue_index(self, table_id): - queue_index = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_INDEX_MAP, table_id) + queue_index = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_INDEX_MAP, table_id) if queue_index is None: print("Queue index is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -202,7 +222,7 @@ class Watermarkstat(object): return queue_index def get_pg_index(self, table_id): - pg_index = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_INDEX_MAP, table_id) + pg_index = self.db.get(self.db.COUNTERS_DB, COUNTERS_PG_INDEX_MAP, table_id) if pg_index is None: print("Priority group index is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -256,7 +276,7 @@ class Watermarkstat(object): full_table_id = table_prefix + obj_id idx = int(idx_func(obj_id)) pos = self.header_idx_to_pos[idx] - counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, full_table_id, watermark) + counter_data = self.db.get(self.db.COUNTERS_DB, 
full_table_id, watermark) if counter_data is None or counter_data == '': fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: @@ -274,7 +294,7 @@ class Watermarkstat(object): continue db_key = table_prefix + bp_oid - data = self.counters_db.get(self.counters_db.COUNTERS_DB, db_key, type["wm_name"]) + data = self.db.get(self.db.COUNTERS_DB, db_key, type["wm_name"]) if data is None: data = STATUS_NA table.append((buf_pool, data)) @@ -283,58 +303,52 @@ class Watermarkstat(object): # Get stat for each port for port in natsorted(self.counter_port_name_map): row_data = list() + data = self.get_counters(table_prefix, type["obj_map"][port], type["idx_func"], type["wm_name"]) row_data.append(port) row_data.extend(data) table.append(tuple(row_data)) - print(type["message"]) + namespace_str = f" (Namespace {self.namespace})" if multi_asic.is_multi_asic() else '' + print(type["message"] + namespace_str) print(tabulate(table, self.header_list, tablefmt='simple', stralign='right')) def send_clear_notification(self, data): msg = json.dumps(data, separators=(',', ':')) - self.app_db.publish('APPL_DB', 'WATERMARK_CLEAR_REQUEST', msg) + self.db.publish('APPL_DB', 'WATERMARK_CLEAR_REQUEST', msg) return - -def main(): - - parser = argparse.ArgumentParser(description='Display the watermark counters', - formatter_class=argparse.RawTextHelpFormatter, - epilog=""" -Examples: - watermarkstat -t pg_headroom - watermarkstat -t pg_shared - watermarkstat -t q_shared_all - watermarkstat -p -t q_shared_all - watermarkstat -t q_shared_all -c - watermarkstat -t q_shared_uni -c - watermarkstat -t q_shared_multi -c - watermarkstat -p -t pg_shared - watermarkstat -p -t q_shared_multi -c - watermarkstat -t buffer_pool - watermarkstat -t buffer_pool -c - watermarkstat -p -t buffer_pool -c -""") - - parser.add_argument('-c', '--clear', action='store_true', help='Clear watermarks request') - parser.add_argument('-p', '--persistent', action='store_true', help='Do the operations on the persistent 
watermark') - parser.add_argument('-t', '--type', required=True, action='store', - choices=['pg_headroom', 'pg_shared', 'q_shared_uni', 'q_shared_multi', 'buffer_pool', 'headroom_pool', 'q_shared_all'], - help='The type of watermark') - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - args = parser.parse_args() - watermarkstat = Watermarkstat() - - if args.clear: - watermarkstat.send_clear_notification(("PERSISTENT" if args.persistent else "USER", args.type.upper())) - sys.exit(0) - - table_prefix = PERSISTENT_TABLE_PREFIX if args.persistent else USER_TABLE_PREFIX - watermarkstat.print_all_stat(table_prefix, args.type) +@click.command() +@click.option('-c', '--clear', is_flag=True, help='Clear watermarks request') +@click.option('-p', '--persistent', is_flag=True, help='Do the operations on the persistent watermark') +@click.option('-t', '--type', 'wm_type', type=click.Choice(['pg_headroom', 'pg_shared', 'q_shared_uni', 'q_shared_multi', 'buffer_pool', 'headroom_pool', 'q_shared_all']), help='The type of watermark', required=True) +@click.option('-n', '--namespace', type=click.Choice(multi_asic.get_namespace_list()), help='Namespace name or skip for all', default=None) +@click.version_option(version='1.0') +def main(clear, persistent, wm_type, namespace): + """ + Display the watermark counters + + Examples: + watermarkstat -t pg_headroom + watermarkstat -t pg_shared + watermarkstat -t q_shared_all + watermarkstat -p -t q_shared_all + watermarkstat -t q_shared_all -c + watermarkstat -t q_shared_uni -c + watermarkstat -t q_shared_multi -c + watermarkstat -p -t pg_shared + watermarkstat -p -t q_shared_multi -c + watermarkstat -t buffer_pool + watermarkstat -t buffer_pool -c + watermarkstat -p -t buffer_pool -c + watermarkstat -t pg_headroom -n asic0 + watermarkstat -p -t buffer_pool -c -n asic1 + """ + + namespace_context = WatermarkstatWrapper(namespace) + namespace_context.run(clear, persistent, wm_type) sys.exit(0) - if __name__ 
== "__main__": main() diff --git a/show/main.py b/show/main.py index 5257b975db..b7e75b24cf 100755 --- a/show/main.py +++ b/show/main.py @@ -783,23 +783,53 @@ def watermark(): # 'unicast' subcommand ("show queue watermarks unicast") @watermark.command('unicast') -def wm_q_uni(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_q_uni(namespace): """Show user WM for unicast queues""" command = ['watermarkstat', '-t', 'q_shared_uni'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'multicast' subcommand ("show queue watermarks multicast") @watermark.command('multicast') -def wm_q_multi(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_q_multi(namespace): """Show user WM for multicast queues""" command = ['watermarkstat', '-t', 'q_shared_multi'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'all' subcommand ("show queue watermarks all") @watermark.command('all') -def wm_q_all(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_q_all(namespace): """Show user WM for all queues""" command = ['watermarkstat', '-t', 'q_shared_all'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # @@ -813,23 +843,53 @@ def persistent_watermark(): # 'unicast' subcommand ("show queue persistent-watermarks unicast") @persistent_watermark.command('unicast') -def pwm_q_uni(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or 
all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_q_uni(namespace): """Show persistent WM for unicast queues""" command = ['watermarkstat', '-p', '-t', 'q_shared_uni'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'multicast' subcommand ("show queue persistent-watermarks multicast") @persistent_watermark.command('multicast') -def pwm_q_multi(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_q_multi(namespace): """Show persistent WM for multicast queues""" command = ['watermarkstat', '-p', '-t', 'q_shared_multi'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'all' subcommand ("show queue persistent-watermarks all") @persistent_watermark.command('all') -def pwm_q_all(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_q_all(namespace): """Show persistent WM for all queues""" command = ['watermarkstat', '-p', '-t', 'q_shared_all'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # @@ -846,15 +906,35 @@ def watermark(): pass @watermark.command('headroom') -def wm_pg_headroom(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_pg_headroom(namespace): """Show user headroom WM for pg""" command = ['watermarkstat', '-t', 'pg_headroom'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @watermark.command('shared') -def wm_pg_shared(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + 
type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_pg_shared(namespace): """Show user shared WM for pg""" command = ['watermarkstat', '-t', 'pg_shared'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @priority_group.group() @@ -877,15 +957,36 @@ def persistent_watermark(): pass @persistent_watermark.command('headroom') -def pwm_pg_headroom(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_pg_headroom(namespace): """Show persistent headroom WM for pg""" command = ['watermarkstat', '-p', '-t', 'pg_headroom'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) + @persistent_watermark.command('shared') -def pwm_pg_shared(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_pg_shared(namespace): """Show persistent shared WM for pg""" command = ['watermarkstat', '-p', '-t', 'pg_shared'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @@ -898,15 +999,36 @@ def buffer_pool(): """Show details of the buffer pools""" @buffer_pool.command('watermark') -def wm_buffer_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_buffer_pool(namespace): """Show user WM for buffer pools""" - command = ['watermarkstat', '-t' ,'buffer_pool'] + command = ['watermarkstat', '-t', 'buffer_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) + 
@buffer_pool.command('persistent-watermark') -def pwm_buffer_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_buffer_pool(namespace): """Show persistent WM for buffer pools""" command = ['watermarkstat', '-p', '-t', 'buffer_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @@ -919,15 +1041,36 @@ def headroom_pool(): """Show details of headroom pool""" @headroom_pool.command('watermark') -def wm_headroom_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_headroom_pool(namespace): """Show user WM for headroom pool""" command = ['watermarkstat', '-t', 'headroom_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) + @headroom_pool.command('persistent-watermark') -def pwm_headroom_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_headroom_pool(namespace): """Show persistent WM for headroom pool""" command = ['watermarkstat', '-p', '-t', 'headroom_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) diff --git a/tests/mock_tables/asic0/counters_db.json b/tests/mock_tables/asic0/counters_db.json index 53e3b558a2..610662a019 100644 --- a/tests/mock_tables/asic0/counters_db.json +++ b/tests/mock_tables/asic0/counters_db.json @@ -2202,14 +2202,14 @@ "oid:0x1000000004005": "SAI_QUEUE_TYPE_UNICAST", "oid:0x1000000004006": "SAI_QUEUE_TYPE_UNICAST", "oid:0x1000000004007": "SAI_QUEUE_TYPE_UNICAST", - "oid:0x1000000004008": "SAI_QUEUE_TYPE_MULTICAST", - 
"oid:0x1000000004009": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004010": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004011": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004012": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004013": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004014": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004015": "SAI_QUEUE_TYPE_MULTICAST" + "oid:0x1000000004008": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004009": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004010": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004011": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004012": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004013": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004014": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004015": "SAI_QUEUE_TYPE_ALL" }, "COUNTERS_FABRIC_PORT_NAME_MAP" : { "PORT0": "oid:0x1000000000143", @@ -2489,5 +2489,302 @@ "COUNTERS:oid:0x1600000000034d":{ "SAI_COUNTER_STAT_PACKETS": 200, "SAI_COUNTER_STAT_BYTES": 4000 + }, + "COUNTERS_BUFFER_POOL_NAME_MAP": { + "ingress_lossless_pool": "oid:0x18000000000c10" + }, + "COUNTERS_PG_NAME_MAP": { + "Enternet0:0": "oid:100000000b0f0", + "Enternet0:1": "oid:100000000b0f1", + "Enternet0:2": "oid:100000000b0f2", + "Enternet0:3": "oid:100000000b0f3", + "Enternet0:4": "oid:100000000b0f4", + "Enternet0:5": "oid:100000000b0f5", + "Enternet0:6": "oid:100000000b0f6", + "Enternet0:7": "oid:100000000b0f7", + "Enternet0:8": "oid:100000000b0f8", + "Enternet0:9": "oid:100000000b0f9", + "Enternet0:10": "oid:100000000b0fa", + "Enternet0:11": "oid:100000000b0fb", + "Enternet0:12": "oid:100000000b0fc", + "Enternet0:13": "oid:100000000b0fd", + "Enternet0:14": "oid:100000000b0fe", + "Enternet0:15": "oid:100000000b0ff", + "Enternet4:0": "oid:0x100000000b1f0", + "Enternet4:1": "oid:0x100000000b1f1", + "Enternet4:2": "oid:0x100000000b1f2", + "Enternet4:3": "oid:0x100000000b1f3", + "Enternet4:4": "oid:0x100000000b1f4", + "Enternet4:5": "oid:0x100000000b1f5", + "Enternet4:6": "oid:0x100000000b1f6", + "Enternet4:7": "oid:0x100000000b1f7", + 
"Enternet4:8": "oid:0x100000000b1f8", + "Enternet4:9": "oid:0x100000000b1f9", + "Enternet4:10": "oid:0x100000000b1fa", + "Enternet4:11": "oid:0x100000000b1fb", + "Enternet4:12": "oid:0x100000000b1fc", + "Enternet4:13": "oid:0x100000000b1fd", + "Enternet4:14": "oid:0x100000000b1fe", + "Enternet4:15": "oid:0x100000000b1ff" + }, + "COUNTERS_PG_PORT_MAP": { + "oid:100000000b0f0": "oid:0x1000000000002", + "oid:100000000b0f1": "oid:0x1000000000002", + "oid:100000000b0f2": "oid:0x1000000000002", + "oid:100000000b0f3": "oid:0x1000000000002", + "oid:100000000b0f4": "oid:0x1000000000002", + "oid:100000000b0f5": "oid:0x1000000000002", + "oid:100000000b0f6": "oid:0x1000000000002", + "oid:100000000b0f7": "oid:0x1000000000002", + "oid:100000000b0f8": "oid:0x1000000000002", + "oid:100000000b0f9": "oid:0x1000000000002", + "oid:100000000b0fa": "oid:0x1000000000002", + "oid:100000000b0fb": "oid:0x1000000000002", + "oid:100000000b0fc": "oid:0x1000000000002", + "oid:100000000b0fd": "oid:0x1000000000002", + "oid:100000000b0fe": "oid:0x1000000000002", + "oid:100000000b0ff": "oid:0x1000000000002", + "oid:0x100000000b1f0": "oid:0x1000000000004", + "oid:0x100000000b1f1": "oid:0x1000000000004", + "oid:0x100000000b1f2": "oid:0x1000000000004", + "oid:0x100000000b1f3": "oid:0x1000000000004", + "oid:0x100000000b1f4": "oid:0x1000000000004", + "oid:0x100000000b1f5": "oid:0x1000000000004", + "oid:0x100000000b1f6": "oid:0x1000000000004", + "oid:0x100000000b1f7": "oid:0x1000000000004", + "oid:0x100000000b1f8": "oid:0x1000000000004", + "oid:0x100000000b1f9": "oid:0x1000000000004", + "oid:0x100000000b1fa": "oid:0x1000000000004", + "oid:0x100000000b1fb": "oid:0x1000000000004", + "oid:0x100000000b1fc": "oid:0x1000000000004", + "oid:0x100000000b1fd": "oid:0x1000000000004", + "oid:0x100000000b1fe": "oid:0x1000000000004", + "oid:0x100000000b1ff" : "oid:0x1000000000004" + }, + "COUNTERS_PG_INDEX_MAP": { + "oid:100000000b0f0": "0", + "oid:100000000b0f1": "1", + "oid:100000000b0f2": "2", + 
"oid:100000000b0f3": "3", + "oid:100000000b0f4": "4", + "oid:100000000b0f5": "5", + "oid:100000000b0f6": "6", + "oid:100000000b0f7": "7", + "oid:100000000b0f8": "8", + "oid:100000000b0f9": "9", + "oid:100000000b0fa": "10", + "oid:100000000b0fb": "11", + "oid:100000000b0fc": "12", + "oid:100000000b0fd": "13", + "oid:100000000b0fe": "14", + "oid:100000000b0ff": "15", + "oid:0x100000000b1f0": "0", + "oid:0x100000000b1f1": "1", + "oid:0x100000000b1f2": "2", + "oid:0x100000000b1f3": "3", + "oid:0x100000000b1f4": "4", + "oid:0x100000000b1f5": "5", + "oid:0x100000000b1f6": "6", + "oid:0x100000000b1f7": "7", + "oid:0x100000000b1f8": "8", + "oid:0x100000000b1f9": "9", + "oid:0x100000000b1fa": "10", + "oid:0x100000000b1fb": "11", + "oid:0x100000000b1fc": "12", + "oid:0x100000000b1fd": "13", + "oid:0x100000000b1fe": "14", + "oid:0x100000000b1ff" : "15" + }, + "USER_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 100, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 100 + }, + "USER_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 101, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 101 + }, + "USER_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 102, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 102 + }, + "USER_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 103, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 103 + }, + "USER_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 104, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 104 + }, + "USER_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 105, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 105 + }, + "USER_WATERMARKS:oid:100000000b0f6": { + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 106, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 106 + }, + "USER_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 107, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 107 + }, + "USER_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 108, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 108 + }, + "USER_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 109, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 109 + }, + "USER_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 110, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 110 + }, + "USER_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 111, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 111 + }, + "USER_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 112, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 112 + }, + "USER_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 113, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 113 + }, + "USER_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 114, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 114 + }, + "USER_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 115, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 115 + }, + "USER_WATERMARKS:oid:0x100000000b1f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 200, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 200 + }, + "USER_WATERMARKS:oid:0x100000000b1f1": { + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 201, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 201 + }, + "USER_WATERMARKS:oid:0x100000000b1f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 202, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 202 + }, + "USER_WATERMARKS:oid:0x100000000b1f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 203, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 203 + }, + "USER_WATERMARKS:oid:0x100000000b1f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 204, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 204 + }, + "USER_WATERMARKS:oid:0x100000000b1f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 205, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 205 + }, + "USER_WATERMARKS:oid:0x100000000b1f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 206, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 206 + }, + "USER_WATERMARKS:oid:0x100000000b1f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 207, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 207 + }, + "USER_WATERMARKS:oid:0x100000000b1f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 208, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 208 + }, + "USER_WATERMARKS:oid:0x100000000b1f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 209, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 209 + }, + "USER_WATERMARKS:oid:0x100000000b1fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 210, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 210 + }, + "USER_WATERMARKS:oid:0x100000000b1fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 211, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 211 + }, + "USER_WATERMARKS:oid:0x100000000b1fc": { + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 212, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 212 + }, + "USER_WATERMARKS:oid:0x100000000b1fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 213, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 213 + }, + "USER_WATERMARKS:oid:0x100000000b1fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 214, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 214 + }, + "USER_WATERMARKS:oid:0x100000000b1ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 215, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 215 + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "200", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "200" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "201", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "201" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "202", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "202" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "203", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "203" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "204", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "204" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "205", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "205" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "206", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "206" + }, + 
"PERSISTENT_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "207", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "207" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "500", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "500" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "501", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "501" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "502", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "502" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "503", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "503" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "504", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "504" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "505", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "505" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "506", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "506" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "507", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "507" } } diff --git a/tests/mock_tables/asic1/counters_db.json b/tests/mock_tables/asic1/counters_db.json index f919742157..1455f069c0 100644 --- a/tests/mock_tables/asic1/counters_db.json +++ b/tests/mock_tables/asic1/counters_db.json @@ -309,6 +309,111 @@ "oid:0x100000000b1fe": "14", "oid:0x100000000b1ff" : "15" }, 
+ "COUNTERS_BUFFER_POOL_NAME_MAP": { + "ingress_lossless_pool": "oid:0x18000000000c10" + }, + "COUNTERS_QUEUE_PORT_MAP": { + "oid:0x100000000b100": "oid:0x1000000000b06", + "oid:0x100000000b101": "oid:0x1000000000b06", + "oid:0x100000000b102": "oid:0x1000000000b06", + "oid:0x100000000b103": "oid:0x1000000000b06", + "oid:0x100000000b104": "oid:0x1000000000b06", + "oid:0x100000000b105": "oid:0x1000000000b06", + "oid:0x100000000b106": "oid:0x1000000000b06", + "oid:0x100000000b107": "oid:0x1000000000b06", + "oid:0x100000000b108": "oid:0x1000000000b06", + "oid:0x100000000b109": "oid:0x1000000000b06", + "oid:0x100000000b110": "oid:0x1000000000b06", + "oid:0x100000000b111": "oid:0x1000000000b06", + "oid:0x100000000b112": "oid:0x1000000000b06", + "oid:0x100000000b113": "oid:0x1000000000b06", + "oid:0x100000000b114": "oid:0x1000000000b06", + "oid:0x100000000b115": "oid:0x1000000000b06", + "oid:0x100000000b200": "oid:0x1000000000b08", + "oid:0x100000000b201": "oid:0x1000000000b08", + "oid:0x100000000b202": "oid:0x1000000000b08", + "oid:0x100000000b203": "oid:0x1000000000b08", + "oid:0x100000000b204": "oid:0x1000000000b08", + "oid:0x100000000b205": "oid:0x1000000000b08", + "oid:0x100000000b206": "oid:0x1000000000b08", + "oid:0x100000000b207": "oid:0x1000000000b08", + "oid:0x100000000b208": "oid:0x1000000000b08", + "oid:0x100000000b209": "oid:0x1000000000b08", + "oid:0x100000000b210": "oid:0x1000000000b08", + "oid:0x100000000b211": "oid:0x1000000000b08", + "oid:0x100000000b212": "oid:0x1000000000b08", + "oid:0x100000000b213": "oid:0x1000000000b08", + "oid:0x100000000b214": "oid:0x1000000000b08", + "oid:0x100000000b215": "oid:0x1000000000b08" + }, + "COUNTERS_QUEUE_TYPE_MAP": { + "oid:0x100000000b100": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b101": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b102": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b103": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b104": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b105": "SAI_QUEUE_TYPE_UNICAST", 
+ "oid:0x100000000b106": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b107": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b108": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b109": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b110": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b111": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b112": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b113": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b114": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b115": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b200": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b201": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b202": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b203": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b204": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b205": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b206": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b207": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b208": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b209": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b210": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b211": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b212": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b213": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b214": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b215": "SAI_QUEUE_TYPE_ALL" + }, + "COUNTERS_QUEUE_INDEX_MAP": { + "oid:0x100000000b100": "0", + "oid:0x100000000b101": "1", + "oid:0x100000000b102": "2", + "oid:0x100000000b103": "3", + "oid:0x100000000b104": "4", + "oid:0x100000000b105": "5", + "oid:0x100000000b106": "6", + "oid:0x100000000b107": "7", + "oid:0x100000000b108": "8", + "oid:0x100000000b109": "9", + "oid:0x100000000b110": "10", + "oid:0x100000000b111": "11", + "oid:0x100000000b112": "12", + "oid:0x100000000b113": "13", + "oid:0x100000000b114": "14", + "oid:0x100000000b115": "15", + "oid:0x100000000b200": "0", + "oid:0x100000000b201": "1", + "oid:0x100000000b202": "2", + "oid:0x100000000b203": "3", + "oid:0x100000000b204": "4", + 
"oid:0x100000000b205": "5", + "oid:0x100000000b206": "6", + "oid:0x100000000b207": "7", + "oid:0x100000000b208": "8", + "oid:0x100000000b209": "9", + "oid:0x100000000b210": "10", + "oid:0x100000000b211": "11", + "oid:0x100000000b212": "12", + "oid:0x100000000b213": "13", + "oid:0x100000000b214": "14", + "oid:0x100000000b215": "15" + }, "COUNTERS_LAG_NAME_MAP": { "PortChannel0001": "oid:0x60000000005a1", "PortChannel0002": "oid:0x60000000005a2", @@ -1262,5 +1367,313 @@ "COUNTERS:oid:0x1600000000034f":{ "SAI_COUNTER_STAT_PACKETS": 1000, "SAI_COUNTER_STAT_BYTES": 2000 + }, + "USER_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 100, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 100 + }, + "USER_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 101, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 101 + }, + "USER_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 102, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 102 + }, + "USER_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 103, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 103 + }, + "USER_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 104, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 104 + }, + "USER_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 105, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 105 + }, + "USER_WATERMARKS:oid:100000000b0f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 106, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 106 + }, + "USER_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 107, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 107 + 
}, + "USER_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 108, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 108 + }, + "USER_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 109, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 109 + }, + "USER_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 110, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 110 + }, + "USER_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 111, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 111 + }, + "USER_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 112, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 112 + }, + "USER_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 113, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 113 + }, + "USER_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 114, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 114 + }, + "USER_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 115, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 115 + }, + "USER_WATERMARKS:oid:0x100000000b1f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 200, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 200 + }, + "USER_WATERMARKS:oid:0x100000000b1f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 201, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 201 + }, + "USER_WATERMARKS:oid:0x100000000b1f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 202, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 202 + }, + 
"USER_WATERMARKS:oid:0x100000000b1f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 203, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 203 + }, + "USER_WATERMARKS:oid:0x100000000b1f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 204, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 204 + }, + "USER_WATERMARKS:oid:0x100000000b1f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 205, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 205 + }, + "USER_WATERMARKS:oid:0x100000000b1f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 206, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 206 + }, + "USER_WATERMARKS:oid:0x100000000b1f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 207, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 207 + }, + "USER_WATERMARKS:oid:0x100000000b1f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 208, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 208 + }, + "USER_WATERMARKS:oid:0x100000000b1f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 209, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 209 + }, + "USER_WATERMARKS:oid:0x100000000b1fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 210, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 210 + }, + "USER_WATERMARKS:oid:0x100000000b1fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 211, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 211 + }, + "USER_WATERMARKS:oid:0x100000000b1fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 212, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 212 + }, + "USER_WATERMARKS:oid:0x100000000b1fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 213, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 213 + }, + 
"USER_WATERMARKS:oid:0x100000000b1fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 214, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 214 + }, + "USER_WATERMARKS:oid:0x100000000b1ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 215, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 215 + }, + "USER_WATERMARKS:oid:0x18000000000c10": { + "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES": "3000", + "SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES": "432640" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "200", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "200" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "201", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "201" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "202", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "202" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "203", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "203" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "204", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "204" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "205", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "205" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "206", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "206" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "207", + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "207" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "500", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "500" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "501", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "501" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "502", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "502" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "503", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "503" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "504", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "504" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "505", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "505" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "506", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "506" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "507", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "507" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "200", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "200" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "201", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "201" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f2": { 
+ "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "202", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "202" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "203", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "203" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "204", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "204" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "205", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "205" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "206", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "206" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "207", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "207" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "500", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "500" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "501", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "501" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "502", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "502" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "503", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "503" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "504", + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "504" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "505", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "505" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "506", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "506" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "507", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "507" + }, + "USER_WATERMARKS:oid:0x100000000b100": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2057328" + }, + "USER_WATERMARKS:oid:0x100000000b101": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "20" + }, + "USER_WATERMARKS:oid:0x100000000b102": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b103": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" + }, + "USER_WATERMARKS:oid:0x100000000b104": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" + }, + "USER_WATERMARKS:oid:0x100000000b105": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b106": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b107": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "28" + }, + "USER_WATERMARKS:oid:0x100000000b108": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b109": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" + }, + "USER_WATERMARKS:oid:0x100000000b110": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "5" + }, + "USER_WATERMARKS:oid:0x100000000b111": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2057328" + }, + "USER_WATERMARKS:oid:0x100000000b112": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "208" + }, + "USER_WATERMARKS:oid:0x100000000b113": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "20" + }, + 
"USER_WATERMARKS:oid:0x100000000b114": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "228" + }, + "USER_WATERMARKS:oid:0x100000000b115": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" } } diff --git a/tests/mock_tables/dbconnector.py b/tests/mock_tables/dbconnector.py index 4ccb392368..379c4e75cd 100644 --- a/tests/mock_tables/dbconnector.py +++ b/tests/mock_tables/dbconnector.py @@ -68,6 +68,32 @@ def config_set(self, *args): class MockPubSub: + class MessageList: + """A custom subscriptable class to hold messages in a list-like format""" + def __init__(self, channel): + self._data = [] + self._channel = channel + + def __getitem__(self, index): + return self._data[index] + + def __setitem__(self, index, value): + self._data[index] = value + + def append(self, msg): + print(f"Message published to {self._channel}: ", msg) + self._data.append(msg) + + def __init__(self, namespace): + # Initialize channels required for testing + self.messages = self.MessageList('WATERMARK_CLEAR_REQUEST') + self.channels = {'WATERMARK_CLEAR_REQUEST': self.messages} + self.namespace = namespace + + def __getitem__(self, key): + print("Channel:", key, "accessed in namespace:", self.namespace) + return self.channels[key] + def get_message(self): return None @@ -99,7 +125,7 @@ def __init__(self, *args, **kwargs): db_name = kwargs.pop('db_name') self.decode_responses = kwargs.pop('decode_responses', False) == True fname = db_name.lower() + ".json" - self.pubsub = MockPubSub() + self.pubsub = MockPubSub(namespace) if namespace is not None and namespace is not multi_asic.DEFAULT_NAMESPACE: fname = os.path.join(INPUT_DIR, namespace, fname) diff --git a/tests/multi_asic_pgdropstat_test.py b/tests/multi_asic_pgdropstat_test.py index 94bb13011b..2a5e97cfdb 100644 --- a/tests/multi_asic_pgdropstat_test.py +++ b/tests/multi_asic_pgdropstat_test.py @@ -27,18 +27,18 @@ PG14 PG15 -------------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ------ ------ ------ ------\ ------ ------ - 
Ethernet0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ - 0 0 - Ethernet4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ - 0 0 + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A Ethernet-BP0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ 0 0 Ethernet-BP4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ 0 0 -Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ - N/A N/A -Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ - N/A N/A +Ethernet-BP256 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 +Ethernet-BP260 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 """ diff --git a/tests/multi_asic_queue_counter_test.py b/tests/multi_asic_queue_counter_test.py index fe8b057b5d..992709b3ae 100644 --- a/tests/multi_asic_queue_counter_test.py +++ b/tests/multi_asic_queue_counter_test.py @@ -89,14 +89,14 @@ Ethernet-BP4 UC5 121 83 3 21 Ethernet-BP4 UC6 122 84 2 20 Ethernet-BP4 UC7 123 85 1 19 -Ethernet-BP4 MC8 124 86 0 18 -Ethernet-BP4 MC9 125 87 1 17 -Ethernet-BP4 MC10 126 88 2 16 -Ethernet-BP4 MC11 127 89 3 15 -Ethernet-BP4 MC12 128 90 4 14 -Ethernet-BP4 MC13 129 91 5 13 -Ethernet-BP4 MC14 130 92 6 12 -Ethernet-BP4 MC15 131 93 7 11 +Ethernet-BP4 ALL8 124 86 0 18 +Ethernet-BP4 ALL9 125 87 1 17 +Ethernet-BP4 ALL10 126 88 2 16 +Ethernet-BP4 ALL11 127 89 3 15 +Ethernet-BP4 ALL12 128 90 4 14 +Ethernet-BP4 ALL13 129 91 5 13 +Ethernet-BP4 ALL14 130 92 6 12 +Ethernet-BP4 ALL15 131 93 7 11 """ @@ -112,14 +112,14 @@ Ethernet-BP4 UC5 121 83 3 21 Ethernet-BP4 UC6 122 84 2 20 Ethernet-BP4 UC7 123 85 1 19 -Ethernet-BP4 MC8 124 86 0 18 -Ethernet-BP4 MC9 125 87 1 17 -Ethernet-BP4 MC10 126 88 2 16 -Ethernet-BP4 MC11 127 89 3 15 -Ethernet-BP4 MC12 128 90 4 14 -Ethernet-BP4 MC13 129 91 5 13 -Ethernet-BP4 MC14 130 92 6 12 -Ethernet-BP4 MC15 131 93 7 11 +Ethernet-BP4 ALL8 124 86 0 18 +Ethernet-BP4 ALL9 125 87 1 17 +Ethernet-BP4 ALL10 126 88 2 16 +Ethernet-BP4 ALL11 127 89 3 15 +Ethernet-BP4 ALL12 128 90 4 14 +Ethernet-BP4 ALL13 129 91 5 13 
+Ethernet-BP4 ALL14 130 92 6 12 +Ethernet-BP4 ALL15 131 93 7 11 """ diff --git a/tests/multi_asic_watermarkstat_test.py b/tests/multi_asic_watermarkstat_test.py new file mode 100644 index 0000000000..b3bc011011 --- /dev/null +++ b/tests/multi_asic_watermarkstat_test.py @@ -0,0 +1,145 @@ +import os +import sys +from .wm_input.wm_test_vectors import testData +from .utils import get_result_and_return_code +from click.testing import CliRunner +import show.main as show + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + + +class TestWatermarkstatMultiAsic(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + print("SETUP") + + def executor(self, testcase): + runner = CliRunner() + for input in testcase: + if 'clear' in input['cmd']: + exec_cmd = input['cmd'][1:] + print(exec_cmd) + exit_code, output = get_result_and_return_code(exec_cmd) + else: + if len(input['cmd']) == 3: + exec_cmd = show.cli.commands[input['cmd'][0]].commands[input['cmd'][1]].commands[input['cmd'][2]] + else: + exec_cmd = show.cli.commands[input['cmd'][0]].commands[input['cmd'][1]] + args = [] if 'args' not in input else input['args'] + result = runner.invoke(exec_cmd, args) + exit_code = result.exit_code + output = result.output + + print(exit_code) + print(output) + + expected_code = 0 if 'rc' not in input else input['rc'] + assert exit_code == expected_code + assert output == input['rc_output'] + + def test_show_pg_shared_one_masic(self): + self.executor(testData['show_pg_wm_shared_one_masic']) + + def test_show_pg_shared_all_masic(self): + self.executor(testData['show_pg_wm_shared_all_masic']) + + def test_show_pg_headroom_wm_one_masic(self): + 
self.executor(testData['show_pg_wm_hdrm_one_masic']) + + def test_show_pg_headroom_wm_all_masic(self): + self.executor(testData['show_pg_wm_hdrm_all_masic']) + + def test_show_pg_shared_pwm_one_masic(self): + self.executor(testData['show_pg_pwm_shared_one_masic']) + + def test_show_pg_shared_pwm_all_masic(self): + self.executor(testData['show_pg_pwm_shared_all_masic']) + + def test_show_pg_headroom_pwm_one_masic(self): + self.executor(testData['show_pg_pwm_hdrm_one_masic']) + + def test_show_pg_headroom_pwm_all_masic(self): + self.executor(testData['show_pg_pwm_hdrm_all_masic']) + + def test_show_queue_unicast_wm_one_masic(self): + self.executor(testData['show_q_wm_unicast_one_masic']) + + def test_show_queue_unicast_wm_all_masic(self): + self.executor(testData['show_q_wm_unicast_all_masic']) + + def test_show_queue_unicast_pwm_one_masic(self): + self.executor(testData['show_q_pwm_unicast_one_masic']) + + def test_show_queue_unicast_pwm_all_masic(self): + self.executor(testData['show_q_pwm_unicast_all_masic']) + + def test_show_queue_multicast_wm_one_masic(self): + self.executor(testData['show_q_wm_multicast_one_masic']) + + def test_show_queue_multicast_wm_all_masic(self): + self.executor(testData['show_q_wm_multicast_all_masic']) + + def test_show_queue_multicast_pwm_one_masic(self): + self.executor(testData['show_q_pwm_multicast_one_masic']) + + def test_show_queue_multicast_pwm_all_masic(self): + self.executor(testData['show_q_pwm_multicast_all_masic']) + + def test_show_queue_all_wm_one_masic(self): + self.executor(testData['show_q_wm_all_one_masic']) + + def test_show_queue_all_wm_all_masic(self): + self.executor(testData['show_q_wm_all_all_masic']) + + def test_show_queue_all_pwm_one_masic(self): + self.executor(testData['show_q_pwm_all_one_masic']) + + def test_show_queue_all_pwm_all_masic(self): + self.executor(testData['show_q_pwm_all_all_masic']) + + def test_show_buffer_pool_wm_one_masic(self): + self.executor(testData['show_buffer_pool_wm_one_masic']) 
+ + def test_show_buffer_pool_wm_all_masic(self): + self.executor(testData['show_buffer_pool_wm_all_masic']) + + def test_show_buffer_pool_pwm_one_masic(self): + self.executor(testData['show_buffer_pool_pwm_one_masic']) + + def test_show_buffer_pool_pwm_all_masic(self): + self.executor(testData['show_buffer_pool_pwm_all_masic']) + + def test_show_headroom_pool_wm_one_masic(self): + self.executor(testData['show_hdrm_pool_wm_one_masic']) + + def test_show_headroom_pool_wm_all_masic(self): + self.executor(testData['show_hdrm_pool_wm_all_masic']) + + def test_show_headroom_pool_pwm_one_masic(self): + self.executor(testData['show_hdrm_pool_pwm_one_masic']) + + def test_show_headroom_pool_pwm_all_masic(self): + self.executor(testData['show_hdrm_pool_pwm_all_masic']) + + def test_show_invalid_namespace_masic(self): + self.executor(testData['show_invalid_namespace_masic']) + + def test_clear_headroom_one_masic(self): + self.executor(testData['clear_hdrm_pool_wm_one_masic']) + + def test_clear_headroom_all_masic(self): + self.executor(testData['clear_hdrm_pool_wm_all_masic']) + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + print("TEARDOWN") diff --git a/tests/watermarkstat_test.py b/tests/watermarkstat_test.py index dc419ae3b9..6a2ebfa2cf 100644 --- a/tests/watermarkstat_test.py +++ b/tests/watermarkstat_test.py @@ -1,11 +1,9 @@ import os import sys import pytest - import show.main as show from click.testing import CliRunner - -from .wm_input.wm_test_vectors import * +from wm_input.wm_test_vectors import testData test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -84,12 +82,14 @@ def executor(self, testcase): else: exec_cmd = show.cli.commands[input['cmd'][0]].commands[input['cmd'][1]] - result = runner.invoke(exec_cmd, []) + args = [] if 'args' not 
in input else input['args'] + result = runner.invoke(exec_cmd, args) print(result.exit_code) print(result.output) - assert result.exit_code == 0 + expected_code = 0 if 'rc' not in input else input['rc'] + assert result.exit_code == expected_code assert result.output == input['rc_output'] @classmethod diff --git a/tests/wm_input/wm_test_vectors.py b/tests/wm_input/wm_test_vectors.py index 93d9faa4cb..f0a80cf9cb 100644 --- a/tests/wm_input/wm_test_vectors.py +++ b/tests/wm_input/wm_test_vectors.py @@ -1,3 +1,373 @@ +show_pg_wm_shared_output_one_masic = ( + "Ingress shared pool occupancy per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + " Ethernet4 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n") + +show_pg_wm_shared_output_all_masic = ( + "Ingress shared pool occupancy per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- ----- " + "----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + " Ethernet4 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ingress shared pool occupancy per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 " + "PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + 
"Ethernet-BP256 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + "Ethernet-BP260 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n") + +show_pg_wm_hdrm_output_one_masic = ( + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + "Ethernet-BP260 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n") + +show_pg_wm_hdrm_output_all_masic = ( + "Ingress headroom per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + " Ethernet4 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + "Ethernet-BP260 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n") + +show_pg_persistent_wm_shared_output_one_masic = ( + "Ingress shared pool occupancy per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ 
------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_pg_persistent_wm_shared_output_all_masic = ( + "Ingress shared pool occupancy per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + " Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ingress shared pool occupancy per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7" + " PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_pg_persistent_wm_hdrm_output_one_masic = ( + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_pg_persistent_wm_hdrm_output_all_masic = ( + "Ingress headroom per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- 
----- ----- ----- ----- ----- -----" + " ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 200 201 202 203 204 205 206 207 500" + " 501 502 503 504 505 506 507\n" + " Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0\n" + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 " + "PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- ----- " + "----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207 " + "500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A " + "N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_queue_wm_unicast_output_one_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ------- ----- ----- ----- ----- ----- ----- ----- +Ethernet-BP256 2057328 20 2 0 0 2 2 28 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_wm_unicast_output_all_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic0) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +------------ ----- ----- ----- ----- ----- ----- ----- ----- + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ------- ----- ----- ----- ----- ----- ----- ----- +Ethernet-BP256 2057328 20 2 0 0 2 2 28 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_unicast_output_one_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ----- ----- ----- ----- ----- 
----- ----- ----- +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_unicast_output_all_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic0) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +------------ ----- ----- ----- ----- ----- ----- ----- ----- + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_wm_multicast_output_one_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +""" + +show_queue_wm_multicast_output_all_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +Egress shared pool occupancy per multicast queue: (Namespace asic1) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +-------------- ----- ----- ------ ------- ------ ------ ------ ------ +Ethernet-BP256 2 0 5 2057328 208 20 228 2 +Ethernet-BP260 0 0 0 0 0 0 0 0 +""" + +show_queue_pwm_multicast_output_one_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 
MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +""" + +show_queue_pwm_multicast_output_all_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +Egress shared pool occupancy per multicast queue: (Namespace asic1) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +-------------- ----- ----- ------ ------ ------ ------ ------ ------ +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP260 0 0 0 0 0 0 0 0 +""" + +show_queue_wm_all_output_one_masic = """\ +Egress shared pool occupancy per all queues: (Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_wm_all_output_all_masic = """\ +Egress shared pool occupancy per all queues: (Namespace asic0) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +------------ ------ ------ ------- ------- ------- ------- ------- ------- + Ethernet0 0 0 0 0 0 0 0 0 + Ethernet4 0 0 0 0 0 0 0 0 +Ethernet-BP0 0 0 0 0 0 0 0 0 +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per all queues: (Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_all_output_one_masic = """\ +Egress shared pool occupancy per all queues: 
(Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_all_output_all_masic = """\ +Egress shared pool occupancy per all queues: (Namespace asic0) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +------------ ------ ------ ------- ------- ------- ------- ------- ------- + Ethernet0 0 0 0 0 0 0 0 0 + Ethernet4 0 0 0 0 0 0 0 0 +Ethernet-BP0 0 0 0 0 0 0 0 0 +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per all queues: (Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_buffer_pool_wm_output_one_masic = """\ +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 3000 +""" + +show_buffer_pool_wm_output_all_masic = """\ +Shared pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 3000 +""" + +show_buffer_pool_pwm_output_one_masic = """\ +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +show_buffer_pool_pwm_output_all_masic = """\ +Shared pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +show_hdrm_pool_wm_output_one_masic = """\ +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 432640 +""" 
+ +show_hdrm_pool_wm_output_all_masic = """\ +Headroom pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 432640 +""" + +show_hdrm_pool_pwm_output_one_masic = """\ +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +show_hdrm_pool_pwm_output_all_masic = """\ +Headroom pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +clear_hdrm_pool_wm_output_one_masic = """\ +Channel: WATERMARK_CLEAR_REQUEST accessed in namespace: asic0 +Message published to WATERMARK_CLEAR_REQUEST: ["USER","PG_HEADROOM"] +""" + +clear_hdrm_pool_wm_output_all_masic = """\ +Channel: WATERMARK_CLEAR_REQUEST accessed in namespace: asic0 +Message published to WATERMARK_CLEAR_REQUEST: ["USER","PG_HEADROOM"] +Channel: WATERMARK_CLEAR_REQUEST accessed in namespace: asic1 +Message published to WATERMARK_CLEAR_REQUEST: ["USER","PG_HEADROOM"] +""" + show_pg_wm_shared_output="""\ Ingress shared pool occupancy per PG: Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 @@ -124,56 +494,198 @@ 'rc_output': show_pg_wm_hdrm_output } ], - 'show_pg_pwm_shared' : [ {'cmd' : ['priority-group', 'persistent-watermark', 'shared'], - 'rc_output': show_pg_persistent_wm_shared_output - } - ], - 'show_pg_pwm_hdrm' : [ {'cmd' : ['priority-group', 'persistent-watermark', 'headroom'], - 'rc_output': show_pg_persistent_wm_hdrm_output - } - ], - 'show_q_wm_unicast' : [ {'cmd' : ['queue', 'watermark', 'unicast'], - 'rc_output': show_queue_wm_unicast_output + 'show_pg_pwm_shared': [{'cmd': ['priority-group', 'persistent-watermark', 'shared'], + 'rc_output': show_pg_persistent_wm_shared_output } ], - 
'show_q_pwm_unicast' : [ {'cmd' : ['queue', 'persistent-watermark', 'unicast'], - 'rc_output': show_queue_pwm_unicast_output - } + 'show_pg_pwm_hdrm': [{'cmd': ['priority-group', 'persistent-watermark', 'headroom'], + 'rc_output': show_pg_persistent_wm_hdrm_output + } + ], + 'show_q_wm_unicast': [{'cmd': ['queue', 'watermark', 'unicast'], + 'rc_output': show_queue_wm_unicast_output + } + ], + 'show_q_pwm_unicast': [{'cmd': ['queue', 'persistent-watermark', 'unicast'], + 'rc_output': show_queue_pwm_unicast_output + } + ], + 'show_q_wm_multicast': [{'cmd': ['queue', 'watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_output + } ], - 'show_q_wm_multicast' : [ {'cmd' : ['queue', 'watermark', 'multicast'], - 'rc_output': show_queue_wm_multicast_output - } - ], - 'show_q_wm_multicast_neg' : [ { 'cmd' : ['queue', 'watermark', 'multicast'], - 'rc_output': show_queue_wm_multicast_neg_output - } + 'show_q_wm_multicast_neg': [{'cmd': ['queue', 'watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_neg_output + } ], - 'show_q_pwm_multicast' : [ {'cmd' : ['queue', 'persistent-watermark', 'multicast'], - 'rc_output': show_queue_wm_multicast_output - } - ], - 'show_q_wm_all' : [ {'cmd' : ['queue', 'watermark', 'all'], - 'rc_output': show_queue_wm_all_output - } - ], - 'show_q_pwm_all' : [ {'cmd' : ['queue', 'persistent-watermark', 'all'], - 'rc_output': show_queue_pwm_all_output - } - ], - 'show_buffer_pool_wm' : [ {'cmd' : ['buffer_pool', 'watermark'], - 'rc_output': show_buffer_pool_wm_output - } + 'show_q_pwm_multicast': [{'cmd': ['queue', 'persistent-watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_output + } ], - 'show_buffer_pool_pwm' : [ {'cmd' : ['buffer_pool', 'persistent-watermark'], - 'rc_output': show_buffer_pool_persistent_wm_output - } - ], - 'show_hdrm_pool_wm' : [ {'cmd' : ['headroom-pool', 'watermark'], - 'rc_output': show_hdrm_pool_wm_output + 'show_q_wm_all': [{'cmd': ['queue', 'watermark', 'all'], + 'rc_output': 
show_queue_wm_all_output + } + ], + 'show_q_pwm_all': [{'cmd': ['queue', 'persistent-watermark', 'all'], + 'rc_output': show_queue_pwm_all_output + } + ], + 'show_buffer_pool_wm': [{'cmd': ['buffer_pool', 'watermark'], + 'rc_output': show_buffer_pool_wm_output } - ], - 'show_hdrm_pool_pwm' : [ {'cmd' : ['headroom-pool', 'persistent-watermark'], - 'rc_output': show_hdrm_pool_persistent_wm_output + ], + 'show_buffer_pool_pwm': [{'cmd': ['buffer_pool', 'persistent-watermark'], + 'rc_output': show_buffer_pool_persistent_wm_output } - ] + ], + 'show_hdrm_pool_wm': [{'cmd': ['headroom-pool', 'watermark'], + 'rc_output': show_hdrm_pool_wm_output + } + ], + 'show_hdrm_pool_pwm': [{'cmd': ['headroom-pool', 'persistent-watermark'], + 'rc_output': show_hdrm_pool_persistent_wm_output + } + ], + 'show_pg_wm_shared_one_masic': [{'cmd': ['priority-group', 'watermark', 'shared'], + 'args': ['--namespace', 'asic0'], + 'rc_output': show_pg_wm_shared_output_one_masic + } + ], + 'show_pg_wm_shared_all_masic': [{'cmd': ['priority-group', 'watermark', 'shared'], + 'rc_output': show_pg_wm_shared_output_all_masic + } + ], + 'show_pg_wm_hdrm_one_masic': [{'cmd': ['priority-group', 'watermark', 'headroom'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_pg_wm_hdrm_output_one_masic + } + ], + 'show_pg_wm_hdrm_all_masic': [{'cmd': ['priority-group', 'watermark', 'headroom'], + 'rc_output': show_pg_wm_hdrm_output_all_masic + } + ], + 'show_pg_pwm_shared_one_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'shared'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_pg_persistent_wm_shared_output_one_masic + } + ], + 'show_pg_pwm_shared_all_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'shared'], + 'rc_output': show_pg_persistent_wm_shared_output_all_masic + } + ], + 'show_pg_pwm_hdrm_one_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'headroom'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_pg_persistent_wm_hdrm_output_one_masic + 
} + ], + 'show_pg_pwm_hdrm_all_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'headroom'], + 'rc_output': show_pg_persistent_wm_hdrm_output_all_masic + } + ], + 'show_q_wm_unicast_one_masic': [{'cmd': ['queue', 'watermark', 'unicast'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_wm_unicast_output_one_masic + } + ], + 'show_q_wm_unicast_all_masic': [{'cmd': ['queue', 'watermark', 'unicast'], + 'rc_output': show_queue_wm_unicast_output_all_masic + } + ], + 'show_q_pwm_unicast_one_masic': [{'cmd': ['queue', 'persistent-watermark', 'unicast'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_pwm_unicast_output_one_masic + } + ], + 'show_q_pwm_unicast_all_masic': [{'cmd': ['queue', 'persistent-watermark', 'unicast'], + 'rc_output': show_queue_pwm_unicast_output_all_masic + } + ], + 'show_q_wm_multicast_one_masic': [{'cmd': ['queue', 'watermark', 'multicast'], + 'args': ['--namespace', 'asic0'], + 'rc_output': show_queue_wm_multicast_output_one_masic + } + ], + 'show_q_wm_multicast_all_masic': [{'cmd': ['queue', 'watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_output_all_masic + } + ], + 'show_q_pwm_multicast_one_masic': [{'cmd': ['queue', 'persistent-watermark', 'multicast'], + 'args': ['--namespace', 'asic0'], + 'rc_output': show_queue_pwm_multicast_output_one_masic + } + ], + 'show_q_pwm_multicast_all_masic': [{'cmd': ['queue', 'persistent-watermark', 'multicast'], + 'rc_output': show_queue_pwm_multicast_output_all_masic + } + ], + 'show_q_wm_all_one_masic': [{'cmd': ['queue', 'watermark', 'all'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_wm_all_output_one_masic + } + ], + 'show_q_wm_all_all_masic': [{'cmd': ['queue', 'watermark', 'all'], + 'rc_output': show_queue_wm_all_output_all_masic + } + ], + 'show_q_pwm_all_one_masic': [{'cmd': ['queue', 'persistent-watermark', 'all'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_pwm_all_output_one_masic + } + ], + 
'show_q_pwm_all_all_masic': [{'cmd': ['queue', 'persistent-watermark', 'all'], + 'rc_output': show_queue_pwm_all_output_all_masic + } + ], + 'show_buffer_pool_wm_one_masic': [{'cmd': ['buffer_pool', 'watermark'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_buffer_pool_wm_output_one_masic + } + ], + 'show_buffer_pool_wm_all_masic': [{'cmd': ['buffer_pool', 'watermark'], + 'rc_output': show_buffer_pool_wm_output_all_masic + } + ], + 'show_buffer_pool_pwm_one_masic': [{'cmd': ['buffer_pool', 'persistent-watermark'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_buffer_pool_pwm_output_one_masic + } + ], + 'show_buffer_pool_pwm_all_masic': [{'cmd': ['buffer_pool', 'persistent-watermark'], + 'rc_output': show_buffer_pool_pwm_output_all_masic + } + ], + 'show_hdrm_pool_wm_one_masic': [{'cmd': ['headroom-pool', 'watermark'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_hdrm_pool_wm_output_one_masic + } + ], + 'show_hdrm_pool_wm_all_masic': [{'cmd': ['headroom-pool', 'watermark'], + 'rc_output': show_hdrm_pool_wm_output_all_masic + } + ], + 'show_hdrm_pool_pwm_one_masic': [{'cmd': ['headroom-pool', 'persistent-watermark'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_hdrm_pool_pwm_output_one_masic + } + ], + 'show_hdrm_pool_pwm_all_masic': [{'cmd': ['headroom-pool', 'persistent-watermark'], + 'rc_output': show_hdrm_pool_pwm_output_all_masic + } + ], + 'show_invalid_namespace_masic': [{'cmd': ['buffer_pool', 'watermark'], + 'args': ['--namespace', 'asic14'], + 'rc': 2, + 'rc_output': '' + } + ], + 'clear_hdrm_pool_wm_one_masic': [{'cmd': ['clear', 'watermarkstat', '-t', + 'pg_headroom', '-n', 'asic0', '-c'], + 'rc_output': clear_hdrm_pool_wm_output_one_masic + } + ], + 'clear_hdrm_pool_wm_all_masic': [{'cmd': ['clear', 'watermarkstat', '-t', + 'pg_headroom', '-c'], + 'rc_output': clear_hdrm_pool_wm_output_all_masic + } + ] } From 867fc5400e7c53e068bf61e7c4c9e33b54a3fea3 Mon Sep 17 00:00:00 2001 From: Vivek Date: Wed, 18 Sep 2024 
18:12:42 -0500 Subject: [PATCH 56/67] [DASH] Add support for ENI counters (#3496) ### What I did Updated counterpoll cli to support configuration for ENI counters ``` root@dpu:/home/admin# counterpoll eni enable root@dpu:/home/admin# counterpoll eni interval 1000 root@dpu:/home/admin# counterpoll show Type Interval (in ms) Status -------------------------- ------------------ -------- QUEUE_STAT default (10000) enable PORT_STAT default (1000) enable PORT_BUFFER_DROP default (60000) enable RIF_STAT default (1000) enable QUEUE_WATERMARK_STAT default (60000) enable PG_WATERMARK_STAT default (60000) enable PG_DROP_STAT default (10000) enable BUFFER_POOL_WATERMARK_STAT default (60000) enable ACL 10000 enable ENI_STAT 1000 enable ``` --- counterpoll/main.py | 59 ++++++++++++++++++++++++++++++++ tests/counterpoll_test.py | 55 +++++++++++++++++++++++++++++ tests/mock_tables/config_db.json | 4 +++ 3 files changed, 118 insertions(+) diff --git a/counterpoll/main.py b/counterpoll/main.py index ad15c8c248..530281188f 100644 --- a/counterpoll/main.py +++ b/counterpoll/main.py @@ -3,17 +3,29 @@ from flow_counter_util.route import exit_if_route_flow_counter_not_support from swsscommon.swsscommon import ConfigDBConnector from tabulate import tabulate +from sonic_py_common import device_info BUFFER_POOL_WATERMARK = "BUFFER_POOL_WATERMARK" PORT_BUFFER_DROP = "PORT_BUFFER_DROP" PG_DROP = "PG_DROP" ACL = "ACL" +ENI = "ENI" DISABLE = "disable" ENABLE = "enable" DEFLT_60_SEC= "default (60000)" DEFLT_10_SEC= "default (10000)" DEFLT_1_SEC = "default (1000)" + +def is_dpu(db): + """ Check if the device is DPU """ + platform_info = device_info.get_platform_info(db) + if platform_info.get('switch_type') == 'dpu': + return True + else: + return False + + @click.group() def cli(): """ SONiC Static Counter Poll configurations """ @@ -126,6 +138,7 @@ def disable(): port_info['FLEX_COUNTER_STATUS'] = DISABLE configdb.mod_entry("FLEX_COUNTER_TABLE", PORT_BUFFER_DROP, port_info) + # Ingress PG drop 
packet stat @cli.group() @click.pass_context @@ -382,6 +395,47 @@ def disable(ctx): fc_info['FLEX_COUNTER_STATUS'] = 'disable' ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_ROUTE", fc_info) + +# ENI counter commands +@cli.group() +@click.pass_context +def eni(ctx): + """ ENI counter commands """ + ctx.obj = ConfigDBConnector() + ctx.obj.connect() + if not is_dpu(ctx.obj): + click.echo("ENI counters are not supported on non DPU platforms") + exit(1) + + +@eni.command(name='interval') +@click.argument('poll_interval', type=click.IntRange(1000, 30000)) +@click.pass_context +def eni_interval(ctx, poll_interval): + """ Set eni counter query interval """ + eni_info = {} + eni_info['POLL_INTERVAL'] = poll_interval + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ENI, eni_info) + + +@eni.command(name='enable') +@click.pass_context +def eni_enable(ctx): + """ Enable eni counter query """ + eni_info = {} + eni_info['FLEX_COUNTER_STATUS'] = 'enable' + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ENI, eni_info) + + +@eni.command(name='disable') +@click.pass_context +def eni_disable(ctx): + """ Disable eni counter query """ + eni_info = {} + eni_info['FLEX_COUNTER_STATUS'] = 'disable' + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ENI, eni_info) + + @cli.command() def show(): """ Show the counter configuration """ @@ -399,6 +453,7 @@ def show(): tunnel_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'TUNNEL') trap_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_TRAP') route_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_ROUTE') + eni_info = configdb.get_entry('FLEX_COUNTER_TABLE', ENI) header = ("Type", "Interval (in ms)", "Status") data = [] @@ -428,6 +483,10 @@ def show(): data.append(["FLOW_CNT_ROUTE_STAT", route_info.get("POLL_INTERVAL", DEFLT_10_SEC), route_info.get("FLEX_COUNTER_STATUS", DISABLE)]) + if is_dpu(config_db) and eni_info: + data.append(["ENI_STAT", eni_info.get("POLL_INTERVAL", DEFLT_10_SEC), + eni_info.get("FLEX_COUNTER_STATUS", DISABLE)]) + 
click.echo(tabulate(data, headers=header, tablefmt="simple", missingval="")) def _update_config_db_flex_counter_table(status, filename): diff --git a/tests/counterpoll_test.py b/tests/counterpoll_test.py index 4a4da07ee9..6c165498c5 100644 --- a/tests/counterpoll_test.py +++ b/tests/counterpoll_test.py @@ -2,6 +2,7 @@ import json import os import pytest +import mock import sys from click.testing import CliRunner from shutil import copyfile @@ -31,6 +32,21 @@ FLOW_CNT_ROUTE_STAT 10000 enable """ +expected_counterpoll_show_dpu = """Type Interval (in ms) Status +-------------------- ------------------ -------- +QUEUE_STAT 10000 enable +PORT_STAT 1000 enable +PORT_BUFFER_DROP 60000 enable +QUEUE_WATERMARK_STAT default (60000) enable +PG_WATERMARK_STAT default (60000) enable +PG_DROP_STAT 10000 enable +ACL 5000 enable +TUNNEL_STAT 3000 enable +FLOW_CNT_TRAP_STAT 10000 enable +FLOW_CNT_ROUTE_STAT 10000 enable +ENI_STAT 1000 enable +""" + class TestCounterpoll(object): @classmethod def setup_class(cls): @@ -44,6 +60,13 @@ def test_show(self): print(result.output) assert result.output == expected_counterpoll_show + @mock.patch('counterpoll.main.device_info.get_platform_info') + def test_show_dpu(self, mock_get_platform_info): + mock_get_platform_info.return_value = {'switch_type': 'dpu'} + runner = CliRunner() + result = runner.invoke(counterpoll.cli.commands["show"], []) + assert result.output == expected_counterpoll_show_dpu + def test_port_buffer_drop_interval(self): runner = CliRunner() result = runner.invoke(counterpoll.cli.commands["port-buffer-drop"].commands["interval"], ["30000"]) @@ -221,6 +244,38 @@ def test_update_route_counter_interval(self): assert result.exit_code == 2 assert expected in result.output + @pytest.mark.parametrize("status", ["disable", "enable"]) + def test_update_eni_status(self, status): + runner = CliRunner() + result = runner.invoke(counterpoll.cli, ["eni", status]) + assert result.exit_code == 1 + assert result.output == "ENI counters are 
not supported on non DPU platforms\n" + + @pytest.mark.parametrize("status", ["disable", "enable"]) + @mock.patch('counterpoll.main.device_info.get_platform_info') + def test_update_eni_status_dpu(self, mock_get_platform_info, status): + mock_get_platform_info.return_value = {'switch_type': 'dpu'} + runner = CliRunner() + db = Db() + + result = runner.invoke(counterpoll.cli.commands["eni"].commands[status], [], obj=db.cfgdb) + assert result.exit_code == 0 + + table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') + assert status == table["ENI"]["FLEX_COUNTER_STATUS"] + + @mock.patch('counterpoll.main.device_info.get_platform_info') + def test_update_eni_interval(self, mock_get_platform_info): + mock_get_platform_info.return_value = {'switch_type': 'dpu'} + runner = CliRunner() + db = Db() + test_interval = "2000" + + result = runner.invoke(counterpoll.cli.commands["eni"].commands["interval"], [test_interval], obj=db.cfgdb) + assert result.exit_code == 0 + + table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') + assert test_interval == table["ENI"]["POLL_INTERVAL"] @classmethod def teardown_class(cls): diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 108fa7593d..187efed553 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -1785,6 +1785,10 @@ "POLL_INTERVAL": "10000", "FLEX_COUNTER_STATUS": "enable" }, + "FLEX_COUNTER_TABLE|ENI": { + "POLL_INTERVAL": "1000", + "FLEX_COUNTER_STATUS": "enable" + }, "PFC_WD|Ethernet0": { "action": "drop", "detection_time": "600", From 695cc9a7f0aadd56e97475643c966e8875a99548 Mon Sep 17 00:00:00 2001 From: Vivek Date: Tue, 24 Sep 2024 10:02:54 -0700 Subject: [PATCH 57/67] Upgrade pyroute2 and improve cli response time (#3513) What I did Older pyroute2 depends on distutils. Thus upgrade the version to latest to improve import time. 
A similar issue for natsort is reported here [warm-reboot] natsort import is taking more time with python 3.11 in bookworm sonic-buildimage#17246. However pyroute2 import is still heavy in bookworm and thus every CLI command is slow compared to bullseye. root@msn2700:/home/admin# time python3 -c "import pyroute2" real 0m0.378s user 0m0.308s sys 0m0.060s root@msn2700:/home/admin# time python3 -c "import pyroute2" real 0m0.707s user 0m0.425s sys 0m0.097s root@msn2700:/home/admin# time python3 -c "import pyroute2" real 0m0.511s user 0m0.433s sys 0m0.075s To fix this, i've delayed the pyroute2 import into the method where it is actually used, this has an improvement of 0.4 sec for all the CLI commands on slower CPU devices root@msn2700:/home/admin# time python3 -c "import utilities_common.cli as clicommon" real 0m0.693s user 0m0.579s sys 0m0.109s root@msn2700/home/admin# time python3 -c "import utilities_common.cli as clicommon" real 0m0.363s user 0m0.271s sys 0m0.072s --- setup.py | 2 +- utilities_common/multi_asic.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 520530b532..a828387a06 100644 --- a/setup.py +++ b/setup.py @@ -248,7 +248,7 @@ 'pexpect>=4.8.0', 'semantic-version>=2.8.5', 'prettyprinter>=0.18.0', - 'pyroute2>=0.5.14, <0.6.1', + 'pyroute2==0.7.12', 'requests>=2.25.0, <=2.31.0', 'tabulate==0.9.0', 'toposort==1.6', diff --git a/utilities_common/multi_asic.py b/utilities_common/multi_asic.py index b1f24e12e8..4ebd728031 100644 --- a/utilities_common/multi_asic.py +++ b/utilities_common/multi_asic.py @@ -3,7 +3,6 @@ import click import netifaces -import pyroute2 from natsort import natsorted from sonic_py_common import multi_asic, device_info from utilities_common import constants @@ -170,6 +169,7 @@ def multi_asic_args(parser=None): return parser def multi_asic_get_ip_intf_from_ns(namespace): + import pyroute2 if namespace != constants.DEFAULT_NAMESPACE: pyroute2.netns.pushns(namespace) interfaces = 
natsorted(netifaces.interfaces()) @@ -181,6 +181,7 @@ def multi_asic_get_ip_intf_from_ns(namespace): def multi_asic_get_ip_intf_addr_from_ns(namespace, iface): + import pyroute2 if namespace != constants.DEFAULT_NAMESPACE: pyroute2.netns.pushns(namespace) ipaddresses = netifaces.ifaddresses(iface) From b8f306f3b638a1b64e75066805c87e88f5b2b895 Mon Sep 17 00:00:00 2001 From: Dylan Godwin Date: Wed, 25 Sep 2024 11:59:24 -0400 Subject: [PATCH 58/67] [Nokia] Add J2C+/H3/H4/H5 to GCU validator (#3495) * Add Nokia platforms to GCU validator .json file Signed-off-by: dgodwin-nokia --- .../gcu_field_operation_validators.conf.json | 26 ++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index 8b42812af0..c1921470d4 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -28,8 +28,12 @@ "broadcom_asics": { "th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ], "th2": [ "Arista-7260CX3-D108C8", "Arista-7260CX3-C64", "Arista-7260CX3-Q64" ], + "th3": [ "Nokia-IXR7220-H3" ], + "th4": [ "Nokia-IXR7220-H4-64D", "Nokia-IXR7220-H4-32D" ], + "th5": [ "Nokia-IXR7220-H5-64D" ], "td2": [ "Force10-S6000", "Force10-S6000-Q24S32", "Arista-7050-QX32", "Arista-7050-QX-32S", "Nexus-3164", "Arista-7050QX32S-Q32" ], - "td3": [ "Arista-7050CX3-32S-C32", "Arista-7050CX3-32S-D48C8" ] + "td3": [ "Arista-7050CX3-32S-C32", "Arista-7050CX3-32S-D48C8" ], + "j2c+": [ "Nokia-IXR7250E-36x100G", "Nokia-IXR7250E-36x400G" ] } } }, @@ -55,7 +59,11 @@ "td2": "20181100", "th": "20181100", "th2": "20181100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20201200", + "j2c+": "20220500", "cisco-8000": "20201200" } } @@ -81,7 +89,11 @@ "td2": "", "th": 
"20221100", "th2": "20221100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20221100", + "j2c+": "20220500", "cisco-8000": "20201200" } } @@ -105,7 +117,11 @@ "td2": "20181100", "th": "20181100", "th2": "20181100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20201200", + "j2c+": "20220500", "cisco-8000": "20201200" } }, @@ -122,7 +138,11 @@ "td2": "", "th": "20221100", "th2": "20221100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20221100", + "j2c+": "20220500", "cisco-8000": "20201200" } } @@ -148,7 +168,11 @@ "td2": "20181100", "th": "20181100", "th2": "20181100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20201200", + "j2c+": "20220500", "cisco-8000": "20201200" } } From 688c1d1a565f9e57958d40a2c905bbc58af12af2 Mon Sep 17 00:00:00 2001 From: Wenchung Wang <38226696+wenchungw@users.noreply.github.com> Date: Wed, 25 Sep 2024 10:50:00 -0700 Subject: [PATCH 59/67] [dpu_tty]: Add a DPU TTY console utility (#3535) * [dpu_tty]: Add a DPU TTY console utility * Add dpu_tty.py * Add dpu_tty.py to scripts Signed-off-by: Wenchung Wang vincwang@cisco.com What I did Add DPU TTY console utility. How I did it * Read platform.json to retrieve TTY settings in DPUS section. Set up TTY console according to TTY device name and baud rate. * Also provide options to overwrite default TTY device and baud rate. ''' "DPUS": { "dpu0": { "serial-console": { "device": "ttyS4", "baud-rate": "115200" } }, "dpu1": { "serial-console": { "device": "ttyS5", "baud-rate": "115200" } } }, ''' How to verify it Run the utility on a smart switch that provides DPU UART console via ttyS device. The test plan is at https://github.com/sonic-net/sonic-mgmt/pull/12701/files section 1.4. * Correct SA errors * Correct SA errors * Correct SA error * Fix a SA error * Update Command-Reference.md Add DPU serial console utility. 
* Address review comments --- doc/Command-Reference.md | 86 +++++++++++++++++++++++++++++++++++++++- scripts/dpu-tty.py | 73 ++++++++++++++++++++++++++++++++++ setup.py | 1 + 3 files changed, 159 insertions(+), 1 deletion(-) create mode 100755 scripts/dpu-tty.py diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 7697f235f7..e9009ef67d 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -43,6 +43,7 @@ * [Console config commands](#console-config-commands) * [Console connect commands](#console-connect-commands) * [Console clear commands](#console-clear-commands) + * [DPU serial console utility](#dpu-serial-console-utility) * [CMIS firmware upgrade](#cmis-firmware-upgrade) * [CMIS firmware version show commands](#cmis-firmware-version-show-commands) * [CMIS firmware upgrade commands](#cmis-firmware-upgrade-commands) @@ -229,6 +230,7 @@ | Version | Modification Date | Details | | --- | --- | --- | +| v9 | Sep-19-2024 | Add DPU serial console utility | | v8 | Oct-09-2023 | Add CMIS firmware upgrade commands | | v7 | Jun-22-2023 | Add static DNS show and config commands | | v6 | May-06-2021 | Add SNMP show and config commands | @@ -2825,7 +2827,7 @@ Optionally, you can display configured console ports only by specifying the `-b` 1 9600 Enabled - - switch1 ``` -## Console config commands +### Console config commands This sub-section explains the list of configuration options available for console management module. @@ -3001,6 +3003,88 @@ Optionally, you can clear with a remote device name by specifying the `-d` or `- Go Back To [Beginning of the document](#) or [Beginning of this section](#console) +### DPU serial console utility + +**dpu-tty.py** + +This command allows user to connect to a DPU serial console via TTY device with +interactive CLI program: picocom. The configuration is from platform.json. The +utility works only on smart switch that provides DPU UART connections through +/dev/ttyS* devices. 
+ +- Usage: + ``` + dpu-tty.py (-n|--name) [(-b|-baud) ] [(-t|-tty) ] + ``` + +- Example: + ``` + root@MtFuji:/home/cisco# dpu-tty.py -n dpu0 + picocom v3.1 + + port is : /dev/ttyS4 + flowcontrol : none + baudrate is : 115200 + parity is : none + databits are : 8 + stopbits are : 1 + escape is : C-a + local echo is : no + noinit is : no + noreset is : no + hangup is : no + nolock is : no + send_cmd is : sz -vv + receive_cmd is : rz -vv -E + imap is : + omap is : + emap is : crcrlf,delbs, + logfile is : none + initstring : none + exit_after is : not set + exit is : no + + Type [C-a] [C-h] to see available commands + Terminal ready + + sonic login: admin + Password: + Linux sonic 6.1.0-11-2-arm64 #1 SMP Debian 6.1.38-4 (2023-08-08) aarch64 + You are on + ____ ___ _ _ _ ____ + / ___| / _ \| \ | (_)/ ___| + \___ \| | | | \| | | | + ___) | |_| | |\ | | |___ + |____/ \___/|_| \_|_|\____| + + -- Software for Open Networking in the Cloud -- + + Unauthorized access and/or use are prohibited. + All access and/or use are subject to monitoring. + + Help: https://sonic-net.github.io/SONiC/ + + Last login: Mon Sep 9 21:39:44 UTC 2024 on ttyS0 + admin@sonic:~$ + Terminating... + Thanks for using picocom + root@MtFuji:/home/cisco# + ``` + +Optionally, user may overwrite baud rate for experiment. + +- Example: + ``` + root@MtFuji:/home/cisco# dpu-tty.py -n dpu1 -b 9600 + ``` + +Optionally, user may overwrite TTY device for experiment. + +- Example: + ``` + root@MtFuji:/home/cisco# dpu-tty.py -n dpu2 -t ttyS4 + ``` + ## CMIS firmware upgrade ### CMIS firmware version show commands diff --git a/scripts/dpu-tty.py b/scripts/dpu-tty.py new file mode 100755 index 0000000000..ff0b041b01 --- /dev/null +++ b/scripts/dpu-tty.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 +# +# Copyright (c) 2024 Cisco Systems, Inc. 
+# + +import argparse +import json +import os +import subprocess +from sonic_py_common import device_info + +UART_CON = '/usr/bin/picocom' + + +def get_dpu_tty(dpu, tty, baud): + + platform = device_info.get_platform() + if not platform: + print("No platform") + return None + + # Get platform path. + platform_path = device_info.get_path_to_platform_dir() + + if os.path.isfile(os.path.join(platform_path, device_info.PLATFORM_JSON_FILE)): + json_file = os.path.join(platform_path, device_info.PLATFORM_JSON_FILE) + + try: + with open(json_file, 'r') as file: + platform_data = json.load(file) + except (json.JSONDecodeError, IOError, TypeError, ValueError): + print("No platform.json") + return None + + dpus = platform_data.get('DPUS', None) + if dpus is None: + print("No DPUs in platform.json") + return None + + if tty is None: + dev = dpus[dpu]["serial-console"]["device"] + else: + # overwrite tty device in platform.json + dev = tty + + if baud is None: + baud = dpus[dpu]["serial-console"]["baud-rate"] + return dev, baud + + +def main(): + + parser = argparse.ArgumentParser(description='DPU TTY Console Utility') + parser.add_argument('-n', '--name', required=True) + parser.add_argument('-t', '--tty') + parser.add_argument('-b', '--baud') + args = parser.parse_args() + + dpu_tty, dpu_baud = get_dpu_tty(args.name, args.tty, args.baud) + # Use UART console utility for error checking of dpu_tty and dpu_baud. 
+ + p = subprocess.run([UART_CON, '-b', dpu_baud, '/dev/%s' % dpu_tty]) + if p.returncode: + print('{} failed'.format(p.args)) + if p.stdout: + print(p.stdout) + if p.stderr: + print(p.stderr) + return p.returncode + + +if __name__ == "__main__": + exit(main()) diff --git a/setup.py b/setup.py index a828387a06..dc5fa4a9b4 100644 --- a/setup.py +++ b/setup.py @@ -120,6 +120,7 @@ 'scripts/decode-syseeprom', 'scripts/dropcheck', 'scripts/disk_check.py', + 'scripts/dpu-tty.py', 'scripts/dropconfig', 'scripts/dropstat', 'scripts/dualtor_neighbor_check.py', From 94ec7108a85aff9b89f5622ef768530b36450064 Mon Sep 17 00:00:00 2001 From: HP Date: Fri, 27 Sep 2024 14:41:35 -0700 Subject: [PATCH 60/67] Enhance multi-asic support for queuestat (#3554) - Added support for iterating over all namespaces (ns) when none specified - Added a test case to verify all ns behaviour - Introduced a wrapper class to handle the mutli-asic functionality - Replaced argparse with click for better argument checks --- scripts/queuestat | 117 +++++++++++----------- tests/multi_asic_queue_counter_test.py | 133 +++++++++++++++++++++++++ tests/queue_counter_test.py | 16 +++ 3 files changed, 210 insertions(+), 56 deletions(-) diff --git a/scripts/queuestat b/scripts/queuestat index dd8c9d7e0c..3774ede6d9 100755 --- a/scripts/queuestat +++ b/scripts/queuestat @@ -7,7 +7,7 @@ ##################################################################### import json -import argparse +import click import datetime import os.path import sys @@ -102,23 +102,40 @@ def build_json(port, cnstat, voq=False): out.update(ports_stats(k)) return out +class QueuestatWrapper(object): + """A wrapper to execute queuestat cmd over the correct namespaces""" + def __init__(self, namespace, voq): + self.namespace = namespace + self.voq = voq -class Queuestat(object): - def __init__(self, namespace, voq=False): + # Initialize the multi-asic namespace + self.multi_asic = multi_asic_util.MultiAsic(constants.DISPLAY_ALL, 
namespace_option=namespace) self.db = None - self.multi_asic = multi_asic_util.MultiAsic(constants.DISPLAY_ALL, namespace) - if namespace is not None: - for ns in self.multi_asic.get_ns_list_based_on_options(): - self.db = multi_asic.connect_to_all_dbs_for_ns(ns) + + @multi_asic_util.run_on_multi_asic + def run(self, save_fresh_stats, port_to_show_stats, json_opt, non_zero): + queuestat = Queuestat(self.multi_asic.current_namespace, self.db, self.voq) + if save_fresh_stats: + queuestat.save_fresh_stats() + return + + if port_to_show_stats != None: + queuestat.get_print_port_stat(port_to_show_stats, json_opt, non_zero) else: - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.COUNTERS_DB) + queuestat.get_print_all_stat(json_opt, non_zero) + + +class Queuestat(object): + def __init__(self, namespace, db, voq=False): + self.db = db self.voq = voq + self.namespace = namespace + self.namespace_str = f" for {namespace}" if namespace else '' def get_queue_port(table_id): port_table_id = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_PORT_MAP, table_id) if port_table_id is None: - print("Port is not available!", table_id) + print(f"Port is not available{self.namespace_str}!", table_id) sys.exit(1) return port_table_id @@ -130,7 +147,7 @@ class Queuestat(object): self.counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) if self.counter_port_name_map is None: - print("COUNTERS_PORT_NAME_MAP is empty!") + print(f"COUNTERS_PORT_NAME_MAP is empty{self.namespace_str}!") sys.exit(1) self.port_queues_map = {} @@ -148,7 +165,7 @@ class Queuestat(object): counter_queue_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_QUEUE_NAME_MAP) if counter_queue_name_map is None: - print("COUNTERS_QUEUE_NAME_MAP is empty!") + print(f"COUNTERS_QUEUE_NAME_MAP is empty{self.namespace_str}!") sys.exit(1) for queue in counter_queue_name_map: @@ -166,7 +183,7 @@ class Queuestat(object): def get_queue_index(table_id): queue_index = 
self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_INDEX_MAP, table_id) if queue_index is None: - print("Queue index is not available!", table_id) + print(f"Queue index is not available{self.namespace_str}!", table_id) sys.exit(1) return queue_index @@ -174,7 +191,7 @@ class Queuestat(object): def get_queue_type(table_id): queue_type = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_TYPE_MAP, table_id) if queue_type is None: - print("Queue Type is not available!", table_id) + print(f"Queue Type is not available{self.namespace_str}!", table_id) sys.exit(1) elif queue_type == SAI_QUEUE_TYPE_MULTICAST: return QUEUE_TYPE_MC @@ -185,7 +202,7 @@ class Queuestat(object): elif queue_type == SAI_QUEUE_TYPE_ALL: return QUEUE_TYPE_ALL else: - print("Queue Type is invalid:", table_id, queue_type) + print(f"Queue Type is invalid{self.namespace_str}:", table_id, queue_type) sys.exit(1) if self.voq: @@ -255,6 +272,7 @@ class Queuestat(object): else: hdr = voq_header if self.voq else header if table: + print(f"For namespace {self.namespace}:") print(tabulate(table, hdr, tablefmt='simple', stralign='right')) print() @@ -314,7 +332,7 @@ class Queuestat(object): else: hdr = voq_header if self.voq else header if table: - print(port + " Last cached time was " + str(cnstat_old_dict.get('time'))) + print(port + f" Last cached time{self.namespace_str} was " + str(cnstat_old_dict.get('time'))) print(tabulate(table, hdr, tablefmt='simple', stralign='right')) print() @@ -370,7 +388,7 @@ class Queuestat(object): json_output[port].update({"cached_time":cnstat_cached_dict.get('time')}) json_output.update(self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt, non_zero)) else: - print("Last cached time was " + str(cnstat_cached_dict.get('time'))) + print(f"Last cached time{self.namespace_str} was " + str(cnstat_cached_dict.get('time'))) self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt, non_zero) except IOError as e: print(e.errno, e) @@ -395,38 +413,33 @@ class 
Queuestat(object): else: print("Clear and update saved counters for " + port) -def main(): + +@click.command() +@click.option('-p', '--port', type=str, help='Show the queue conters for just one port', default=None) +@click.option('-c', '--clear', is_flag=True, default=False, help='Clear previous stats and save new ones') +@click.option('-d', '--delete', is_flag=True, default=False, help='Delete saved stats') +@click.option('-j', '--json_opt', is_flag=True, default=False, help='Print in JSON format') +@click.option('-V', '--voq', is_flag=True, default=False, help='display voq stats') +@click.option('-nz','--non_zero', is_flag=True, default=False, help='Display non-zero queue counters') +@click.option('-n', '--namespace', type=click.Choice(multi_asic.get_namespace_list()), help='Display queuecounters for a specific namespace name or skip for all', default=None) +@click.version_option(version='1.0') +def main(port, clear, delete, json_opt, voq, non_zero, namespace): + """ + Examples: + queuestat + queuestat -p Ethernet0 + queuestat -c + queuestat -d + queuestat -p Ethernet0 -n asic0 + """ + global cnstat_dir global cnstat_fqn_file - parser = argparse.ArgumentParser(description='Display the queue state and counters', - formatter_class=argparse.RawTextHelpFormatter, - epilog=""" -Examples: - queuestat - queuestat -p Ethernet0 - queuestat -c - queuestat -d -""") - - parser.add_argument('-p', '--port', type=str, help='Show the queue conters for just one port', default=None) - parser.add_argument('-c', '--clear', action='store_true', help='Clear previous stats and save new ones') - parser.add_argument('-d', '--delete', action='store_true', help='Delete saved stats') - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - parser.add_argument('-j', '--json_opt', action='store_true', help='Print in JSON format') - parser.add_argument('-V', '--voq', action='store_true', help='display voq stats') - parser.add_argument('-n','--namespace', 
default=None, help='Display queue counters for specific namespace') - parser.add_argument('-nz','--non_zero', action='store_true', help='Display non-zero queue counters') - args = parser.parse_args() - - save_fresh_stats = args.clear - delete_stats = args.delete - voq = args.voq - json_opt = args.json_opt - namespace = args.namespace - non_zero = args.non_zero - - port_to_show_stats = args.port + save_fresh_stats = clear + delete_stats = delete + + port_to_show_stats = port cache = UserCache() @@ -436,16 +449,8 @@ Examples: if delete_stats: cache.remove() - queuestat = Queuestat( namespace, voq ) - - if save_fresh_stats: - queuestat.save_fresh_stats() - sys.exit(0) - - if port_to_show_stats!=None: - queuestat.get_print_port_stat(port_to_show_stats, json_opt, non_zero) - else: - queuestat.get_print_all_stat(json_opt, non_zero) + queuestat_wrapper = QueuestatWrapper(namespace, voq) + queuestat_wrapper.run(save_fresh_stats, port_to_show_stats, json_opt, non_zero) sys.exit(0) diff --git a/tests/multi_asic_queue_counter_test.py b/tests/multi_asic_queue_counter_test.py index 992709b3ae..af57fa75e5 100644 --- a/tests/multi_asic_queue_counter_test.py +++ b/tests/multi_asic_queue_counter_test.py @@ -22,6 +22,7 @@ show_queue_counters = """\ +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet0 UC0 68 30 56 74 @@ -41,6 +42,7 @@ Ethernet0 MC14 82 44 42 60 Ethernet0 MC15 83 45 41 59 +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet4 UC0 84 46 40 58 @@ -60,6 +62,7 @@ Ethernet4 MC14 98 60 26 44 Ethernet4 MC15 99 61 25 43 +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes ------------ ----- -------------- --------------- ----------- ------------ Ethernet-BP0 UC0 100 62 24 42 @@ -79,6 +82,7 @@ Ethernet-BP0 MC14 114 76 10 28 Ethernet-BP0 MC15 
115 77 9 27 +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes ------------ ----- -------------- --------------- ----------- ------------ Ethernet-BP4 UC0 116 78 8 26 @@ -100,8 +104,131 @@ """ +show_queue_counters_all_asics = """\ +For namespace asic0: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +--------- ----- -------------- --------------- ----------- ------------ +Ethernet0 UC0 68 30 56 74 +Ethernet0 UC1 69 31 55 73 +Ethernet0 UC2 70 32 54 72 +Ethernet0 UC3 71 33 53 71 +Ethernet0 UC4 72 34 52 70 +Ethernet0 UC5 73 35 51 69 +Ethernet0 UC6 74 36 50 68 +Ethernet0 UC7 75 37 49 67 +Ethernet0 MC8 76 38 48 66 +Ethernet0 MC9 77 39 47 65 +Ethernet0 MC10 78 40 46 64 +Ethernet0 MC11 79 41 45 63 +Ethernet0 MC12 80 42 44 62 +Ethernet0 MC13 81 43 43 61 +Ethernet0 MC14 82 44 42 60 +Ethernet0 MC15 83 45 41 59 + +For namespace asic0: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +--------- ----- -------------- --------------- ----------- ------------ +Ethernet4 UC0 84 46 40 58 +Ethernet4 UC1 85 47 39 57 +Ethernet4 UC2 86 48 38 56 +Ethernet4 UC3 87 49 37 55 +Ethernet4 UC4 88 50 36 54 +Ethernet4 UC5 89 51 35 53 +Ethernet4 UC6 90 52 34 52 +Ethernet4 UC7 91 53 33 51 +Ethernet4 MC8 92 54 32 50 +Ethernet4 MC9 93 55 31 49 +Ethernet4 MC10 94 56 30 48 +Ethernet4 MC11 95 57 29 47 +Ethernet4 MC12 96 58 28 46 +Ethernet4 MC13 97 59 27 45 +Ethernet4 MC14 98 60 26 44 +Ethernet4 MC15 99 61 25 43 + +For namespace asic0: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +------------ ----- -------------- --------------- ----------- ------------ +Ethernet-BP0 UC0 100 62 24 42 +Ethernet-BP0 UC1 101 63 23 41 +Ethernet-BP0 UC2 102 64 22 40 +Ethernet-BP0 UC3 103 65 21 39 +Ethernet-BP0 UC4 104 66 20 38 +Ethernet-BP0 UC5 105 67 19 37 +Ethernet-BP0 UC6 106 68 18 36 +Ethernet-BP0 UC7 107 69 17 35 +Ethernet-BP0 MC8 108 70 16 34 +Ethernet-BP0 MC9 109 71 15 33 +Ethernet-BP0 MC10 110 72 14 32 +Ethernet-BP0 MC11 111 73 13 31 +Ethernet-BP0 MC12 
112 74 12 30 +Ethernet-BP0 MC13 113 75 11 29 +Ethernet-BP0 MC14 114 76 10 28 +Ethernet-BP0 MC15 115 77 9 27 + +For namespace asic0: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +------------ ----- -------------- --------------- ----------- ------------ +Ethernet-BP4 UC0 116 78 8 26 +Ethernet-BP4 UC1 117 79 7 25 +Ethernet-BP4 UC2 118 80 6 24 +Ethernet-BP4 UC3 119 81 5 23 +Ethernet-BP4 UC4 120 82 4 22 +Ethernet-BP4 UC5 121 83 3 21 +Ethernet-BP4 UC6 122 84 2 20 +Ethernet-BP4 UC7 123 85 1 19 +Ethernet-BP4 ALL8 124 86 0 18 +Ethernet-BP4 ALL9 125 87 1 17 +Ethernet-BP4 ALL10 126 88 2 16 +Ethernet-BP4 ALL11 127 89 3 15 +Ethernet-BP4 ALL12 128 90 4 14 +Ethernet-BP4 ALL13 129 91 5 13 +Ethernet-BP4 ALL14 130 92 6 12 +Ethernet-BP4 ALL15 131 93 7 11 + +For namespace asic1: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +-------------- ----- -------------- --------------- ----------- ------------ +Ethernet-BP256 UC0 N/A N/A N/A N/A +Ethernet-BP256 UC1 N/A N/A N/A N/A +Ethernet-BP256 UC2 N/A N/A N/A N/A +Ethernet-BP256 UC3 N/A N/A N/A N/A +Ethernet-BP256 UC4 N/A N/A N/A N/A +Ethernet-BP256 UC5 N/A N/A N/A N/A +Ethernet-BP256 UC6 N/A N/A N/A N/A +Ethernet-BP256 UC7 N/A N/A N/A N/A +Ethernet-BP256 MC8 N/A N/A N/A N/A +Ethernet-BP256 MC9 N/A N/A N/A N/A +Ethernet-BP256 MC10 N/A N/A N/A N/A +Ethernet-BP256 MC11 N/A N/A N/A N/A +Ethernet-BP256 MC12 N/A N/A N/A N/A +Ethernet-BP256 MC13 N/A N/A N/A N/A +Ethernet-BP256 MC14 N/A N/A N/A N/A +Ethernet-BP256 MC15 N/A N/A N/A N/A + +For namespace asic1: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +-------------- ----- -------------- --------------- ----------- ------------ +Ethernet-BP260 UC0 N/A N/A N/A N/A +Ethernet-BP260 UC1 N/A N/A N/A N/A +Ethernet-BP260 UC2 N/A N/A N/A N/A +Ethernet-BP260 UC3 N/A N/A N/A N/A +Ethernet-BP260 UC4 N/A N/A N/A N/A +Ethernet-BP260 UC5 N/A N/A N/A N/A +Ethernet-BP260 UC6 N/A N/A N/A N/A +Ethernet-BP260 UC7 N/A N/A N/A N/A +Ethernet-BP260 ALL8 N/A N/A N/A N/A 
+Ethernet-BP260 ALL9 N/A N/A N/A N/A +Ethernet-BP260 ALL10 N/A N/A N/A N/A +Ethernet-BP260 ALL11 N/A N/A N/A N/A +Ethernet-BP260 ALL12 N/A N/A N/A N/A +Ethernet-BP260 ALL13 N/A N/A N/A N/A +Ethernet-BP260 ALL14 N/A N/A N/A N/A +Ethernet-BP260 ALL15 N/A N/A N/A N/A + +""" show_queue_counters_port = """\ +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes ------------ ----- -------------- --------------- ----------- ------------ Ethernet-BP4 UC0 116 78 8 26 @@ -143,6 +270,12 @@ def test_queue_counters_port(self): print(result) assert result == show_queue_counters_port + def test_queue_counters_all_masic(self): + return_code, result = get_result_and_return_code(['queuestat']) + assert return_code == 0 + print(result) + assert result == show_queue_counters_all_asics + @classmethod def teardown_class(cls): os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) diff --git a/tests/queue_counter_test.py b/tests/queue_counter_test.py index 391d004872..508550b9c8 100644 --- a/tests/queue_counter_test.py +++ b/tests/queue_counter_test.py @@ -22,6 +22,7 @@ show_queue_counters = """\ +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet0 UC0 0 0 0 0 @@ -55,6 +56,7 @@ Ethernet0 ALL28 N/A N/A N/A N/A Ethernet0 ALL29 N/A N/A N/A N/A +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet4 UC0 41 96 70 98 @@ -88,6 +90,7 @@ Ethernet4 ALL28 N/A N/A N/A N/A Ethernet4 ALL29 N/A N/A N/A N/A +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet8 UC0 0 0 0 0 @@ -123,6 +126,7 @@ """ show_queue_counters_nz = """\ +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- 
------------ Ethernet0 UC1 60 43 39 1 @@ -155,6 +159,7 @@ Ethernet0 ALL28 N/A N/A N/A N/A Ethernet0 ALL29 N/A N/A N/A N/A +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet4 UC0 41 96 70 98 @@ -188,6 +193,7 @@ Ethernet4 ALL28 N/A N/A N/A N/A Ethernet4 ALL29 N/A N/A N/A N/A +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet8 UC1 38 17 68 91 @@ -324,6 +330,7 @@ """] show_queue_counters_port = """\ +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet8 UC0 0 0 0 0 @@ -359,6 +366,7 @@ """ show_queue_counters_port_nz = """\ +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet8 UC1 38 17 68 91 @@ -1851,6 +1859,7 @@ show_queue_voq_counters = """\ +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet0 VOQ0 0 0 0 0 0 @@ -1862,6 +1871,7 @@ testsw|Ethernet0 VOQ6 33 17 94 74 17 testsw|Ethernet0 VOQ7 40 71 95 33 73 +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet4 VOQ0 54 8 93 78 29 @@ -1873,6 +1883,7 @@ testsw|Ethernet4 VOQ6 68 60 66 81 22 testsw|Ethernet4 VOQ7 63 4 48 76 53 +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet8 VOQ0 41 73 77 74 67 @@ -1887,6 +1898,7 @@ """ show_queue_voq_counters_nz = """\ +For 
namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet0 VOQ1 60 43 39 1 1 @@ -1897,6 +1909,7 @@ testsw|Ethernet0 VOQ6 33 17 94 74 17 testsw|Ethernet0 VOQ7 40 71 95 33 73 +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet4 VOQ0 54 8 93 78 29 @@ -1908,6 +1921,7 @@ testsw|Ethernet4 VOQ6 68 60 66 81 22 testsw|Ethernet4 VOQ7 63 4 48 76 53 +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet8 VOQ0 41 73 77 74 67 @@ -1958,6 +1972,7 @@ ] show_queue_port_voq_counters = """\ +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet0 VOQ0 0 0 0 0 0 @@ -1972,6 +1987,7 @@ """ show_queue_port_voq_counters_nz = """\ +For namespace : Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts ---------------- ----- -------------- --------------- ----------- ------------ -------------------- testsw|Ethernet0 VOQ1 60 43 39 1 1 From 66b41e5f3a4f2ece1cf849a3810aeada602f6f7d Mon Sep 17 00:00:00 2001 From: Andriy Yurkiv <70649192+ayurkiv-nvda@users.noreply.github.com> Date: Mon, 30 Sep 2024 20:22:59 +0300 Subject: [PATCH 61/67] [fast/warm-reboot] Improve retry mechanism to check if SAI_OBJECT_TYPE_ACL_ENTRY entries are in redis (#3548) * [fast/warm-reboot] Improve retry mechanism to check if SAI_OBJECT_TYPE_ACL_ENTRY entries are in redis Signed-off-by: Andriy Yurkiv * Change log severity Signed-off-by: Andriy Yurkiv --------- Signed-off-by: Andriy Yurkiv --- 
scripts/fast-reboot | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 09f8f444ab..aef71d6cd6 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -304,17 +304,23 @@ function check_mirror_session_acls() ACL_ND="missing" start_time=${SECONDS} elapsed_time=$((${SECONDS} - ${start_time})) + retry_count=0 while [[ ${elapsed_time} -lt 10 ]]; do CHECK_ACL_ENTRIES=0 + retry_count=$((retry_count + 1)) ACL_OUTPUT=$(sonic-db-cli ASIC_DB KEYS "*" | grep SAI_OBJECT_TYPE_ACL_ENTRY) || CHECK_ACL_ENTRIES=$? if [[ ${CHECK_ACL_ENTRIES} -ne 0 ]]; then - error "Failed to retrieve SAI_OBJECT_TYPE_ACL_ENTRY from redis" - exit ${EXIT_NO_MIRROR_SESSION_ACLS} + debug "Failed to retrieve SAI_OBJECT_TYPE_ACL_ENTRY from redis, retrying... (Attempt: ${retry_count})" + sleep 0.1 + elapsed_time=$((${SECONDS} - ${start_time})) + continue fi ACL_ENTRIES=( ${ACL_OUTPUT} ) if [[ ${#ACL_ENTRIES[@]} -eq 0 ]]; then - error "NO SAI_OBJECT_TYPE_ACL_ENTRY objects found" - exit ${EXIT_NO_MIRROR_SESSION_ACLS} + debug "NO SAI_OBJECT_TYPE_ACL_ENTRY objects found, retrying... (Attempt: ${retry_count})" + sleep 0.1 + elapsed_time=$((${SECONDS} - ${start_time})) + continue fi for ACL_ENTRY in ${ACL_ENTRIES[@]}; do ACL_PRIORITY=$(sonic-db-cli ASIC_DB HGET ${ACL_ENTRY} SAI_ACL_ENTRY_ATTR_PRIORITY) @@ -332,7 +338,7 @@ function check_mirror_session_acls() elapsed_time=$((${SECONDS} - ${start_time})) done if [[ "${ACL_ARP}" != "found" || "${ACL_ND}" != "found" ]]; then - debug "Failed to program mirror session ACLs on ASIC. ACLs: ARP=${ACL_ARP} ND=${ACL_ND}" + error "Failed to program mirror session ACLs on ASIC. 
ACLs: ARP=${ACL_ARP} ND=${ACL_ND}" exit ${EXIT_NO_MIRROR_SESSION_ACLS} fi debug "Mirror session ACLs (arp, nd) programmed to ASIC successfully" From 008a078a531965f589e319dab1c0b91ca13a6f28 Mon Sep 17 00:00:00 2001 From: Changrong Wu Date: Wed, 2 Oct 2024 15:08:40 -0700 Subject: [PATCH 62/67] Add Unit Test for portstat (#3564) added a unit test for scenarios where a LC's counter DB is N/A. --- .../on_sup_na/chassis_state_db.json | 68 +++++++++++++++++++ tests/portstat_test.py | 38 +++++++++++ 2 files changed, 106 insertions(+) create mode 100644 tests/portstat_db/on_sup_na/chassis_state_db.json diff --git a/tests/portstat_db/on_sup_na/chassis_state_db.json b/tests/portstat_db/on_sup_na/chassis_state_db.json new file mode 100644 index 0000000000..d2e5771098 --- /dev/null +++ b/tests/portstat_db/on_sup_na/chassis_state_db.json @@ -0,0 +1,68 @@ +{ + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD0": { + "module_hostname": "sonic-lc1" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { + "module_hostname": "sonic-lc2" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { + "module_hostname": "sonic-lc3" + }, + "LINECARD_PORT_STAT_MARK_TABLE|sonic-lc1": { + "timestamp": "2020-07-01 00:00:00" + }, + "LINECARD_PORT_STAT_MARK_TABLE|sonic-lc3": { + "timestamp": "2020-07-01 00:00:00" + }, + "LINECARD_PORT_STAT_TABLE|Ethernet1/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet2/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet11/1": { + "state": "N/A", + "rx_ok": "N/A", + "rx_bps": "N/A", + "rx_pps": "N/A", + "rx_util": "N/A", + 
"rx_err": "N/A", + "rx_drop": "N/A", + "rx_ovr": "N/A", + "tx_ok": "N/A", + "tx_bps": "N/A", + "tx_pps": "N/A", + "tx_util": "N/A", + "tx_err": "N/A", + "tx_drop": "N/A", + "tx_ovr": "N/A" + } +} \ No newline at end of file diff --git a/tests/portstat_test.py b/tests/portstat_test.py index af9814f812..9c6f94d96a 100644 --- a/tests/portstat_test.py +++ b/tests/portstat_test.py @@ -272,6 +272,19 @@ intf_counters_on_sup_partial_lc = "Not all linecards have published their counter values.\n" +intf_counters_on_sup_na = """\ + IFACE STATE RX_OK RX_BPS RX_UTIL RX_ERR RX_DRP RX_OVR TX_OK TX_BPS TX_UTIL\ + TX_ERR TX_DRP TX_OVR +------------ ------- ------- --------- --------- -------- -------- -------- ------- --------- ---------\ + -------- -------- -------- + Ethernet1/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 + Ethernet2/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 +Ethernet11/1 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A N/A +""" + TEST_PERIOD = 3 @@ -512,6 +525,31 @@ def test_show_intf_counters_on_sup_partial_lc(self): os.system("cp /tmp/chassis_state_db.json {}" .format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + def test_show_intf_counters_on_sup_na(self): + remove_tmp_cnstat_file() + os.system("cp {} /tmp/".format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.system("cp {} {}".format(os.path.join(test_path, "portstat_db/on_sup_na/chassis_state_db.json"), + os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "1" + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == intf_counters_on_sup_na + + return_code, result = get_result_and_return_code(['portstat']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + 
assert result == intf_counters_on_sup_na + + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" + os.system("cp /tmp/chassis_state_db.json {}" + .format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + @classmethod def teardown_class(cls): print("TEARDOWN") From 72d1faa7aedf986f45d363838483b7398e2a4967 Mon Sep 17 00:00:00 2001 From: HP Date: Mon, 7 Oct 2024 13:34:06 -0700 Subject: [PATCH 63/67] Fix key error when checking for UTILITIES_UNIT_TESTING env var (#3563) --- pfc/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pfc/main.py b/pfc/main.py index 071b4a304e..f894a5d7c5 100644 --- a/pfc/main.py +++ b/pfc/main.py @@ -28,7 +28,7 @@ def dump_config_to_json(self, table_name, namespace): This function dumps the current config in a JSON file for unit testing. """ # Only dump files in unit testing mode - if os.environ["UTILITIES_UNIT_TESTING"] != "2": + if os.getenv("UTILITIES_UNIT_TESTING") != "2": return if namespace not in self.updated_port_tables.keys(): From 910252c996a59279101a7116f9bf89e1f936c596 Mon Sep 17 00:00:00 2001 From: DavidZagury <32644413+DavidZagury@users.noreply.github.com> Date: Wed, 9 Oct 2024 15:57:57 +0300 Subject: [PATCH 64/67] [Mellanox] Rename SKU to Mellanox-SN5600-C256X1 (#3546) --- generic_config_updater/gcu_field_operation_validators.conf.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index c1921470d4..75a2d03a00 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -22,7 +22,7 @@ "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], "spc3": [ "ACS-MSN4700", "ACS-MSN4600", 
"ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40","Mellanox-SN4700-O32","Mellanox-SN4700-V64", "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], - "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "Mellanox-SN5600-C256", "ACS-SN5400" ], + "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "Mellanox-SN5600-C256X1", "ACS-SN5400" ], "spc5": ["ACS-SN5640"] }, "broadcom_asics": { From 88ef85cbfee066c34cce27a58e7053371eb4025f Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Sat, 12 Oct 2024 12:15:43 +0800 Subject: [PATCH 65/67] Add a command to update log level and refresh configuration (#3428) * Add a command to update log level and refresh configuration * Multi ASIC support * Fix pre-commit issue * Fix comment Fix review comment --- config/syslog.py | 54 +++++++++++++++++++++++++ doc/Command-Reference.md | 29 ++++++++++++++ tests/syslog_multi_asic_test.py | 16 ++++++++ tests/syslog_test.py | 70 +++++++++++++++++++++++++++++++++ 4 files changed, 169 insertions(+) diff --git a/config/syslog.py b/config/syslog.py index a5d520d9cf..7228e365c8 100644 --- a/config/syslog.py +++ b/config/syslog.py @@ -642,3 +642,57 @@ def disable_rate_limit_feature(db, service_name, namespace): if not failed: click.echo(f'Disabled syslog rate limit feature for {feature_name}') + + +@syslog.command('level') +@click.option("-i", "--identifier", + required=True, + help="Log identifier in DB for which loglevel is applied (provided with -l)") +@click.option("-l", "--level", + required=True, + help="Loglevel value", + type=click.Choice(['DEBUG', 'INFO', 'NOTICE', 'WARN', 'ERROR'])) +@click.option("--container", + help="Container name to which the SIGHUP is sent (provided with 
--pid or --program)") +@click.option("--program", + help="Program name to which the SIGHUP is sent (provided with --container)") +@click.option("--pid", + help="Process ID to which the SIGHUP is sent (provided with --container if PID is from container)") +@click.option('--namespace', '-n', 'namespace', default=None, + type=click.Choice(multi_asic_util.multi_asic_ns_choices()), + show_default=True, help='Namespace name') +@clicommon.pass_db +def level(db, identifier, level, container, program, pid, namespace): + """ Configure log level """ + if program and not container: + raise click.UsageError('--program must be specified with --container') + + if container and not program and not pid: + raise click.UsageError('--container must be specified with --pid or --program') + + if not namespace: + cfg_db = db.cfgdb + else: + asic_id = multi_asic.get_asic_id_from_name(namespace) + container = f'{container}{asic_id}' + cfg_db = db.cfgdb_clients[namespace] + + cfg_db.mod_entry('LOGGER', identifier, {'LOGLEVEL': level}) + if not container and not program and not pid: + return + + log_config = cfg_db.get_entry('LOGGER', identifier) + require_manual_refresh = log_config.get('require_manual_refresh') + if not require_manual_refresh: + return + + if container: + if program: + command = ['docker', 'exec', '-i', container, 'supervisorctl', 'signal', 'HUP', program] + else: + command = ['docker', 'exec', '-i', container, 'kill', '-s', 'SIGHUP', pid] + else: + command = ['kill', '-s', 'SIGHUP', pid] + output, ret = clicommon.run_command(command, return_cmd=True) + if ret != 0: + raise click.ClickException(f'Failed: {output}') diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index e9009ef67d..1aa9c6523f 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -10858,6 +10858,35 @@ This command is used to disable syslog rate limit feature. 
config syslog rate-limit-feature disable database -n asci0 ``` +**config syslog level** + +This command is used to configure log level for a given log identifier. + +- Usage: + ``` + config syslog level -i -l --container [] --program [] + + config syslog level -i -l --container [] --pid [] + + config syslog level -i -l ---pid [] + ``` + +- Example: + + ``` + # Update the log level without refresh the configuration + config syslog level -i xcvrd -l DEBUG + + # Update the log level and send SIGHUP to xcvrd running in PMON + config syslog level -i xcvrd -l DEBUG --container pmon --program xcvrd + + # Update the log level and send SIGHUP to PID 20 running in PMON + config syslog level -i xcvrd -l DEBUG --container pmon --pid 20 + + # Update the log level and send SIGHUP to PID 20 running in host + config syslog level -i xcvrd -l DEBUG --pid 20 + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#syslog) ## System State diff --git a/tests/syslog_multi_asic_test.py b/tests/syslog_multi_asic_test.py index 7933edcd66..c1a136582c 100644 --- a/tests/syslog_multi_asic_test.py +++ b/tests/syslog_multi_asic_test.py @@ -279,3 +279,19 @@ def test_disable_syslog_rate_limit_feature(self, setup_cmd_module): ['database', '-n', 'asic0'] ) assert result.exit_code == 0 + + @mock.patch('config.syslog.clicommon.run_command') + def test_config_log_level(self, mock_run, setup_cmd_module): + _, config = setup_cmd_module + db = Db() + runner = CliRunner() + + mock_run.return_value = ('something', 0) + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG', '-n', 'asic0'], obj=db + ) + assert result.exit_code == 0 + cfg_db = db.cfgdb_clients['asic0'] + data = cfg_db.get_entry('LOGGER', 'component') + assert data.get('LOGLEVEL') == 'DEBUG' diff --git a/tests/syslog_test.py b/tests/syslog_test.py index c1cbee1127..e77f6d0e6c 100644 --- a/tests/syslog_test.py +++ b/tests/syslog_test.py @@ -484,3 +484,73 @@ def 
side_effect(*args, **kwargs): config.config.commands["syslog"].commands["rate-limit-feature"].commands["disable"], obj=db ) assert result.exit_code == SUCCESS + + @mock.patch('config.syslog.clicommon.run_command') + def test_config_log_level(self, mock_run): + db = Db() + db.cfgdb.set_entry('LOGGER', 'log1', {'require_manual_refresh': 'true'}) + + runner = CliRunner() + + mock_run.return_value = ('something', 0) + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG'], obj=db + ) + assert result.exit_code == SUCCESS + data = db.cfgdb.get_entry('LOGGER', 'component') + assert data.get('LOGLEVEL') == 'DEBUG' + + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG', '--pid', '123'], obj=db + ) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG', '--container', 'pmon', '--pid', '123'], obj=db + ) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG', '--container', 'pmon', '--program', 'xcvrd'], obj=db + ) + assert result.exit_code == SUCCESS + + @mock.patch('config.syslog.clicommon.run_command') + def test_config_log_level_negative(self, mock_run): + db = Db() + + runner = CliRunner() + + mock_run.return_value = ('something', 0) + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'log1', '-l', 'DEBUG', '--container', 'pmon'], obj=db + ) + assert result.exit_code != SUCCESS + + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'log1', '-l', 'DEBUG', '--program', 'xcvrd'], obj=db + ) + assert result.exit_code != SUCCESS + + mock_run.reset_mock() + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'log1', '-l', 'DEBUG', '--container', 'swss', 
'--program', 'orchagent'], obj=db + ) + assert result.exit_code == SUCCESS + # Verify it does not send signal to orchagent if require_manual_refresh is not true + assert mock_run.call_count == 0 + + mock_run.return_value = ('something', -1) + db.cfgdb.set_entry('LOGGER', 'log1', {'require_manual_refresh': 'true'}) + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'log1', '-l', 'DEBUG', '--container', 'pmon', '--program', 'xcvrd'], obj=db + ) + assert result.exit_code != SUCCESS From 319f58d5d5dbe86e2f0aeece67b8274dcd3f3ce8 Mon Sep 17 00:00:00 2001 From: Stepan Blyshchak <38952541+stepanblyschak@users.noreply.github.com> Date: Mon, 14 Oct 2024 19:03:46 +0300 Subject: [PATCH 66/67] [fast/warm-reboot] add cpufreq.default_governor=performance to BOOT_OPTIONS (#3435) * [fast/warm-reboot] add cpufreq.default_governor=performance to BOOT_OPTIONS Append this option to BOOT_OPTIONS variable. How to verify it Run fast-reboot or warm-reboot. Check: admin@sonic:~$ cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor performance After boot is finalized check that it is reset back to default: admin@sonic:~$ cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor schedutil --------- Signed-off-by: Stepan Blyschak --- scripts/fast-reboot | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index aef71d6cd6..935188c393 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -422,6 +422,12 @@ function setup_reboot_variables() local fstype=$(blkid -o value -s TYPE ${sonic_dev}) BOOT_OPTIONS="${BOOT_OPTIONS} ssd-upgrader-part=${sonic_dev},${fstype}" fi + + if [[ "$sonic_asic_type" == "mellanox" ]]; then + # Set governor to performance to speed up boot process. + # The governor is reset back to kernel default in warmboot-finalizer script. 
+ BOOT_OPTIONS="${BOOT_OPTIONS} cpufreq.default_governor=performance" + fi } function check_docker_exec() From 244a18853f56d40ad8460455fc20f9352402d022 Mon Sep 17 00:00:00 2001 From: Saikrishna Arcot Date: Tue, 15 Oct 2024 23:36:02 -0700 Subject: [PATCH 67/67] Update the .NET core version to 8.0 (#3280) For the HTML code coverage report, .NET 6 or newer is needed. Install .NET 8 as it is the latest version currently. Signed-off-by: Saikrishna Arcot --- azure-pipelines.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 8cb6586a9b..5781be9436 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -121,7 +121,7 @@ stages: curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add - sudo apt-add-repository https://packages.microsoft.com/debian/11/prod sudo apt-get update - sudo apt-get install -y dotnet-sdk-5.0 + sudo apt-get install -y dotnet-sdk-8.0 displayName: "Install .NET CORE" - script: |