@portchannel.group(cls=clicommon.AbbreviationGroup, name='retry-count')
@click.pass_context
def portchannel_retry_count(ctx):
    """Get or set the LACP retry count for a port channel"""
    pass


def check_if_retry_count_is_enabled(ctx, portchannel_name):
    """Return True if teamd reports the retry count feature enabled for
    portchannel_name, False otherwise.

    Aborts the CLI context on failure (non-zero teamdctl exit status or a
    10-second timeout).  A FileNotFoundError from Popen (teamdctl missing)
    is deliberately left to propagate so callers can report it in their
    own error message.
    """
    try:
        proc = subprocess.Popen(
            ["teamdctl", portchannel_name, "state", "item", "get", "runner.enable_retry_count_feature"],
            text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output, err = proc.communicate(timeout=10)
        if proc.returncode != 0:
            ctx.fail("Unable to determine if the retry count feature is enabled or not: {}".format(err.strip()))
        return output.strip() == "true"
    except subprocess.TimeoutExpired as e:
        # teamdctl hung; kill and reap the child before bailing out.
        proc.kill()
        proc.communicate()
        ctx.fail("Unable to determine if the retry count feature is enabled or not: {}".format(e))


def _validate_portchannel_for_retry_count(ctx, db, portchannel_name):
    """Shared precondition checks for the retry-count get/set commands."""
    # Don't proceed if the port channel name is not valid
    if is_portchannel_name_valid(portchannel_name) is False:
        ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'"
                 .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO))

    # Don't proceed if the port channel does not exist
    if is_portchannel_present_in_db(db, portchannel_name) is False:
        ctx.fail("{} is not present.".format(portchannel_name))


def _run_retry_count_command(ctx, portchannel_name, action, retry_count=None):
    """Run 'teamdctl <portchannel_name> state item <action> runner.retry_count'.

    action is "get" or "set"; for "set", retry_count supplies the value.
    Returns teamdctl's stdout on success; aborts the CLI context on any
    failure (feature disabled, teamdctl missing, timeout, non-zero exit).
    """
    proc = None
    try:
        # NOTE: ctx.fail() raises a click exception, which the broad
        # handler below re-wraps into the "Unable to ..." message — same
        # observable output as the historical inline implementations.
        if not check_if_retry_count_is_enabled(ctx, portchannel_name):
            ctx.fail("Retry count feature is not enabled!")

        cmd = ["teamdctl", portchannel_name, "state", "item", action, "runner.retry_count"]
        if retry_count is not None:
            cmd.append(str(retry_count))
        proc = subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output, err = proc.communicate(timeout=10)
        if proc.returncode != 0:
            ctx.fail("Unable to {} the retry count: {}".format(action, err.strip()))
        return output
    except FileNotFoundError:
        ctx.fail("Unable to {} the retry count: teamdctl could not be run".format(action))
    except subprocess.TimeoutExpired as e:
        proc.kill()
        proc.communicate()
        ctx.fail("Unable to {} the retry count: {}".format(action, e))
    except Exception as e:
        ctx.fail("Unable to {} the retry count: {}".format(action, e))


@portchannel_retry_count.command('get')
@click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
@click.pass_context
def get_portchannel_retry_count(ctx, portchannel_name):
    """Get the retry count for a port channel"""
    db = ValidatedConfigDBConnector(ctx.obj['db'])
    _validate_portchannel_for_retry_count(ctx, db, portchannel_name)
    output = _run_retry_count_command(ctx, portchannel_name, "get")
    click.echo(output.strip())


@portchannel_retry_count.command('set')
@click.argument('portchannel_name', metavar='<portchannel_name>', required=True)
@click.argument('retry_count', metavar='<retry_count>', required=True, type=click.IntRange(3, 10))
@click.pass_context
def set_portchannel_retry_count(ctx, portchannel_name, retry_count):
    """Set the retry count for a port channel"""
    db = ValidatedConfigDBConnector(ctx.obj['db'])
    _validate_portchannel_for_retry_count(ctx, db, portchannel_name)
    _run_retry_count_command(ctx, portchannel_name, "set", retry_count)
) showHelpAndExit @@ -125,6 +129,12 @@ function parseOptions() u ) SSD_FW_UPDATE_BOOT_OPTION=yes ;; + n ) + REQUIRE_TEAMD_RETRY_COUNT=no + ;; + N ) + REQUIRE_TEAMD_RETRY_COUNT=yes + ;; esac done } @@ -635,6 +645,22 @@ init_warm_reboot_states setup_control_plane_assistant +TEAMD_INCREASE_RETRY_COUNT=0 +if [[ "${REBOOT_TYPE}" = "warm-reboot" || "${REBOOT_TYPE}" = "fastfast-reboot" ]]; then + TEAMD_RETRY_COUNT_PROBE_RC=0 + /usr/local/bin/teamd_increase_retry_count.py --probe-only || TEAMD_RETRY_COUNT_PROBE_RC=$? + if [[ ${TEAMD_RETRY_COUNT_PROBE_RC} -ne 0 ]]; then + if [[ "${REQUIRE_TEAMD_RETRY_COUNT}" = "yes" ]]; then + error "Could not confirm that all neighbor devices are running SONiC with the retry count feature" + exit "${EXIT_TEAMD_RETRY_COUNT_FAILURE}" + else + debug "Warning: Retry count feature support unknown for one or more neighbor devices; assuming that it's not available" + fi + else + TEAMD_INCREASE_RETRY_COUNT=1 + fi +fi + if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then # Freeze orchagent for warm restart # Ask orchagent_restart_check to try freeze 5 times with interval of 2 seconds, @@ -663,6 +689,10 @@ if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then fi fi +if [[ ( "${REBOOT_TYPE}" = "warm-reboot" || "${REBOOT_TYPE}" = "fastfast-reboot" ) && "${TEAMD_INCREASE_RETRY_COUNT}" -eq 1 ]]; then + /usr/local/bin/teamd_increase_retry_count.py +fi + # We are fully committed to reboot from this point on because critical # service will go down and we cannot recover from it. 
#!/usr/bin/python3
"""Probe LACP peers for SONiC's teamd retry count feature, and/or raise the
retry count on all active port channels ahead of a warm/fast reboot so LACP
sessions survive the longer control-plane outage.

With --probe-only, send a version-0xf1 LACPDU on one member port of each
active port channel and sniff for a 0xf1 reply; exit 2 if any peer fails
the probe.  Without it, either use the peer CLI path ('config portchannel
retry-count set ...') when available, or keep spoofing extended-retry-count
LACPDUs from a forked child until signalled to revert.
"""

import subprocess
import json
from scapy.config import conf
conf.ipv6_enabled = False
conf.verb = False
from scapy.fields import ByteField, ShortField, MACField, XStrFixedLenField, ConditionalField
from scapy.layers.l2 import Ether
from scapy.sendrecv import sendp, sniff
from scapy.packet import Packet, split_layers, bind_layers
import scapy.contrib.lacp
import os
import re
import sys
from threading import Thread, Event
import time
import argparse
import signal

from sonic_py_common import logger
from swsscommon.swsscommon import DBConnector, Table

log = logger.Logger()

# Flipped by the SIGUSR1/SIGTERM handler so the forked sender loop restores
# the default retry count and exits.
revertTeamdRetryCountChanges = False

DEFAULT_RETRY_COUNT = 3
EXTENDED_RETRY_COUNT = 5
SLOW_PROTOCOL_MAC_ADDRESS = "01:80:c2:00:00:02"
LACP_ETHERTYPE = 0x8809


class LACPRetryCount(Packet):
    """LACPDU with SONiC's retry count extension.

    version 0xf1 marks the extended PDU; the retry-count TLVs (and the
    shorter trailing reserved pad) are only present in that version, which
    is what every ConditionalField below keys on.  A plain version-0x1 PDU
    carries the standard 50-byte reserved tail instead.
    """
    name = "LACPRetryCount"
    fields_desc = [
        ByteField("version", 0xf1),
        # Actor TLV
        ByteField("actor_type", 1),
        ByteField("actor_length", 20),
        ShortField("actor_system_priority", 0),
        MACField("actor_system", None),
        ShortField("actor_key", 0),
        ShortField("actor_port_priority", 0),
        ShortField("actor_port_number", 0),
        ByteField("actor_state", 0),
        XStrFixedLenField("actor_reserved", "", 3),
        # Partner TLV
        ByteField("partner_type", 2),
        ByteField("partner_length", 20),
        ShortField("partner_system_priority", 0),
        MACField("partner_system", None),
        ShortField("partner_key", 0),
        ShortField("partner_port_priority", 0),
        ShortField("partner_port_number", 0),
        ByteField("partner_state", 0),
        XStrFixedLenField("partner_reserved", "", 3),
        # Collector TLV
        ByteField("collector_type", 3),
        ByteField("collector_length", 16),
        ShortField("collector_max_delay", 0),
        XStrFixedLenField("collector_reserved", "", 12),
        # Retry count TLVs -- only in the 0xf1 extended version
        ConditionalField(ByteField("actor_retry_count_type", 0x80), lambda pkt: pkt.version == 0xf1),
        ConditionalField(ByteField("actor_retry_count_length", 4), lambda pkt: pkt.version == 0xf1),
        ConditionalField(ByteField("actor_retry_count", 0), lambda pkt: pkt.version == 0xf1),
        ConditionalField(XStrFixedLenField("actor_retry_count_reserved", "", 1), lambda pkt: pkt.version == 0xf1),
        ConditionalField(ByteField("partner_retry_count_type", 0x81), lambda pkt: pkt.version == 0xf1),
        ConditionalField(ByteField("partner_retry_count_length", 4), lambda pkt: pkt.version == 0xf1),
        ConditionalField(ByteField("partner_retry_count", 0), lambda pkt: pkt.version == 0xf1),
        ConditionalField(XStrFixedLenField("partner_retry_count_reserved", "", 1), lambda pkt: pkt.version == 0xf1),
        ByteField("terminator_type", 0),
        ByteField("terminator_length", 0),
        # Trailing pad: mutually exclusive conditions, so only one of the
        # two same-named fields is ever present on the wire.
        ConditionalField(XStrFixedLenField("reserved", "", 42), lambda pkt: pkt.version == 0xf1),
        ConditionalField(XStrFixedLenField("reserved", "", 50), lambda pkt: pkt.version != 0xf1),
    ]

# Replace scapy's stock LACP dissector with ours for slow-protocol subtype 1.
split_layers(scapy.contrib.lacp.SlowProtocol, scapy.contrib.lacp.LACP, subtype=1)
bind_layers(scapy.contrib.lacp.SlowProtocol, LACPRetryCount, subtype=1)


class LacpPacketListenThread(Thread):
    """Sniff one port (up to 30s) for a version-0xf1 LACPDU from a given peer MAC."""

    def __init__(self, port, targetMacAddress, sendReadyEvent):
        Thread.__init__(self)
        self.port = port
        self.targetMacAddress = targetMacAddress
        self.sendReadyEvent = sendReadyEvent
        self.detectedNewVersion = False

    def lacpPacketCallback(self, pkt):
        # Used as sniff()'s stop_filter: returning True ends the capture.
        if pkt["LACPRetryCount"].version == 0xf1:
            self.detectedNewVersion = True
        return self.detectedNewVersion

    def run(self):
        sniff(stop_filter=self.lacpPacketCallback, iface=self.port,
              filter="ether proto {} and ether src {}".format(LACP_ETHERTYPE, self.targetMacAddress),
              store=0, timeout=30, started_callback=self.sendReadyEvent.set)


def getPortChannels():
    """Return the set of port channel names that are admin/oper up in APPL_DB
    and whose associated BGP session is admin up in CONFIG_DB."""
    applDb = DBConnector("APPL_DB", 0)
    configDb = DBConnector("CONFIG_DB", 0)
    portChannelTable = Table(applDb, "LAG_TABLE")
    portChannels = portChannelTable.getKeys()
    activePortChannels = []
    for portChannel in portChannels:
        state = portChannelTable.get(portChannel)
        if not state or not state[0]:
            continue
        isAdminUp = False
        isOperUp = False
        for key, value in state[1]:
            if key == "admin_status":
                isAdminUp = value == "up"
            elif key == "oper_status":
                isOperUp = value == "up"
        if isAdminUp and isOperUp:
            activePortChannels.append(portChannel)

    # Now find out which BGP sessions on these port channels are admin up.
    # This needs to go through a circuitous sequence of steps:
    #
    # 1. Get the local IPv4/IPv6 address assigned to each port channel.
    # 2. Find out which BGP session (in CONFIG_DB) has a local_addr attribute
    #    of the local IPv4/IPv6 address.
    # 3. Check the admin_status field of that table in CONFIG_DB.
    portChannelData = {}
    portChannelInterfaceTable = Table(configDb, "PORTCHANNEL_INTERFACE")
    portChannelInterfaces = portChannelInterfaceTable.getKeys()
    for portChannelInterface in portChannelInterfaces:
        if "|" not in portChannelInterface:
            continue
        portChannel = portChannelInterface.split("|")[0]
        # Keys are lowercased so IPv6 addresses compare case-insensitively.
        ipAddress = portChannelInterface.split("|")[1].split("/")[0].lower()
        if portChannel not in activePortChannels:
            continue
        portChannelData[ipAddress] = {
            "portChannel": portChannel,
            "adminUp": False
        }

    bgpTable = Table(configDb, "BGP_NEIGHBOR")
    bgpNeighbors = bgpTable.getKeys()
    for bgpNeighbor in bgpNeighbors:
        neighborData = bgpTable.get(bgpNeighbor)
        if not neighborData[0]:
            continue
        localAddr = None
        isAdminUp = False
        for key, value in neighborData[1]:
            if key == "local_addr":
                # BUGFIX: lowercase before the membership test —
                # portChannelData keys were built lowercased, so a
                # mixed-case local_addr (common for IPv6) must not be
                # compared verbatim or the session is wrongly skipped.
                localAddr = value.lower()
                if localAddr not in portChannelData:
                    localAddr = None
                    break
            elif key == "admin_status":
                isAdminUp = value == "up"
        if not localAddr:
            continue
        portChannelData[localAddr]["adminUp"] = isAdminUp

    return {data["portChannel"] for data in portChannelData.values() if data["adminUp"]}


def getPortChannelConfig(portChannelName):
    """Return teamd's state dump for a port channel as a parsed dict."""
    (processStdout, _) = getCmdOutput(["teamdctl", portChannelName, "state", "dump"])
    return json.loads(processStdout)


def getLldpNeighbors():
    """Return lldpctl's JSON neighbor report as a parsed dict."""
    (processStdout, _) = getCmdOutput(["lldpctl", "-f", "json"])
    return json.loads(processStdout)


def craftLacpPacket(portChannelConfig, portName, isResetPacket=False, newVersion=True):
    """Build an LACPDU for portName that mirrors teamd's current actor/partner
    state.

    newVersion=True emits the 0xf1 extended PDU carrying the retry count
    (EXTENDED_RETRY_COUNT, or DEFAULT_RETRY_COUNT when isResetPacket=True);
    newVersion=False emits a standard version-1 PDU with no retry TLVs.
    """
    portConfig = portChannelConfig["ports"][portName]
    actorConfig = portConfig["runner"]["actor_lacpdu_info"]
    partnerConfig = portConfig["runner"]["partner_lacpdu_info"]
    l2 = Ether(dst=SLOW_PROTOCOL_MAC_ADDRESS, src=portConfig["ifinfo"]["dev_addr"], type=LACP_ETHERTYPE)
    l3 = scapy.contrib.lacp.SlowProtocol(subtype=0x01)
    l4 = LACPRetryCount()
    l4.version = 0xf1 if newVersion else 0x1
    l4.actor_system_priority = actorConfig["system_priority"]
    l4.actor_system = actorConfig["system"]
    l4.actor_key = actorConfig["key"]
    l4.actor_port_priority = actorConfig["port_priority"]
    l4.actor_port_number = actorConfig["port"]
    l4.actor_state = actorConfig["state"]
    l4.partner_system_priority = partnerConfig["system_priority"]
    l4.partner_system = partnerConfig["system"]
    l4.partner_key = partnerConfig["key"]
    l4.partner_port_priority = partnerConfig["port_priority"]
    l4.partner_port_number = partnerConfig["port"]
    l4.partner_state = partnerConfig["state"]
    if newVersion:
        l4.actor_retry_count = EXTENDED_RETRY_COUNT if not isResetPacket else DEFAULT_RETRY_COUNT
        l4.partner_retry_count = DEFAULT_RETRY_COUNT
    return l2 / l3 / l4


def sendLacpPackets(packets, revertPackets):
    """Resend the extended-retry-count LACPDUs every 15s until told to
    revert, then send the reset LACPDUs once.  Runs in the forked child."""
    global revertTeamdRetryCountChanges
    while not revertTeamdRetryCountChanges:
        for port, packet in packets:
            sendp(packet, iface=port)
        time.sleep(15)
    if revertTeamdRetryCountChanges:
        for port, packet in revertPackets:
            sendp(packet, iface=port)


def abortTeamdChanges(signum, frame):
    """SIGUSR1/SIGTERM handler: request that retry count changes be reverted."""
    global revertTeamdRetryCountChanges
    log.log_info("Got signal {}, reverting teamd retry count change".format(signum))
    revertTeamdRetryCountChanges = True


def getCmdOutput(cmd):
    """Run cmd and return (stdout_bytes, returncode); stderr is inherited."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    return proc.communicate()[0], proc.returncode


def main(probeOnly=False):
    if os.geteuid() != 0:
        log.log_error("Root privileges required for this operation", also_print_to_console=True)
        sys.exit(1)
    portChannels = getPortChannels()
    if not portChannels:
        log.log_info("No port channels retrieved; exiting")
        return
    failedPortChannels = []
    if probeOnly:
        for portChannel in portChannels:
            config = getPortChannelConfig(portChannel)
            lldpInfo = getLldpNeighbors()
            portChannelChecked = False
            # One successful probe on any member port is enough for the
            # whole port channel (hence the breaks below).
            for portName in config["ports"].keys():
                if not "runner" in config["ports"][portName] or \
                        not "partner_lacpdu_info" in config["ports"][portName]["runner"] or \
                        not "actor_lacpdu_info" in config["ports"][portName]["runner"]:
                    log.log_error("ERROR: Missing information from teamd about {}; skipping".format(portName))
                    failedPortChannels.append(portChannel)
                    break

                # NOTE(review): assumes lldpctl's "interface" value is a list
                # of single-key {ifname: info} dicts — confirm against the
                # lldpctl JSON schema in use.
                interfaceLldpInfo = [k for k in lldpInfo["lldp"]["interface"] if portName in k]
                if not interfaceLldpInfo:
                    log.log_warning("WARNING: No LLDP info available for {}; skipping".format(portName))
                    continue
                interfaceLldpInfo = interfaceLldpInfo[0][portName]
                peerName = list(interfaceLldpInfo["chassis"].keys())[0]
                peerInfo = interfaceLldpInfo["chassis"][peerName]
                if "descr" not in peerInfo:
                    log.log_warning("WARNING: No peer description available via LLDP for {}; skipping".format(portName))
                    continue
                portChannelChecked = True
                if "sonic" not in peerInfo["descr"].lower():
                    log.log_warning("WARNING: Peer device is not a SONiC device; skipping")
                    failedPortChannels.append(portChannel)
                    break

                sendReadyEvent = Event()

                # Start sniffing thread
                lacpThread = LacpPacketListenThread(portName, config["ports"][portName]["runner"]["partner_lacpdu_info"]["system"], sendReadyEvent)
                lacpThread.start()

                # Generate and send probe packet after sniffing has started
                probePacket = craftLacpPacket(config, portName)
                sendReadyEvent.wait()
                sendp(probePacket, iface=portName)

                lacpThread.join()

                # Revert the peer to a standard LACPDU view of us; the short
                # sleep lets the peer finish processing the probe first.
                resetProbePacket = craftLacpPacket(config, portName, newVersion=False)
                time.sleep(2)
                sendp(resetProbePacket, iface=portName, count=2, inter=0.5)

                if lacpThread.detectedNewVersion:
                    log.log_notice("SUCCESS: Peer device {} is running version of SONiC with teamd retry count feature".format(peerName), also_print_to_console=True)
                    break
                else:
                    log.log_warning("WARNING: Peer device {} is running version of SONiC without teamd retry count feature".format(peerName), also_print_to_console=True)
                    failedPortChannels.append(portChannel)
                    break
            if not portChannelChecked:
                log.log_warning("WARNING: No information available about peer device on port channel {}".format(portChannel), also_print_to_console=True)
                failedPortChannels.append(portChannel)
        if failedPortChannels:
            log.log_error("ERROR: There are port channels/peer devices that failed the probe: {}".format(failedPortChannels), also_print_to_console=True)
            sys.exit(2)
    else:
        global revertTeamdRetryCountChanges
        signal.signal(signal.SIGUSR1, abortTeamdChanges)
        signal.signal(signal.SIGTERM, abortTeamdChanges)
        # If the local CLI supports retry-count, teamd does too; use it.
        (_, rc) = getCmdOutput(["config", "portchannel", "retry-count", "get", list(portChannels)[0]])
        if rc == 0:
            # Currently running on SONiC version with teamd retry count feature
            for portChannel in portChannels:
                getCmdOutput(["config", "portchannel", "retry-count", "set", portChannel, str(EXTENDED_RETRY_COUNT)])
            pid = os.fork()
            if pid == 0:
                # Child: idle until signalled, then restore the default.
                while not revertTeamdRetryCountChanges:
                    time.sleep(15)
                if revertTeamdRetryCountChanges:
                    for portChannel in portChannels:
                        getCmdOutput(["config", "portchannel", "retry-count", "set", portChannel, str(DEFAULT_RETRY_COUNT)])
        else:
            # Old local SONiC image: spoof extended LACPDUs ourselves.
            lacpPackets = []
            revertLacpPackets = []
            for portChannel in portChannels:
                config = getPortChannelConfig(portChannel)
                for portName in config["ports"].keys():
                    if not "runner" in config["ports"][portName] or \
                            not "partner_lacpdu_info" in config["ports"][portName]["runner"] or \
                            not "actor_lacpdu_info" in config["ports"][portName]["runner"]:
                        log.log_error("ERROR: Missing information from teamd about {}; skipping".format(portName))
                        break

                    packet = craftLacpPacket(config, portName)
                    lacpPackets.append((portName, packet))
                    packet = craftLacpPacket(config, portName, isResetPacket=True)
                    revertLacpPackets.append((portName, packet))
            pid = os.fork()
            if pid == 0:
                # Child: keep sending until signalled, then send reverts.
                sendLacpPackets(lacpPackets, revertLacpPackets)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Teamd retry count changer.')
    parser.add_argument('--probe-only', action='store_true',
                        help='Probe the peer devices only, to verify that they support the teamd retry count feature')
    args = parser.parse_args()
    main(args.probe_only)
result.output + def test_get_invalid_portchannel_retry_count(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + # get the retry count of a portchannel with an invalid portchannel name + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["get"], ["Ethernet48"], obj=obj) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Error: Ethernet48 is invalid!" in result.output + + def test_set_invalid_portchannel_retry_count(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + # set the retry count of a portchannel with an invalid portchannel name + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["set"], ["Ethernet48", "5"], obj=obj) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Error: Ethernet48 is invalid!" in result.output + + def test_get_non_existing_portchannel_retry_count(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + # get the retry count of a portchannel with portchannel not yet created + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["get"], ["PortChannel0005"], obj=obj) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Error: PortChannel0005 is not present." in result.output + + def test_set_non_existing_portchannel_retry_count(self): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + # set the retry count of a portchannel with portchannel not yet created + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["set"], ["PortChannel0005", "5"], obj=obj) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Error: PortChannel0005 is not present." 
in result.output + + originalSubprocessPopen = subprocess.Popen + + class SubprocessMock: + def __init__(self, *args, **kwargs): + self.retryCountEnabled = True + self.timeout = False + + def __call__(self, *args, **kwargs): + stdoutResult = "" + stderrResult = "" + rc = 0 + + commandArgs = args[0] + if commandArgs[0] != "teamdctl": + return TestPortChannel.originalSubprocessPopen(*args, **kwargs) + if self.timeout: + return TestPortChannel.originalSubprocessPopen(["sleep", "90"], **kwargs) + if commandArgs[5] == "runner.enable_retry_count_feature": + return TestPortChannel.originalSubprocessPopen(["echo", "true" if self.retryCountEnabled else "false"], **kwargs) + elif commandArgs[5] == "runner.retry_count": + if commandArgs[4] == "get": + return TestPortChannel.originalSubprocessPopen(["echo", "3"], **kwargs) + elif commandArgs[4] == "set": + return TestPortChannel.originalSubprocessPopen(["echo", ""], **kwargs) + else: + return TestPortChannel.originalSubprocessPopen(["false"], **kwargs) + else: + return TestPortChannel.originalSubprocessPopen(["false"], **kwargs) + + @patch("subprocess.Popen", new_callable=SubprocessMock) + def test_get_portchannel_retry_count_disabled(self, subprocessMock): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + subprocessMock.retryCountEnabled = False + + # get the retry count of a portchannel, but when the retry count feature is disabled + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["get"], ["PortChannel1001"], obj=obj) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Retry count feature is not enabled!" 
in result.output + + @patch("subprocess.Popen", new_callable=SubprocessMock) + def test_set_portchannel_retry_count_disabled(self, subprocessMock): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + subprocessMock.retryCountEnabled = False + + # set the retry count of a portchannel, but when the retry count feature is disabled + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["set"], ["PortChannel1001", "5"], obj=obj) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Retry count feature is not enabled!" in result.output + + @patch("subprocess.Popen", new_callable=SubprocessMock) + def test_get_portchannel_retry_count_timeout(self, subprocessMock): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + subprocessMock.retryCountEnabled = True + subprocessMock.timeout = True + + # get the retry count of a portchannel + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["get"], ["PortChannel1001"], obj=obj) + # expect a timeout failure + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Unable to get the retry count" in result.output + + @patch("subprocess.Popen", new_callable=SubprocessMock) + def test_set_portchannel_retry_count_timeout(self, subprocessMock): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + subprocessMock.retryCountEnabled = True + subprocessMock.timeout = True + + # set the retry count of a portchannel + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["set"], ["PortChannel1001", "5"], obj=obj) + # expect a timeout failure + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Unable to set the retry count" in result.output + + @patch("subprocess.Popen", new_callable=SubprocessMock) + def test_get_portchannel_retry_count(self, subprocessMock): + runner = CliRunner() + db = 
Db() + obj = {'db':db.cfgdb} + + subprocessMock.retryCountEnabled = True + + # get the retry count of a portchannel + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["get"], ["PortChannel1001"], obj=obj) + # output has been mocked + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output.strip() == "3" + + @patch("subprocess.Popen", new_callable=SubprocessMock) + def test_set_portchannel_retry_count(self, subprocessMock): + runner = CliRunner() + db = Db() + obj = {'db':db.cfgdb} + + subprocessMock.retryCountEnabled = True + + # set the retry count of a portchannel + result = runner.invoke(config.config.commands["portchannel"].commands["retry-count"].commands["set"], ["PortChannel1001", "5"], obj=obj) + # output has been mocked + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == "" + @classmethod def teardown_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "0"