diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 8cb6586a9b..57e27d0e47 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -84,6 +84,7 @@ stages: sudo dpkg -i libyang_1.0.73_amd64.deb sudo dpkg -i libyang-cpp_1.0.73_amd64.deb sudo dpkg -i python3-yang_1.0.73_amd64.deb + sudo dpkg -i libprotobuf*.deb workingDirectory: $(Pipeline.Workspace)/target/debs/bullseye/ displayName: 'Install Debian dependencies' @@ -104,6 +105,27 @@ stages: workingDirectory: $(Pipeline.Workspace)/ displayName: 'Install swss-common dependencies' + + - task: DownloadPipelineArtifact@2 + inputs: + source: specific + project: build + pipeline: sonic-net.sonic-dash-api + artifact: sonic-dash-api + runVersion: 'latestFromBranch' + runBranch: 'refs/heads/$(BUILD_BRANCH)' + path: $(Build.ArtifactStagingDirectory)/download + patterns: | + libdashapi*.deb + displayName: "Download dash api" + + - script: | + set -xe + sudo apt-get update + sudo dpkg -i $(Build.ArtifactStagingDirectory)/download/libdashapi_*.deb + workingDirectory: $(Pipeline.Workspace)/ + displayName: 'Install libdashapi libraries' + - script: | set -xe sudo pip3 install swsssdk-2.0.1-py3-none-any.whl @@ -121,7 +143,7 @@ stages: curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add - sudo apt-add-repository https://packages.microsoft.com/debian/11/prod sudo apt-get update - sudo apt-get install -y dotnet-sdk-5.0 + sudo apt-get install -y dotnet-sdk-8.0 displayName: "Install .NET CORE" - script: | diff --git a/clear/main.py b/clear/main.py index 38dca2737f..cb1e3243b7 100755 --- a/clear/main.py +++ b/clear/main.py @@ -12,7 +12,7 @@ from show.plugins.pbh import read_pbh_counters from config.plugins.pbh import serialize_pbh_counters from . import plugins - +from . 
import stp # This is from the aliases example: # https://github.com/pallets/click/blob/57c6f09611fc47ca80db0bd010f05998b3c0aa95/examples/aliases/aliases.py class Config(object): @@ -145,6 +145,10 @@ def ipv6(): pass +# 'STP' +# +cli.add_command(stp.spanning_tree) + # # Inserting BGP functionality into cli's clear parse-chain. # BGP commands are determined by the routing-stack being elected. diff --git a/clear/stp.py b/clear/stp.py new file mode 100644 index 0000000000..c3e3a4b098 --- /dev/null +++ b/clear/stp.py @@ -0,0 +1,46 @@ +import click +import utilities_common.cli as clicommon + +# +# This group houses Spanning_tree commands and subgroups +# + + +@click.group(cls=clicommon.AliasedGroup) +@click.pass_context +def spanning_tree(ctx): + '''Clear Spanning-tree counters''' + pass + + +@spanning_tree.group('statistics', cls=clicommon.AliasedGroup, invoke_without_command=True) +@click.pass_context +def stp_clr_stats(ctx): + if ctx.invoked_subcommand is None: + command = 'sudo stpctl clrstsall' + clicommon.run_command(command) + + +@stp_clr_stats.command('interface') +@click.argument('interface_name', metavar='', required=True) +@click.pass_context +def stp_clr_stats_intf(ctx, interface_name): + command = 'sudo stpctl clrstsintf ' + interface_name + clicommon.run_command(command) + + +@stp_clr_stats.command('vlan') +@click.argument('vlan_id', metavar='', required=True) +@click.pass_context +def stp_clr_stats_vlan(ctx, vlan_id): + command = 'sudo stpctl clrstsvlan ' + vlan_id + clicommon.run_command(command) + + +@stp_clr_stats.command('vlan-interface') +@click.argument('vlan_id', metavar='', required=True) +@click.argument('interface_name', metavar='', required=True) +@click.pass_context +def stp_clr_stats_vlan_intf(ctx, vlan_id, interface_name): + command = 'sudo stpctl clrstsvlanintf ' + vlan_id + ' ' + interface_name + clicommon.run_command(command) diff --git a/config/main.py b/config/main.py index bfa6dccadc..f20af3f402 100644 --- a/config/main.py +++ 
b/config/main.py @@ -34,7 +34,8 @@ from sonic_yang_cfg_generator import SonicYangCfgDbGenerator from utilities_common import util_base from swsscommon import swsscommon -from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, ConfigDBPipeConnector +from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, ConfigDBPipeConnector, \ + isInterfaceNameValid, IFACE_NAME_MAX_LEN from utilities_common.db import Db from utilities_common.intf_filter import parse_interface_in_filter from utilities_common import bgp_util @@ -66,7 +67,7 @@ from . import switchport from . import dns from . import bgp_cli - +from . import stp # mock masic APIs for unit test try: @@ -106,7 +107,6 @@ CFG_PORTCHANNEL_PREFIX = "PortChannel" CFG_PORTCHANNEL_PREFIX_LEN = 11 -CFG_PORTCHANNEL_NAME_TOTAL_LEN_MAX = 15 CFG_PORTCHANNEL_MAX_VAL = 9999 CFG_PORTCHANNEL_NO="<0-9999>" @@ -439,7 +439,7 @@ def is_portchannel_name_valid(portchannel_name): if (portchannel_name[CFG_PORTCHANNEL_PREFIX_LEN:].isdigit() is False or int(portchannel_name[CFG_PORTCHANNEL_PREFIX_LEN:]) > CFG_PORTCHANNEL_MAX_VAL) : return False - if len(portchannel_name) > CFG_PORTCHANNEL_NAME_TOTAL_LEN_MAX: + if not isInterfaceNameValid(portchannel_name): return False return True @@ -1372,6 +1372,19 @@ def multiasic_write_to_db(filename, load_sysinfo): migrate_db_to_lastest(ns) +def config_file_yang_validation(filename): + config_to_check = read_json_file(filename) + sy = sonic_yang.SonicYang(YANG_DIR) + sy.loadYangModel() + try: + sy.loadData(configdbJson=config_to_check) + sy.validate_data_tree() + except sonic_yang.SonicYangException as e: + click.secho("{} fails YANG validation! 
Error: {}".format(filename, str(e)), + fg='magenta') + raise click.Abort() + + # This is our main entrypoint - the main 'config' command @click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS) @click.pass_context @@ -1421,7 +1434,10 @@ def config(ctx): config.add_command(vlan.vlan) config.add_command(vxlan.vxlan) -#add mclag commands +# add stp commands +config.add_command(stp.spanning_tree) + +# add mclag commands config.add_command(mclag.mclag) config.add_command(mclag.mclag_member) config.add_command(mclag.mclag_unique_ip) @@ -1810,6 +1826,13 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file)) return + if filename is not None and filename != "/dev/stdin": + if multi_asic.is_multi_asic(): + # Multiasic has not 100% fully validated. Thus pass here. + pass + else: + config_file_yang_validation(filename) + #Stop services before config push if not no_service_restart: log.log_notice("'reload' stopping services...") @@ -2000,15 +2023,7 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, # Multiasic has not 100% fully validated. Thus pass here. pass else: - sy = sonic_yang.SonicYang(YANG_DIR) - sy.loadYangModel() - try: - sy.loadData(configdbJson=config_to_check) - sy.validate_data_tree() - except sonic_yang.SonicYangException as e: - click.secho("{} fails YANG validation! 
Error: {}".format(golden_config_path, str(e)), - fg='magenta') - raise click.Abort() + config_file_yang_validation(golden_config_path) # Dependency check golden config json if multi_asic.is_multi_asic(): @@ -2469,8 +2484,9 @@ def add_portchannel(ctx, portchannel_name, min_links, fallback, fast_rate): db = ValidatedConfigDBConnector(ctx.obj['db']) if ADHOC_VALIDATION: if is_portchannel_name_valid(portchannel_name) != True: - ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}'" - .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO)) + ctx.fail("{} is invalid!, name should have prefix '{}' and suffix '{}' " + "and its length should not exceed {} characters" + .format(portchannel_name, CFG_PORTCHANNEL_PREFIX, CFG_PORTCHANNEL_NO, IFACE_NAME_MAX_LEN)) if is_portchannel_present_in_db(db, portchannel_name): ctx.fail("{} already exists!".format(portchannel_name)) # TODO: MISSING CONSTRAINT IN YANG MODEL @@ -4236,6 +4252,105 @@ def del_user(db, user): click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() + +# +# 'bmp' group ('config bmp ...') +# +@config.group() +@clicommon.pass_db +def bmp(db): + """BMP-related configuration""" + pass + + +# +# common function to update bmp config table +# +@clicommon.pass_db +def update_bmp_table(db, table_name, value): + log.log_info(f"'bmp {value} {table_name}' executing...") + bmp_table = db.cfgdb.get_table('BMP') + if not bmp_table: + bmp_table = {'table': {table_name: value}} + else: + bmp_table['table'][table_name] = value + db.cfgdb.mod_entry('BMP', 'table', bmp_table['table']) + + +# +# 'enable' subgroup ('config bmp enable ...') +# +@bmp.group() +@clicommon.pass_db +def enable(db): + """Enable BMP table dump """ + pass + + +# +# 'bgp-neighbor-table' command ('config bmp enable bgp-neighbor-table') +# +@enable.command('bgp-neighbor-table') +@clicommon.pass_db +def enable_bgp_neighbor_table(db): + update_bmp_table('bgp_neighbor_table', 'true') + + +# +# 
'bgp-rib-out-table' command ('config bmp enable bgp-rib-out-table') +# +@enable.command('bgp-rib-out-table') +@clicommon.pass_db +def enable_bgp_rib_out_table(db): + update_bmp_table('bgp_rib_out_table', 'true') + + +# +# 'bgp-rib-in-table' command ('config bmp enable bgp-rib-in-table') +# +@enable.command('bgp-rib-in-table') +@clicommon.pass_db +def enable_bgp_rib_in_table(db): + update_bmp_table('bgp_rib_in_table', 'true') + + +# +# 'disable' subgroup ('config bmp disable ...') +# +@bmp.group() +@clicommon.pass_db +def disable(db): + """Disable BMP table dump """ + pass + + +# +# 'bgp-neighbor-table' command ('config bmp disable bgp-neighbor-table') +# +@disable.command('bgp-neighbor-table') +@clicommon.pass_db +def disable_bgp_neighbor_table(db): + update_bmp_table('bgp_neighbor_table', 'false') + + +# +# 'bgp-rib-out-table' command ('config bmp disable bgp-rib-out-table') +# +@disable.command('bgp-rib-out-table') +@clicommon.pass_db +def diable_bgp_rib_out_table(db): + update_bmp_table('bgp_rib_out_table', 'false') + + +# +# 'bgp-rib-in-table' command ('config bmp disable bgp-rib-in-table') +# +@disable.command('bgp-rib-in-table') +@clicommon.pass_db +def disable_bgp_rib_in_table(db): + update_bmp_table('bgp_rib_in_table', 'false') + + # # 'bgp' group ('config bgp ...') # @@ -5899,6 +6014,853 @@ def disable_use_link_local_only(ctx, interface_name): interface_dict = db.get_table(interface_type) set_ipv6_link_local_only_on_interface(db, interface_dict, interface_type, interface_name, "disable") + +def is_vaild_intf_ip_addr(ip_addr) -> bool: + """Check whether the ip address is valid""" + try: + ip_address = ipaddress.ip_interface(ip_addr) + except ValueError as err: + click.echo("IP address {} is not valid: {}".format(ip_addr, err)) + return False + + if ip_address.version == 6: + if ip_address.is_unspecified: + click.echo("IPv6 address {} is unspecified".format(str(ip_address))) + return False + elif ip_address.version == 4: + if str(ip_address.ip) == "0.0.0.0": 
+ click.echo("IPv4 address {} is Zero".format(str(ip_address))) + return False + + if ip_address.is_multicast: + click.echo("IP address {} is multicast".format(str(ip_address))) + return False + + ip = ip_address.ip + if ip.is_loopback: + click.echo("IP address {} is loopback address".format(str(ip_address))) + return False + + return True + + +# +# 'vrrp' subgroup ('config interface vrrp ...') +# +@interface.group(cls=clicommon.AbbreviationGroup) +@click.pass_context +def vrrp(ctx): + """Vrrp configuration""" + pass + + +# +# ip subgroup ('config interface vrrp ip ...') +# +@vrrp.group(cls=clicommon.AbbreviationGroup, name='ip') +@click.pass_context +def ip(ctx): + """vrrp ip configuration """ + pass + + +@ip.command('add') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("ip_addr", metavar="", required=True) +@click.pass_context +def add_vrrp_ip(ctx, interface_name, vrrp_id, ip_addr): + """Add IPv4 address to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + if not is_vaild_intf_ip_addr(ip_addr): + ctx.abort() + if check_vrrp_ip_exist(config_db, ip_addr): + ctx.abort() + + if "/" not in ip_addr: + ctx.fail("IP address {} is missing a mask. 
Such as xx.xx.xx.xx/yy or xx:xx::xx/yy".format(str(ip_addr))) + + # check vip exist + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + address_list = [] + if vrrp_entry: + # update vrrp + if "vip" in vrrp_entry: + address_list = vrrp_entry.get("vip") + # add ip address + if len(address_list) >= 4: + ctx.fail("The vrrp instance {} has already configured 4 IP addresses".format(vrrp_id)) + + else: + # create new vrrp + vrrp_entry = {} + vrrp_keys = config_db.get_keys("VRRP") + if len(vrrp_keys) >= 254: + ctx.fail("Has already configured 254 vrrp instances") + intf_cfg = 0 + for key in vrrp_keys: + if key[1] == str(vrrp_id): + ctx.fail("The vrrp instance {} has already configured!".format(vrrp_id)) + if key[0] == interface_name: + intf_cfg += 1 + if intf_cfg >= 16: + ctx.fail("{} has already configured 16 vrrp instances!".format(interface_name)) + vrrp_entry["vid"] = vrrp_id + + address_list.append(ip_addr) + vrrp_entry['vip'] = address_list + + config_db.set_entry("VRRP", (interface_name, str(vrrp_id)), vrrp_entry) + + +@ip.command('remove') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("ip_addr", metavar="", required=True) +@click.pass_context +def remove_vrrp_ip(ctx, interface_name, vrrp_id, ip_addr): + """Remove IPv4 address to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + try: + ipaddress.ip_interface(ip_addr) + except ValueError as err: + ctx.fail("IP address is not valid: {}".format(err)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("{} is not configured on the vrrp instance {}!".format(ip_addr, vrrp_id)) + + address_list = vrrp_entry.get("vip") + # address_list = vrrp_entry.get("vip") + if not address_list: + ctx.fail("{} is not configured on the vrrp instance {}!".format(ip_addr, vrrp_id)) + + # del ip address + if ip_addr in address_list: + address_list.remove(ip_addr) + else: + ctx.fail("{} is not configured on the vrrp instance {}!".format(ip_addr, vrrp_id)) + vrrp_entry['vip'] = address_list + config_db.set_entry("VRRP", (interface_name, str(vrrp_id)), vrrp_entry) + + +# +# track interface subgroup ('config interface vrrp track_interface ...') +# +@vrrp.group(cls=clicommon.AbbreviationGroup, name='track_interface') +@click.pass_context +def track_interface(ctx): + """ vrrp track_interface configuration """ + pass + + +@track_interface.command('add') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("track_interface", metavar="", required=True) +@click.argument('priority_increment', metavar='', required=True, type=click.IntRange(10, 50), + default=20) +@click.pass_context +def add_track_interface(ctx, interface_name, vrrp_id, track_interface, priority_increment): + """add track_interface to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + track_interface = interface_alias_to_name(config_db, track_interface) + if interface_name is None: + 
ctx.fail("'interface_name' is None!") + if track_interface is None: + ctx.fail("'track_interface' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + table_name_t = get_interface_table_name(track_interface) + if table_name_t == "" or table_name_t == "LOOPBACK_INTERFACE": + ctx.fail("'track_interface' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if track_interface not in config_db.get_table(table_name_t): + ctx.fail("Router Interface '{}' not found".format(track_interface)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp instance {} not found on interface {}".format(vrrp_id, interface_name)) + + track_entry = config_db.get_entry("VRRP_TRACK", (interface_name, str(vrrp_id), track_interface)) + if track_entry: + track_entry['priority_increment'] = priority_increment + else: + track_entry = {} + track_entry["priority_increment"] = priority_increment + + vrrp_track_keys = config_db.get_keys("VRRP_TRACK") + if vrrp_track_keys: + track_key = (interface_name, str(vrrp_id)) + count = 0 + for item in vrrp_track_keys: + subtuple1 = item[:2] + if subtuple1 == track_key: + count += 1 + + if count >= 8: + ctx.fail("The Vrrpv instance {} has already configured 8 track interfaces".format(vrrp_id)) + + config_db.set_entry("VRRP_TRACK", (interface_name, str(vrrp_id), track_interface), track_entry) + + +@track_interface.command('remove') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("track_interface", metavar="", required=True) +@click.pass_context +def remove_track_interface(ctx, interface_name, 
vrrp_id, track_interface): + """Remove track_interface to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + track_interface = interface_alias_to_name(config_db, track_interface) + if interface_name is None: + ctx.fail("'interface_name' is None!") + if track_interface is None: + ctx.fail("'track_interface' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + table_name_t = get_interface_table_name(track_interface) + if table_name_t == "" or table_name_t == "LOOPBACK_INTERFACE": + ctx.fail("'track_interface' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp instance {} not found on interface {}".format(vrrp_id, interface_name)) + + track_entry = config_db.get_entry("VRRP_TRACK", (interface_name, str(vrrp_id), track_interface)) + if not track_entry: + ctx.fail("{} is not configured on the vrrp instance {}!".format(track_interface, vrrp_id)) + config_db.set_entry('VRRP_TRACK', (interface_name, str(vrrp_id), track_interface), None) + + +# +# 'vrrp' subcommand ('config interface vrrp priority ...') +# +@vrrp.command("priority") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument('priority', metavar='', required=True, type=click.IntRange(1, 254), default=100) +@click.pass_context +def priority(ctx, interface_name, vrrp_id, priority): + """config priority to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if 
clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp instance {} not found on interface {}".format(vrrp_id, interface_name)) + + vrrp_entry['priority'] = priority + config_db.set_entry("VRRP", (interface_name, str(vrrp_id)), vrrp_entry) + + +# +# 'vrrp' subcommand ('config interface vrrp adv_interval ...') +# +@vrrp.command("adv_interval") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument('interval', metavar='', required=True, type=click.IntRange(1, 255), default=1) +@click.pass_context +def adv_interval(ctx, interface_name, vrrp_id, interval): + """config adv_interval to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp instance {} not found on interface {}".format(vrrp_id, interface_name)) + + vrrp_entry['adv_interval'] = interval + config_db.set_entry("VRRP", (interface_name, str(vrrp_id)), vrrp_entry) + + +# +# 'vrrp' subcommand ('config interface vrrp pre_empt ...') +# +@vrrp.command("pre_empt") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument('mode', metavar='', required=True, type=click.Choice(["enabled", "disabled"])) +@click.pass_context +def pre_empt(ctx, interface_name, vrrp_id, mode): + """Config pre_empt mode to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp instance {} not found on interface {}".format(vrrp_id, interface_name)) + + vrrp_entry['preempt'] = mode + config_db.set_entry("VRRP", (interface_name, str(vrrp_id)), vrrp_entry) + + +# +# 'vrrp' subcommand ('config interface vrrp version...') +# +@vrrp.command("version") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument('version', metavar='', required=True, type=click.Choice(["2", "3"]), default=3) +@click.pass_context +def version(ctx, interface_name, vrrp_id, version): + """Config vrrp packet version to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp instance {} not found on interface {}".format(vrrp_id, interface_name)) + + vrrp_entry['version'] = version + config_db.set_entry("VRRP", (interface_name, str(vrrp_id)), vrrp_entry) + + +# +# 'vrrp' subcommand +# +@vrrp.command("add") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.pass_context +def add_vrrp(ctx, interface_name, vrrp_id): + """Add vrrp instance to the interface""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if vrrp_entry: + ctx.fail("{} has already configured the vrrp instance {}!".format(interface_name, vrrp_id)) + else: + vrrp_keys = config_db.get_keys("VRRP") + if len(vrrp_keys) >= 254: + ctx.fail("Has already configured 254 vrrp instances!") + intf_cfg = 0 + for key in vrrp_keys: + if key[1] == str(vrrp_id): + ctx.fail("The vrrp instance {} has already configured!".format(vrrp_id)) + if key[0] == interface_name: + intf_cfg += 1 + if intf_cfg >= 16: + ctx.fail("{} has already configured 16 vrrp instances!".format(interface_name)) + + config_db.set_entry('VRRP', (interface_name, str(vrrp_id)), {"vid": vrrp_id}) + + +@vrrp.command("remove") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.pass_context +def remove_vrrp(ctx, interface_name, vrrp_id): + """Remove vrrp instance to the interface""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("{} dose not configured the vrrp instance {}!".format(interface_name, vrrp_id)) + config_db.set_entry('VRRP', (interface_name, str(vrrp_id)), None) + + +# +# 'vrrp6' subgroup ('config interface vrrp6 ...') +# +@interface.group(cls=clicommon.AbbreviationGroup) +@click.pass_context +def vrrp6(ctx): + """Vrrpv6 configuration""" + pass + + +# +# ip subgroup ('config interface vrrp6 ipv6 ...') +# +@vrrp6.group(cls=clicommon.AbbreviationGroup, name='ipv6') +@click.pass_context +def ipv6(ctx): + """Vrrpv6 ipv6 configuration """ + pass + + +@ipv6.command('add') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("ipv6_addr", metavar="", required=True) +@click.pass_context +def add_vrrp6_ipv6(ctx, interface_name, vrrp_id, ipv6_addr): + """Add IPv6 address to the Vrrpv6 instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + if not is_vaild_intf_ip_addr(ipv6_addr): + ctx.abort() + if check_vrrp_ip_exist(config_db, ipv6_addr): + ctx.abort() + + if "/" not in ipv6_addr: + ctx.fail("IPv6 address {} is missing a mask. 
Such as xx:xx::xx/yy".format(str(ipv6_addr))) + + # check vip exist + vrrp6_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + address_list = [] + if vrrp6_entry: + # update vrrp + if "vip" in vrrp6_entry: + address_list = vrrp6_entry.get("vip") + # add ip address + if len(address_list) >= 4: + ctx.fail("The vrrp instance {} has already configured 4 IPv6 addresses".format(vrrp_id)) + + else: + # create new vrrp + vrrp6_entry = {} + vrrp6_keys = config_db.get_keys("VRRP6") + if len(vrrp6_keys) >= 254: + ctx.fail("Has already configured 254 Vrrpv6 instances.") + intf_cfg = 0 + for key in vrrp6_keys: + if key[1] == str(vrrp_id): + ctx.fail("The Vrrpv6 instance {} has already configured!".format(vrrp_id)) + if key[0] == interface_name: + intf_cfg += 1 + if intf_cfg >= 16: + ctx.fail("{} has already configured 16 Vrrpv6 instances!".format(interface_name)) + vrrp6_entry["vid"] = vrrp_id + + address_list.append(ipv6_addr) + vrrp6_entry['vip'] = address_list + + config_db.set_entry("VRRP6", (interface_name, str(vrrp_id)), vrrp6_entry) + + +@ipv6.command('remove') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("ipv6_addr", metavar="", required=True) +@click.pass_context +def remove_vrrp_ipv6(ctx, interface_name, vrrp_id, ipv6_addr): + """Remove IPv6 address to the Vrrpv6 instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + try: + ipaddress.ip_interface(ipv6_addr) + except ValueError as err: + ctx.fail("IPv6 address is not valid: {}".format(err)) + + vrrp6_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if not vrrp6_entry: + ctx.fail("{} is not configured on the Vrrpv6 instance {}!".format(ipv6_addr, vrrp_id)) + + address_list = vrrp6_entry.get("vip") + # address_list = vrrp6_entry.get("vip") + if not address_list: + ctx.fail("{} is not configured on the Vrrpv6 instance {}!".format(ipv6_addr, vrrp_id)) + + # del ip address + if ipv6_addr in address_list: + address_list.remove(ipv6_addr) + else: + ctx.fail("{} is not configured on the Vrrpv6 instance {}!".format(ipv6_addr, vrrp_id)) + vrrp6_entry['vip'] = address_list + config_db.set_entry("VRRP6", (interface_name, str(vrrp_id)), vrrp6_entry) + + +def check_vrrp_ip_exist(config_db, ip_addr) -> bool: + addr_type = ipaddress.ip_interface(ip_addr).version + vrrp_table = "VRRP" if addr_type == 4 else "VRRP6" + vrrp_keys = config_db.get_keys(vrrp_table) + for vrrp_key in vrrp_keys: + vrrp_entry = config_db.get_entry(vrrp_table, vrrp_key) + if "vip" not in vrrp_entry: + continue + if ip_addr in vrrp_entry["vip"]: + click.echo("{} has already configured on the {} vrrp instance {}!".format(ip_addr, vrrp_key[0], + vrrp_key[1])) + return True + return False + + +# +# track interface subgroup ('config interface vrrp6 track_interface ...') +# +@vrrp6.group(cls=clicommon.AbbreviationGroup, name='track_interface') +@click.pass_context +def vrrp6_track_interface(ctx): + """ Vrrpv6 track_interface configuration """ + pass + + +@vrrp6_track_interface.command('add') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("track_interface", metavar="", 
required=True) +@click.argument('priority_increment', metavar='', required=True, type=click.IntRange(10, 50), + default=20) +@click.pass_context +def add_track_interface_v6(ctx, interface_name, vrrp_id, track_interface, priority_increment): + """add track_interface to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + track_interface = interface_alias_to_name(config_db, track_interface) + if interface_name is None: + ctx.fail("'interface_name' is None!") + if track_interface is None: + ctx.fail("'track_interface' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + table_name_t = get_interface_table_name(track_interface) + if table_name_t == "" or table_name_t == "LOOPBACK_INTERFACE": + ctx.fail("'track_interface' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if track_interface not in config_db.get_table(table_name_t): + ctx.fail("Router Interface '{}' not found".format(track_interface)) + + vrrp_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp6 instance {} not found on interface {}".format(vrrp_id, interface_name)) + + # track_intf_key = track_interface + "|weight|" + str(weight) + vrrp6_track_keys = config_db.get_keys("VRRP6_TRACK") + if vrrp6_track_keys: + track_key = (interface_name, str(vrrp_id)) + count = 0 + for item in vrrp6_track_keys: + subtuple1 = item[:2] + if subtuple1 == track_key: + count += 1 + + if count >= 8: + ctx.fail("The Vrrpv6 instance {} has already configured 8 track interfaces".format(vrrp_id)) + + # create a new entry + track6_entry = {} + track6_entry["priority_increment"] = priority_increment + config_db.set_entry("VRRP6_TRACK", (interface_name, str(vrrp_id), track_interface), track6_entry) + + +@vrrp6_track_interface.command('remove') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("track_interface", metavar="", required=True) +@click.pass_context +def remove_track_interface_v6(ctx, interface_name, vrrp_id, track_interface): + """Remove track_interface to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + track_interface = interface_alias_to_name(config_db, track_interface) + if interface_name is None: + ctx.fail("'interface_name' is None!") + if track_interface is None: + ctx.fail("'track_interface' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + table_name_t = get_interface_table_name(track_interface) + if table_name_t == "" or table_name_t == "LOOPBACK_INTERFACE": + ctx.fail("'track_interface' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + + vrrp_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp6 instance {} not found on interface {}".format(vrrp_id, interface_name)) + + track6_entry = config_db.get_entry("VRRP6_TRACK", (interface_name, str(vrrp_id), track_interface)) + if not track6_entry: + ctx.fail("{} is not configured on the vrrp6 instance {}!".format(track_interface, vrrp_id)) + config_db.set_entry('VRRP6_TRACK', (interface_name, str(vrrp_id), track_interface), None) + + +# +# 'vrrp6' subcommand ('config interface vrrp6 priority ...') +# +@vrrp6.command("priority") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument('priority', metavar='', required=True, type=click.IntRange(1, 254), default=100) +@click.pass_context +def priority_v6(ctx, interface_name, vrrp_id, priority): + """config priority to the Vrrpv6 instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp6_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if not vrrp6_entry: + ctx.fail("Vrrpv6 instance {} not found on interface {}".format(vrrp_id, interface_name)) + + vrrp6_entry['priority'] = priority + config_db.set_entry("VRRP6", (interface_name, str(vrrp_id)), vrrp6_entry) + + +# +# 'vrrp' subcommand ('config interface vrrp6 adv_interval ...') +# +@vrrp6.command("adv_interval") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument('interval', metavar='', required=True, type=click.IntRange(1, 255), default=1000) +@click.pass_context +def adv_interval_v6(ctx, interface_name, vrrp_id, interval): + """config adv_interval to the Vrrpv6 instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp6_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if not vrrp6_entry: + ctx.fail("Vrrpv6 instance {} not found on interface {}".format(vrrp_id, interface_name)) + + vrrp6_entry['adv_interval'] = interval + config_db.set_entry("VRRP6", (interface_name, str(vrrp_id)), vrrp6_entry) + + +# +# 'vrrp' subcommand ('config interface vrrp6 pre_empt ...') +# +@vrrp6.command("pre_empt") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument('mode', metavar='', required=True, type=click.Choice(["enabled", "disabled"])) +@click.pass_context +def pre_empt_v6(ctx, interface_name, vrrp_id, mode): + """Config pre_empt mode to the Vrrpv6 instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp6_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if not vrrp6_entry: + ctx.fail("Vrrpv6 instance {} not found on interface {}".format(vrrp_id, interface_name)) + + vrrp6_entry['preempt'] = mode + config_db.set_entry("VRRP6", (interface_name, str(vrrp_id)), vrrp6_entry) + + +# +# 'vrrp6' subcommand +# +@vrrp6.command("add") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.pass_context +def add_vrrp_v6(ctx, interface_name, vrrp_id): + """Add Vrrpv6 instance to the interface""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp6_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if vrrp6_entry: + ctx.fail("{} has already configured the Vrrpv6 instance {}!".format(interface_name, vrrp_id)) + else: + vrrp6_keys = config_db.get_keys("VRRP6") + if len(vrrp6_keys) >= 254: + ctx.fail("Has already configured 254 Vrrpv6 instances!") + intf_cfg = 0 + for key in vrrp6_keys: + if key[1] == str(vrrp_id): + ctx.fail("The Vrrpv6 instance {} has already configured!".format(vrrp_id)) + if key[0] == interface_name: + intf_cfg += 1 + if intf_cfg >= 16: + ctx.fail("{} has already configured 16 Vrrpv6 instances!".format(interface_name)) + + config_db.set_entry('VRRP6', (interface_name, str(vrrp_id)), {"vid": vrrp_id}) + + +@vrrp6.command("remove") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.pass_context +def remove_vrrp_v6(ctx, interface_name, vrrp_id): + """Remove Vrrpv6 instance to the interface""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp6_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if not vrrp6_entry: + ctx.fail("{} dose not configured the Vrrpv6 instance {}!".format(interface_name, vrrp_id)) + config_db.set_entry('VRRP6', (interface_name, str(vrrp_id)), None) + + # # 'vrf' group ('config vrf ...') # @@ -5920,8 +6882,8 @@ def add_vrf(ctx, vrf_name): config_db = ValidatedConfigDBConnector(ctx.obj['config_db']) if not vrf_name.startswith("Vrf") and not (vrf_name == 'mgmt') and not (vrf_name == 'management'): ctx.fail("'vrf_name' must begin with 'Vrf' or named 'mgmt'/'management' in case of ManagementVRF.") - if len(vrf_name) > 15: - ctx.fail("'vrf_name' is too long!") + if not isInterfaceNameValid(vrf_name): + ctx.fail("'vrf_name' length should not exceed {} characters".format(IFACE_NAME_MAX_LEN)) if is_vrf_exists(config_db, vrf_name): ctx.fail("VRF {} already exists!".format(vrf_name)) elif (vrf_name == 'mgmt' or vrf_name == 'management'): @@ -5940,8 +6902,8 @@ def del_vrf(ctx, vrf_name): config_db = ValidatedConfigDBConnector(ctx.obj['config_db']) if not vrf_name.startswith("Vrf") and not (vrf_name == 'mgmt') and not (vrf_name == 'management'): ctx.fail("'vrf_name' must begin with 'Vrf' or named 'mgmt'/'management' in case of ManagementVRF.") - if len(vrf_name) > 15: - ctx.fail("'vrf_name' is too long!") + if not isInterfaceNameValid(vrf_name): + ctx.fail("'vrf_name' length should not exceed {} characters".format((IFACE_NAME_MAX_LEN))) syslog_table = config_db.get_table("SYSLOG_SERVER") syslog_vrf_dev = "mgmt" if vrf_name == "management" else vrf_name for syslog_entry, syslog_data in syslog_table.items(): @@ -6971,8 +7933,8 @@ def add_loopback(ctx, loopback_name): config_db = ValidatedConfigDBConnector(ctx.obj['db']) if ADHOC_VALIDATION: if is_loopback_name_valid(loopback_name) is False: - 
ctx.fail("{} is invalid, name should have prefix '{}' and suffix '{}' " - .format(loopback_name, CFG_LOOPBACK_PREFIX, CFG_LOOPBACK_NO)) + ctx.fail("{} is invalid, name should have prefix '{}' and suffix '{}' and should not exceed {} characters" + .format(loopback_name, CFG_LOOPBACK_PREFIX, CFG_LOOPBACK_NO, IFACE_NAME_MAX_LEN)) lo_intfs = [k for k, v in config_db.get_table('LOOPBACK_INTERFACE').items() if type(k) != tuple] if loopback_name in lo_intfs: @@ -7719,6 +8681,8 @@ def add_subinterface(ctx, subinterface_name, vid): if interface_alias is None: ctx.fail("{} invalid subinterface".format(interface_alias)) + if not isInterfaceNameValid(interface_alias): + ctx.fail("Subinterface name length should not exceed {} characters".format(IFACE_NAME_MAX_LEN)) if interface_alias.startswith("Po") is True: intf_table_name = CFG_PORTCHANNEL_PREFIX @@ -8053,5 +9017,58 @@ def max_sessions(max_sessions): {'max_sessions': max_sessions}) +# +# 'banner' group ('config banner ...') +# +@config.group() +def banner(): + """Configuring system banner messages""" + pass + + +@banner.command() +@click.argument('state', metavar='', required=True, type=click.Choice(['enabled', 'disabled'])) +def state(state): + """Set banner feature state""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry(swsscommon.CFG_BANNER_MESSAGE_TABLE_NAME, 'global', + {'state': state}) + + +@banner.command() +@click.argument('message', metavar='', required=True) +def login(message): + """Set login message""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry(swsscommon.CFG_BANNER_MESSAGE_TABLE_NAME, 'global', + {'login': message}) + + +@banner.command() +@click.argument('message', metavar='', required=True) +def logout(message): + """Set logout message""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry(swsscommon.CFG_BANNER_MESSAGE_TABLE_NAME, 'global', + {'logout': message}) + + +@banner.command() +@click.argument('message', 
metavar='', required=True) +def motd(message): + """Set message of the day""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry(swsscommon.CFG_BANNER_MESSAGE_TABLE_NAME, 'global', + {'motd': message}) + + if __name__ == '__main__': config() diff --git a/config/plugins/mlnx.py b/config/plugins/mlnx.py index f61335d4f4..115b310f69 100644 --- a/config/plugins/mlnx.py +++ b/config/plugins/mlnx.py @@ -164,40 +164,6 @@ def mlnx(): """ Mellanox platform configuration tasks """ pass - -# 'sniffer' group -@mlnx.group() -def sniffer(): - """ Utility for managing Mellanox SDK/PRM sniffer """ - pass - - -# 'sdk' subgroup -@sniffer.group() -def sdk(): - """SDK Sniffer - Command Line to enable/disable SDK sniffer""" - pass - - -@sdk.command() -@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, - prompt='Swss service will be restarted, continue?') -def enable(): - """Enable SDK Sniffer""" - click.echo("Enabling SDK sniffer") - sdk_sniffer_enable() - click.echo("Note: the sniffer file may exhaust the space on /var/log, please disable it when you are done with this sniffering.") - - -@sdk.command() -@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, - prompt='Swss service will be restarted, continue?') -def disable(): - """Disable SDK Sniffer""" - click.echo("Disabling SDK sniffer") - sdk_sniffer_disable() - - def sdk_sniffer_enable(): """Enable SDK Sniffer""" sdk_sniffer_filename = sniffer_filename_generate(SDK_SNIFFER_TARGET_PATH, diff --git a/config/stp.py b/config/stp.py new file mode 100644 index 0000000000..85d7041847 --- /dev/null +++ b/config/stp.py @@ -0,0 +1,917 @@ + +# +# 'spanning-tree' group ('config spanning-tree ...') +# + +import click +import utilities_common.cli as clicommon +from natsort import natsorted +import logging + +STP_MIN_ROOT_GUARD_TIMEOUT = 5 +STP_MAX_ROOT_GUARD_TIMEOUT = 600 +STP_DEFAULT_ROOT_GUARD_TIMEOUT = 30 + +STP_MIN_FORWARD_DELAY = 4 
+STP_MAX_FORWARD_DELAY = 30 +STP_DEFAULT_FORWARD_DELAY = 15 + +STP_MIN_HELLO_INTERVAL = 1 +STP_MAX_HELLO_INTERVAL = 10 +STP_DEFAULT_HELLO_INTERVAL = 2 + +STP_MIN_MAX_AGE = 6 +STP_MAX_MAX_AGE = 40 +STP_DEFAULT_MAX_AGE = 20 + +STP_MIN_BRIDGE_PRIORITY = 0 +STP_MAX_BRIDGE_PRIORITY = 61440 +STP_DEFAULT_BRIDGE_PRIORITY = 32768 + +PVST_MAX_INSTANCES = 255 + + +def get_intf_list_in_vlan_member_table(config_db): + """ + Get info from REDIS ConfigDB and create interface to vlan mapping + """ + get_int_vlan_configdb_info = config_db.get_table('VLAN_MEMBER') + int_list = [] + for key in get_int_vlan_configdb_info: + interface = key[1] + if interface not in int_list: + int_list.append(interface) + return int_list + +################################## +# STP parameter validations +################################## + + +def is_valid_root_guard_timeout(ctx, root_guard_timeout): + if root_guard_timeout not in range(STP_MIN_ROOT_GUARD_TIMEOUT, STP_MAX_ROOT_GUARD_TIMEOUT + 1): + ctx.fail("STP root guard timeout must be in range 5-600") + + +def is_valid_forward_delay(ctx, forward_delay): + if forward_delay not in range(STP_MIN_FORWARD_DELAY, STP_MAX_FORWARD_DELAY + 1): + ctx.fail("STP forward delay value must be in range 4-30") + + +def is_valid_hello_interval(ctx, hello_interval): + if hello_interval not in range(STP_MIN_HELLO_INTERVAL, STP_MAX_HELLO_INTERVAL + 1): + ctx.fail("STP hello timer must be in range 1-10") + + +def is_valid_max_age(ctx, max_age): + if max_age not in range(STP_MIN_MAX_AGE, STP_MAX_MAX_AGE + 1): + ctx.fail("STP max age value must be in range 6-40") + + +def is_valid_bridge_priority(ctx, priority): + if priority % 4096 != 0: + ctx.fail("STP bridge priority must be multiple of 4096") + if priority not in range(STP_MIN_BRIDGE_PRIORITY, STP_MAX_BRIDGE_PRIORITY + 1): + ctx.fail("STP bridge priority must be in range 0-61440") + + +def validate_params(forward_delay, max_age, hello_time): + if (2 * (int(forward_delay) - 1)) >= int(max_age) >= (2 * (int(hello_time) 
+ 1)): + return True + else: + return False + + +def is_valid_stp_vlan_parameters(ctx, db, vlan_name, param_type, new_value): + stp_vlan_entry = db.get_entry('STP_VLAN', vlan_name) + cfg_vlan_forward_delay = stp_vlan_entry.get("forward_delay") + cfg_vlan_max_age = stp_vlan_entry.get("max_age") + cfg_vlan_hello_time = stp_vlan_entry.get("hello_time") + ret_val = False + if param_type == "forward_delay": + ret_val = validate_params(new_value, cfg_vlan_max_age, cfg_vlan_hello_time) + elif param_type == "max_age": + ret_val = validate_params(cfg_vlan_forward_delay, new_value, cfg_vlan_hello_time) + elif param_type == "hello_time": + ret_val = validate_params(cfg_vlan_forward_delay, cfg_vlan_max_age, new_value) + + if ret_val is not True: + ctx.fail("2*(forward_delay-1) >= max_age >= 2*(hello_time +1 ) not met for VLAN") + + +def is_valid_stp_global_parameters(ctx, db, param_type, new_value): + stp_global_entry = db.get_entry('STP', "GLOBAL") + cfg_forward_delay = stp_global_entry.get("forward_delay") + cfg_max_age = stp_global_entry.get("max_age") + cfg_hello_time = stp_global_entry.get("hello_time") + ret_val = False + if param_type == "forward_delay": + ret_val = validate_params(new_value, cfg_max_age, cfg_hello_time) + elif param_type == "max_age": + ret_val = validate_params(cfg_forward_delay, new_value, cfg_hello_time) + elif param_type == "hello_time": + ret_val = validate_params(cfg_forward_delay, cfg_max_age, new_value) + + if ret_val is not True: + ctx.fail("2*(forward_delay-1) >= max_age >= 2*(hello_time +1 ) not met") + + +def get_max_stp_instances(): + return PVST_MAX_INSTANCES + + +def update_stp_vlan_parameter(ctx, db, param_type, new_value): + stp_global_entry = db.get_entry('STP', "GLOBAL") + + allowed_params = {"priority", "max_age", "hello_time", "forward_delay"} + if param_type not in allowed_params: + ctx.fail("Invalid parameter") + + current_global_value = stp_global_entry.get("forward_delay") + + vlan_dict = db.get_table('STP_VLAN') + for vlan in 
vlan_dict.keys(): + vlan_entry = db.get_entry('STP_VLAN', vlan) + current_vlan_value = vlan_entry.get(param_type) + if current_global_value == current_vlan_value: + db.mod_entry('STP_VLAN', vlan, {param_type: new_value}) + + +def check_if_vlan_exist_in_db(db, ctx, vid): + vlan_name = 'Vlan{}'.format(vid) + vlan = db.get_entry('VLAN', vlan_name) + if len(vlan) == 0: + ctx.fail("{} doesn't exist".format(vlan_name)) + + +def enable_stp_for_vlans(db): + vlan_count = 0 + fvs = {'enabled': 'true', + 'forward_delay': get_global_stp_forward_delay(db), + 'hello_time': get_global_stp_hello_time(db), + 'max_age': get_global_stp_max_age(db), + 'priority': get_global_stp_priority(db) + } + vlan_dict = natsorted(db.get_table('VLAN')) + max_stp_instances = get_max_stp_instances() + for vlan_key in vlan_dict: + if vlan_count >= max_stp_instances: + logging.warning("Exceeded maximum STP configurable VLAN instances for {}".format(vlan_key)) + break + db.set_entry('STP_VLAN', vlan_key, fvs) + vlan_count += 1 + + +def get_stp_enabled_vlan_count(db): + count = 0 + stp_vlan_keys = db.get_table('STP_VLAN').keys() + for key in stp_vlan_keys: + if db.get_entry('STP_VLAN', key).get('enabled') == 'true': + count += 1 + return count + + +def vlan_enable_stp(db, vlan_name): + fvs = {'enabled': 'true', + 'forward_delay': get_global_stp_forward_delay(db), + 'hello_time': get_global_stp_hello_time(db), + 'max_age': get_global_stp_max_age(db), + 'priority': get_global_stp_priority(db) + } + if is_global_stp_enabled(db): + if get_stp_enabled_vlan_count(db) < get_max_stp_instances(): + db.set_entry('STP_VLAN', vlan_name, fvs) + else: + logging.warning("Exceeded maximum STP configurable VLAN instances for {}".format(vlan_name)) + + +def interface_enable_stp(db, interface_name): + fvs = {'enabled': 'true', + 'root_guard': 'false', + 'bpdu_guard': 'false', + 'bpdu_guard_do_disable': 'false', + 'portfast': 'false', + 'uplink_fast': 'false' + } + if is_global_stp_enabled(db): + db.set_entry('STP_PORT', 
interface_name, fvs) + + +def is_vlan_configured_interface(db, interface_name): + intf_to_vlan_list = get_vlan_list_for_interface(db, interface_name) + if intf_to_vlan_list: # if empty + return True + else: + return False + + +def is_interface_vlan_member(db, vlan_name, interface_name): + ctx = click.get_current_context() + key = vlan_name + '|' + interface_name + entry = db.get_entry('VLAN_MEMBER', key) + if len(entry) == 0: # if empty + ctx.fail("{} is not member of {}".format(interface_name, vlan_name)) + + +def get_vlan_list_for_interface(db, interface_name): + vlan_intf_info = db.get_table('VLAN_MEMBER') + vlan_list = [] + for line in vlan_intf_info: + if interface_name == line[1]: + vlan_name = line[0] + vlan_list.append(vlan_name) + return vlan_list + + +def get_pc_member_port_list(db): + pc_member_info = db.get_table('PORTCHANNEL_MEMBER') + pc_member_port_list = [] + for line in pc_member_info: + intf_name = line[1] + pc_member_port_list.append(intf_name) + return pc_member_port_list + + +def get_vlan_list_from_stp_vlan_intf_table(db, intf_name): + stp_vlan_intf_info = db.get_table('STP_VLAN_PORT') + vlan_list = [] + for line in stp_vlan_intf_info: + if line[1] == intf_name: + vlan_list.append(line[0]) + return vlan_list + + +def get_intf_list_from_stp_vlan_intf_table(db, vlan_name): + stp_vlan_intf_info = db.get_table('STP_VLAN_PORT') + intf_list = [] + for line in stp_vlan_intf_info: + if line[0] == vlan_name: + intf_list.append(line[1]) + return intf_list + + +def is_portchannel_member_port(db, interface_name): + return interface_name in get_pc_member_port_list(db) + + +def enable_stp_for_interfaces(db): + fvs = {'enabled': 'true', + 'root_guard': 'false', + 'bpdu_guard': 'false', + 'bpdu_guard_do_disable': 'false', + 'portfast': 'false', + 'uplink_fast': 'false' + } + port_dict = natsorted(db.get_table('PORT')) + intf_list_in_vlan_member_table = get_intf_list_in_vlan_member_table(db) + + for port_key in port_dict: + if port_key in 
intf_list_in_vlan_member_table: + db.set_entry('STP_PORT', port_key, fvs) + + po_ch_dict = natsorted(db.get_table('PORTCHANNEL')) + for po_ch_key in po_ch_dict: + if po_ch_key in intf_list_in_vlan_member_table: + db.set_entry('STP_PORT', po_ch_key, fvs) + + +def is_global_stp_enabled(db): + stp_entry = db.get_entry('STP', "GLOBAL") + mode = stp_entry.get("mode") + if mode: + return True + else: + return False + + +def check_if_global_stp_enabled(db, ctx): + if not is_global_stp_enabled(db): + ctx.fail("Global STP is not enabled - first configure STP mode") + + +def get_global_stp_mode(db): + stp_entry = db.get_entry('STP', "GLOBAL") + mode = stp_entry.get("mode") + return mode + + +def get_global_stp_forward_delay(db): + stp_entry = db.get_entry('STP', "GLOBAL") + forward_delay = stp_entry.get("forward_delay") + return forward_delay + + +def get_global_stp_hello_time(db): + stp_entry = db.get_entry('STP', "GLOBAL") + hello_time = stp_entry.get("hello_time") + return hello_time + + +def get_global_stp_max_age(db): + stp_entry = db.get_entry('STP', "GLOBAL") + max_age = stp_entry.get("max_age") + return max_age + + +def get_global_stp_priority(db): + stp_entry = db.get_entry('STP', "GLOBAL") + priority = stp_entry.get("priority") + return priority + + +@click.group() +@clicommon.pass_db +def spanning_tree(_db): + """STP command line""" + pass + + +############################################### +# STP Global commands implementation +############################################### + +# cmd: STP enable +@spanning_tree.command('enable') +@click.argument('mode', metavar='', required=True, type=click.Choice(["pvst"])) +@clicommon.pass_db +def spanning_tree_enable(_db, mode): + """enable STP """ + ctx = click.get_current_context() + db = _db.cfgdb + if mode == "pvst" and get_global_stp_mode(db) == "pvst": + ctx.fail("PVST is already configured") + fvs = {'mode': mode, + 'rootguard_timeout': STP_DEFAULT_ROOT_GUARD_TIMEOUT, + 'forward_delay': STP_DEFAULT_FORWARD_DELAY, + 
'hello_time': STP_DEFAULT_HELLO_INTERVAL, + 'max_age': STP_DEFAULT_MAX_AGE, + 'priority': STP_DEFAULT_BRIDGE_PRIORITY + } + db.set_entry('STP', "GLOBAL", fvs) + # Enable STP for VLAN by default + enable_stp_for_interfaces(db) + enable_stp_for_vlans(db) + + +# cmd: STP disable +@spanning_tree.command('disable') +@click.argument('mode', metavar='', required=True, type=click.Choice(["pvst"])) +@clicommon.pass_db +def stp_disable(_db, mode): + """disable STP """ + db = _db.cfgdb + db.set_entry('STP', "GLOBAL", None) + # Disable STP for all VLANs and interfaces + db.delete_table('STP_VLAN') + db.delete_table('STP_PORT') + db.delete_table('STP_VLAN_PORT') + if get_global_stp_mode(db) == "pvst": + print("Error PVST disable failed") + + +# cmd: STP global root guard timeout +@spanning_tree.command('root_guard_timeout') +@click.argument('root_guard_timeout', metavar='<5-600 seconds>', required=True, type=int) +@clicommon.pass_db +def stp_global_root_guard_timeout(_db, root_guard_timeout): + """Configure STP global root guard timeout value""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_global_stp_enabled(db, ctx) + is_valid_root_guard_timeout(ctx, root_guard_timeout) + db.mod_entry('STP', "GLOBAL", {'rootguard_timeout': root_guard_timeout}) + + +# cmd: STP global forward delay +@spanning_tree.command('forward_delay') +@click.argument('forward_delay', metavar='<4-30 seconds>', required=True, type=int) +@clicommon.pass_db +def stp_global_forward_delay(_db, forward_delay): + """Configure STP global forward delay""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_global_stp_enabled(db, ctx) + is_valid_forward_delay(ctx, forward_delay) + is_valid_stp_global_parameters(ctx, db, "forward_delay", forward_delay) + update_stp_vlan_parameter(ctx, db, "forward_delay", forward_delay) + db.mod_entry('STP', "GLOBAL", {'forward_delay': forward_delay}) + + +# cmd: STP global hello interval +@spanning_tree.command('hello') +@click.argument('hello_interval', 
metavar='<1-10 seconds>', required=True, type=int) +@clicommon.pass_db +def stp_global_hello_interval(_db, hello_interval): + """Configure STP global hello interval""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_global_stp_enabled(db, ctx) + is_valid_hello_interval(ctx, hello_interval) + is_valid_stp_global_parameters(ctx, db, "hello_time", hello_interval) + update_stp_vlan_parameter(ctx, db, "hello_time", hello_interval) + db.mod_entry('STP', "GLOBAL", {'hello_time': hello_interval}) + + +# cmd: STP global max age +@spanning_tree.command('max_age') +@click.argument('max_age', metavar='<6-40 seconds>', required=True, type=int) +@clicommon.pass_db +def stp_global_max_age(_db, max_age): + """Configure STP global max_age""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_global_stp_enabled(db, ctx) + is_valid_max_age(ctx, max_age) + is_valid_stp_global_parameters(ctx, db, "max_age", max_age) + update_stp_vlan_parameter(ctx, db, "max_age", max_age) + db.mod_entry('STP', "GLOBAL", {'max_age': max_age}) + + +# cmd: STP global bridge priority +@spanning_tree.command('priority') +@click.argument('priority', metavar='<0-61440>', required=True, type=int) +@clicommon.pass_db +def stp_global_priority(_db, priority): + """Configure STP global bridge priority""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_global_stp_enabled(db, ctx) + is_valid_bridge_priority(ctx, priority) + update_stp_vlan_parameter(ctx, db, "priority", priority) + db.mod_entry('STP', "GLOBAL", {'priority': priority}) + + +############################################### +# STP VLAN commands implementation +############################################### +@spanning_tree.group('vlan') +@clicommon.pass_db +def spanning_tree_vlan(_db): + """Configure STP for a VLAN""" + pass + + +def is_stp_enabled_for_vlan(db, vlan_name): + stp_entry = db.get_entry('STP_VLAN', vlan_name) + stp_enabled = stp_entry.get("enabled") + if stp_enabled == "true": + return True + 
else: + return False + + +def check_if_stp_enabled_for_vlan(ctx, db, vlan_name): + if not is_stp_enabled_for_vlan(db, vlan_name): + ctx.fail("STP is not enabled for VLAN") + + +@spanning_tree_vlan.command('enable') +@click.argument('vid', metavar='', required=True, type=int) +@clicommon.pass_db +def stp_vlan_enable(_db, vid): + """Enable STP for a VLAN""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_vlan_exist_in_db(db, ctx, vid) + vlan_name = 'Vlan{}'.format(vid) + if is_stp_enabled_for_vlan(db, vlan_name): + ctx.fail("STP is already enabled for " + vlan_name) + if get_stp_enabled_vlan_count(db) >= get_max_stp_instances(): + ctx.fail("Exceeded maximum STP configurable VLAN instances") + check_if_global_stp_enabled(db, ctx) + # when enabled for first time, create VLAN entry with + # global values - else update only VLAN STP state + stp_vlan_entry = db.get_entry('STP_VLAN', vlan_name) + if len(stp_vlan_entry) == 0: + fvs = {'enabled': 'true', + 'forward_delay': get_global_stp_forward_delay(db), + 'hello_time': get_global_stp_hello_time(db), + 'max_age': get_global_stp_max_age(db), + 'priority': get_global_stp_priority(db) + } + db.set_entry('STP_VLAN', vlan_name, fvs) + else: + db.mod_entry('STP_VLAN', vlan_name, {'enabled': 'true'}) + # Refresh stp_vlan_intf entry for vlan + for vlan, intf in db.get_table('STP_VLAN_PORT'): + if vlan == vlan_name: + vlan_intf_key = "{}|{}".format(vlan_name, intf) + vlan_intf_entry = db.get_entry('STP_VLAN_PORT', vlan_intf_key) + db.mod_entry('STP_VLAN_PORT', vlan_intf_key, vlan_intf_entry) + + +@spanning_tree_vlan.command('disable') +@click.argument('vid', metavar='', required=True, type=int) +@clicommon.pass_db +def stp_vlan_disable(_db, vid): + """Disable STP for a VLAN""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_vlan_exist_in_db(db, ctx, vid) + vlan_name = 'Vlan{}'.format(vid) + db.mod_entry('STP_VLAN', vlan_name, {'enabled': 'false'}) + + +@spanning_tree_vlan.command('forward_delay') 
+@click.argument('vid', metavar='', required=True, type=int) +@click.argument('forward_delay', metavar='<4-30 seconds>', required=True, type=int) +@clicommon.pass_db +def stp_vlan_forward_delay(_db, vid, forward_delay): + """Configure STP forward delay for VLAN""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_vlan_exist_in_db(db, ctx, vid) + vlan_name = 'Vlan{}'.format(vid) + check_if_stp_enabled_for_vlan(ctx, db, vlan_name) + is_valid_forward_delay(ctx, forward_delay) + is_valid_stp_vlan_parameters(ctx, db, vlan_name, "forward_delay", forward_delay) + db.mod_entry('STP_VLAN', vlan_name, {'forward_delay': forward_delay}) + + +@spanning_tree_vlan.command('hello') +@click.argument('vid', metavar='', required=True, type=int) +@click.argument('hello_interval', metavar='<1-10 seconds>', required=True, type=int) +@clicommon.pass_db +def stp_vlan_hello_interval(_db, vid, hello_interval): + """Configure STP hello interval for VLAN""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_vlan_exist_in_db(db, ctx, vid) + vlan_name = 'Vlan{}'.format(vid) + check_if_stp_enabled_for_vlan(ctx, db, vlan_name) + is_valid_hello_interval(ctx, hello_interval) + is_valid_stp_vlan_parameters(ctx, db, vlan_name, "hello_time", hello_interval) + db.mod_entry('STP_VLAN', vlan_name, {'hello_time': hello_interval}) + + +@spanning_tree_vlan.command('max_age') +@click.argument('vid', metavar='', required=True, type=int) +@click.argument('max_age', metavar='<6-40 seconds>', required=True, type=int) +@clicommon.pass_db +def stp_vlan_max_age(_db, vid, max_age): + """Configure STP max age for VLAN""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_vlan_exist_in_db(db, ctx, vid) + vlan_name = 'Vlan{}'.format(vid) + check_if_stp_enabled_for_vlan(ctx, db, vlan_name) + is_valid_max_age(ctx, max_age) + is_valid_stp_vlan_parameters(ctx, db, vlan_name, "max_age", max_age) + db.mod_entry('STP_VLAN', vlan_name, {'max_age': max_age}) + + 
+@spanning_tree_vlan.command('priority') +@click.argument('vid', metavar='', required=True, type=int) +@click.argument('priority', metavar='<0-61440>', required=True, type=int) +@clicommon.pass_db +def stp_vlan_priority(_db, vid, priority): + """Configure STP bridge priority for VLAN""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_vlan_exist_in_db(db, ctx, vid) + vlan_name = 'Vlan{}'.format(vid) + check_if_stp_enabled_for_vlan(ctx, db, vlan_name) + is_valid_bridge_priority(ctx, priority) + db.mod_entry('STP_VLAN', vlan_name, {'priority': priority}) + + +############################################### +# STP interface commands implementation +############################################### + + +def is_stp_enabled_for_interface(db, intf_name): + stp_entry = db.get_entry('STP_PORT', intf_name) + stp_enabled = stp_entry.get("enabled") + if stp_enabled == "true": + return True + else: + return False + + +def check_if_stp_enabled_for_interface(ctx, db, intf_name): + if not is_stp_enabled_for_interface(db, intf_name): + ctx.fail("STP is not enabled for interface {}".format(intf_name)) + + +def check_if_interface_is_valid(ctx, db, interface_name): + from config.main import interface_name_is_valid + if interface_name_is_valid(db, interface_name) is False: + ctx.fail("Interface name is invalid. 
Please enter a valid interface name!!") + for key in db.get_table('INTERFACE'): + if type(key) != tuple: + continue + if key[0] == interface_name: + ctx.fail(" {} has ip address {} configured - It's not a L2 interface".format(interface_name, key[1])) + if is_portchannel_member_port(db, interface_name): + ctx.fail(" {} is a portchannel member port - STP can't be configured".format(interface_name)) + if not is_vlan_configured_interface(db, interface_name): + ctx.fail(" {} has no VLAN configured - It's not a L2 interface".format(interface_name)) + + +@spanning_tree.group('interface') +@clicommon.pass_db +def spanning_tree_interface(_db): + """Configure STP for interface""" + pass + + +@spanning_tree_interface.command('enable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_enable(_db, interface_name): + """Enable STP for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_global_stp_enabled(db, ctx) + if is_stp_enabled_for_interface(db, interface_name): + ctx.fail("STP is already enabled for " + interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + stp_intf_entry = db.get_entry('STP_PORT', interface_name) + if len(stp_intf_entry) == 0: + fvs = {'enabled': 'true', + 'root_guard': 'false', + 'bpdu_guard': 'false', + 'bpdu_guard_do_disable': 'false', + 'portfast': 'false', + 'uplink_fast': 'false'} + db.set_entry('STP_PORT', interface_name, fvs) + else: + db.mod_entry('STP_PORT', interface_name, {'enabled': 'true'}) + + +@spanning_tree_interface.command('disable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_disable(_db, interface_name): + """Disable STP for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_global_stp_enabled(db, ctx) + check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'enabled': 'false'}) + + +# STP interface port priority 
+STP_INTERFACE_MIN_PRIORITY = 0 +STP_INTERFACE_MAX_PRIORITY = 240 +STP_INTERFACE_DEFAULT_PRIORITY = 128 + + +def is_valid_interface_priority(ctx, intf_priority): + if intf_priority not in range(STP_INTERFACE_MIN_PRIORITY, STP_INTERFACE_MAX_PRIORITY + 1): + ctx.fail("STP interface priority must be in range 0-240") + + +@spanning_tree_interface.command('priority') +@click.argument('interface_name', metavar='', required=True) +@click.argument('priority', metavar='<0-240>', required=True, type=int) +@clicommon.pass_db +def stp_interface_priority(_db, interface_name, priority): + """Configure STP port priority for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + is_valid_interface_priority(ctx, priority) + curr_intf_proirty = db.get_entry('STP_PORT', interface_name).get('priority') + db.mod_entry('STP_PORT', interface_name, {'priority': priority}) + # update interface priority in all stp_vlan_intf entries if entry exists + for vlan, intf in db.get_table('STP_VLAN_PORT'): + if intf == interface_name: + vlan_intf_key = "{}|{}".format(vlan, interface_name) + vlan_intf_entry = db.get_entry('STP_VLAN_PORT', vlan_intf_key) + if len(vlan_intf_entry) != 0: + vlan_intf_priority = vlan_intf_entry.get('priority') + if curr_intf_proirty == vlan_intf_priority: + db.mod_entry('STP_VLAN_PORT', vlan_intf_key, {'priority': priority}) + # end + + +# STP interface port path cost +STP_INTERFACE_MIN_PATH_COST = 1 +STP_INTERFACE_MAX_PATH_COST = 200000000 + + +def is_valid_interface_path_cost(ctx, intf_path_cost): + if intf_path_cost < STP_INTERFACE_MIN_PATH_COST or intf_path_cost > STP_INTERFACE_MAX_PATH_COST: + ctx.fail("STP interface path cost must be in range 1-200000000") + + +@spanning_tree_interface.command('cost') +@click.argument('interface_name', metavar='', required=True) +@click.argument('cost', metavar='<1-200000000>', required=True, type=int) 
+@clicommon.pass_db +def stp_interface_path_cost(_db, interface_name, cost): + """Configure STP path cost for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + is_valid_interface_path_cost(ctx, cost) + curr_intf_cost = db.get_entry('STP_PORT', interface_name).get('path_cost') + db.mod_entry('STP_PORT', interface_name, {'path_cost': cost}) + # update interface path_cost in all stp_vlan_intf entries if entry exists + for vlan, intf in db.get_table('STP_VLAN_PORT'): + if intf == interface_name: + vlan_intf_key = "{}|{}".format(vlan, interface_name) + vlan_intf_entry = db.get_entry('STP_VLAN_PORT', vlan_intf_key) + if len(vlan_intf_entry) != 0: + vlan_intf_cost = vlan_intf_entry.get('path_cost') + if curr_intf_cost == vlan_intf_cost: + db.mod_entry('STP_VLAN_PORT', vlan_intf_key, {'path_cost': cost}) + # end + + +# STP interface root guard +@spanning_tree_interface.group('root_guard') +@clicommon.pass_db +def spanning_tree_interface_root_guard(_db): + """Configure STP root guard for interface""" + pass + + +@spanning_tree_interface_root_guard.command('enable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_root_guard_enable(_db, interface_name): + """Enable STP root guard for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'root_guard': 'true'}) + + +@spanning_tree_interface_root_guard.command('disable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_root_guard_disable(_db, interface_name): + """Disable STP root guard for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + 
check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'root_guard': 'false'}) + + +# STP interface bpdu guard +@spanning_tree_interface.group('bpdu_guard') +@clicommon.pass_db +def spanning_tree_interface_bpdu_guard(_db): + """Configure STP bpdu guard for interface""" + pass + + +@spanning_tree_interface_bpdu_guard.command('enable') +@click.argument('interface_name', metavar='', required=True) +@click.option('-s', '--shutdown', is_flag=True) +@clicommon.pass_db +def stp_interface_bpdu_guard_enable(_db, interface_name, shutdown): + """Enable STP bpdu guard for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + if shutdown is True: + bpdu_guard_do_disable = 'true' + else: + bpdu_guard_do_disable = 'false' + fvs = {'bpdu_guard': 'true', + 'bpdu_guard_do_disable': bpdu_guard_do_disable} + db.mod_entry('STP_PORT', interface_name, fvs) + + +@spanning_tree_interface_bpdu_guard.command('disable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_bpdu_guard_disable(_db, interface_name): + """Disable STP bpdu guard for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'bpdu_guard': 'false'}) + + +# STP interface portfast +@spanning_tree_interface.group('portfast') +@clicommon.pass_db +def spanning_tree_interface_portfast(_db): + """Configure STP portfast for interface""" + pass + + +@spanning_tree_interface_portfast.command('enable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_portfast_enable(_db, interface_name): + """Enable STP portfast for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + 
check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'portfast': 'true'}) + + +@spanning_tree_interface_portfast.command('disable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_portfast_disable(_db, interface_name): + """Disable STP portfast for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'portfast': 'false'}) + + +# STP interface root uplink_fast +@spanning_tree_interface.group('uplink_fast') +@clicommon.pass_db +def spanning_tree_interface_uplink_fast(_db): + """Configure STP uplink fast for interface""" + pass + + +@spanning_tree_interface_uplink_fast.command('enable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_uplink_fast_enable(_db, interface_name): + """Enable STP uplink fast for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'uplink_fast': 'true'}) + + +@spanning_tree_interface_uplink_fast.command('disable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_uplink_fast_disable(_db, interface_name): + """Disable STP uplink fast for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'uplink_fast': 'false'}) + + +############################################### +# STP interface per VLAN commands implementation +############################################### 
+@spanning_tree_vlan.group('interface') +@clicommon.pass_db +def spanning_tree_vlan_interface(_db): + """Configure STP parameters for interface per VLAN""" + pass + + +# STP interface per vlan port priority +def is_valid_vlan_interface_priority(ctx, priority): + if priority not in range(STP_INTERFACE_MIN_PRIORITY, STP_INTERFACE_MAX_PRIORITY + 1): + ctx.fail("STP per vlan port priority must be in range 0-240") + + +@spanning_tree_vlan_interface.command('priority') +@click.argument('vid', metavar='', required=True, type=int) +@click.argument('interface_name', metavar='', required=True) +@click.argument('priority', metavar='<0-240>', required=True, type=int) +@clicommon.pass_db +def stp_vlan_interface_priority(_db, vid, interface_name, priority): + """Configure STP per vlan port priority for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + vlan_name = 'Vlan{}'.format(vid) + check_if_stp_enabled_for_vlan(ctx, db, vlan_name) + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_vlan_exist_in_db(db, ctx, vid) + is_interface_vlan_member(db, vlan_name, interface_name) + is_valid_vlan_interface_priority(ctx, priority) + vlan_interface = str(vlan_name) + "|" + interface_name + db.mod_entry('STP_VLAN_PORT', vlan_interface, {'priority': priority}) + + +@spanning_tree_vlan_interface.command('cost') +@click.argument('vid', metavar='', required=True, type=int) +@click.argument('interface_name', metavar='', required=True) +@click.argument('cost', metavar='<1-200000000>', required=True, type=int) +@clicommon.pass_db +def stp_vlan_interface_cost(_db, vid, interface_name, cost): + """Configure STP per vlan path cost for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + vlan_name = 'Vlan{}'.format(vid) + check_if_stp_enabled_for_vlan(ctx, db, vlan_name) + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_vlan_exist_in_db(db, ctx, vid) + is_interface_vlan_member(db, vlan_name, interface_name) + 
is_valid_interface_path_cost(ctx, cost) + vlan_interface = str(vlan_name) + "|" + interface_name + db.mod_entry('STP_VLAN_PORT', vlan_interface, {'path_cost': cost}) + + +# Invoke main() +# if __name__ == '__main__': +# spanning_tree() diff --git a/config/syslog.py b/config/syslog.py index a5d520d9cf..7228e365c8 100644 --- a/config/syslog.py +++ b/config/syslog.py @@ -642,3 +642,57 @@ def disable_rate_limit_feature(db, service_name, namespace): if not failed: click.echo(f'Disabled syslog rate limit feature for {feature_name}') + + +@syslog.command('level') +@click.option("-i", "--identifier", + required=True, + help="Log identifier in DB for which loglevel is applied (provided with -l)") +@click.option("-l", "--level", + required=True, + help="Loglevel value", + type=click.Choice(['DEBUG', 'INFO', 'NOTICE', 'WARN', 'ERROR'])) +@click.option("--container", + help="Container name to which the SIGHUP is sent (provided with --pid or --program)") +@click.option("--program", + help="Program name to which the SIGHUP is sent (provided with --container)") +@click.option("--pid", + help="Process ID to which the SIGHUP is sent (provided with --container if PID is from container)") +@click.option('--namespace', '-n', 'namespace', default=None, + type=click.Choice(multi_asic_util.multi_asic_ns_choices()), + show_default=True, help='Namespace name') +@clicommon.pass_db +def level(db, identifier, level, container, program, pid, namespace): + """ Configure log level """ + if program and not container: + raise click.UsageError('--program must be specified with --container') + + if container and not program and not pid: + raise click.UsageError('--container must be specified with --pid or --program') + + if not namespace: + cfg_db = db.cfgdb + else: + asic_id = multi_asic.get_asic_id_from_name(namespace) + container = f'{container}{asic_id}' + cfg_db = db.cfgdb_clients[namespace] + + cfg_db.mod_entry('LOGGER', identifier, {'LOGLEVEL': level}) + if not container and not program and 
not pid: + return + + log_config = cfg_db.get_entry('LOGGER', identifier) + require_manual_refresh = log_config.get('require_manual_refresh') + if not require_manual_refresh: + return + + if container: + if program: + command = ['docker', 'exec', '-i', container, 'supervisorctl', 'signal', 'HUP', program] + else: + command = ['docker', 'exec', '-i', container, 'kill', '-s', 'SIGHUP', pid] + else: + command = ['kill', '-s', 'SIGHUP', pid] + output, ret = clicommon.run_command(command, return_cmd=True) + if ret != 0: + raise click.ClickException(f'Failed: {output}') diff --git a/config/vlan.py b/config/vlan.py index 98cc95757e..eae51eb312 100644 --- a/config/vlan.py +++ b/config/vlan.py @@ -6,6 +6,7 @@ from time import sleep from .utils import log from .validated_config_db_connector import ValidatedConfigDBConnector +from . import stp ADHOC_VALIDATION = True DHCP_RELAY_TABLE = "DHCP_RELAY" @@ -76,6 +77,9 @@ def add_vlan(db, vid, multiple): if clicommon.check_if_vlanid_exist(db.cfgdb, vlan, "DHCP_RELAY"): ctx.fail("DHCPv6 relay config for {} already exists".format(vlan)) + # Enable STP on VLAN if PVST is enabled globally + stp.vlan_enable_stp(db.cfgdb, vlan) + # set dhcpv4_relay table set_dhcp_relay_table('VLAN', config_db, vlan, {'vlanid': str(vid)}) @@ -97,6 +101,29 @@ def delete_db_entry(entry_name, db_connector, db_name): db_connector.delete(db_name, entry_name) +def enable_stp_on_port(db, port): + if stp.is_global_stp_enabled(db) is True: + vlan_list_for_intf = stp.get_vlan_list_for_interface(db, port) + if len(vlan_list_for_intf) == 0: + stp.interface_enable_stp(db, port) + + +def disable_stp_on_vlan_port(db, vlan, port): + if stp.is_global_stp_enabled(db) is True: + vlan_interface = str(vlan) + "|" + port + db.set_entry('STP_VLAN_PORT', vlan_interface, None) + vlan_list_for_intf = stp.get_vlan_list_for_interface(db, port) + if len(vlan_list_for_intf) == 0: + db.set_entry('STP_PORT', port, None) + + +def disable_stp_on_vlan(db, vlan_interface): + 
db.set_entry('STP_VLAN', vlan_interface, None) + stp_intf_list = stp.get_intf_list_from_stp_vlan_intf_table(db, vlan_interface) + for intf_name in stp_intf_list: + key = vlan_interface + "|" + intf_name + db.set_entry('STP_VLAN_PORT', key, None) + @vlan.command('del') @click.argument('vid', metavar='', required=True) @click.option('-m', '--multiple', is_flag=True, help="Add Multiple Vlan(s) in Range or in Comma separated list") @@ -154,7 +181,8 @@ def del_vlan(db, vid, multiple, no_restart_dhcp_relay): for vxmap_key, vxmap_data in vxlan_table.items(): if vxmap_data['vlan'] == 'Vlan{}'.format(vid): ctx.fail("vlan: {} can not be removed. " - "First remove vxlan mapping '{}' assigned to VLAN".format(vid, '|'.join(vxmap_key))) + "First remove vxlan mapping '{}' assigned to VLAN".format( + vid, '|'.join(vxmap_key))) # set dhcpv4_relay table set_dhcp_relay_table('VLAN', config_db, vlan, None) @@ -169,6 +197,9 @@ def del_vlan(db, vid, multiple, no_restart_dhcp_relay): delete_db_entry("DHCPv6_COUNTER_TABLE|{}".format(vlan), db.db, db.db.STATE_DB) delete_db_entry("DHCP_COUNTER_TABLE|{}".format(vlan), db.db, db.db.STATE_DB) + # Delete STP_VLAN & STP_VLAN_PORT entries when VLAN is deleted. + disable_stp_on_vlan(db.cfgdb, 'Vlan{}'.format(vid)) + vlans = db.cfgdb.get_keys('VLAN') if not vlans: docker_exec_cmd = ['docker', 'exec', '-i', 'swss'] @@ -312,6 +343,10 @@ def add_vlan_member(db, vid, port, untagged, multiple, except_flag): ctx.fail("{} is in access mode! 
Tagged Members cannot be added".format(port)) elif existing_mode == mode_type or (existing_mode == "trunk" and mode_type == "access"): pass + + # If port is being made L2 port, enable STP + enable_stp_on_port(db.cfgdb, port) + try: config_db.set_entry('VLAN_MEMBER', (vlan, port), {'tagging_mode': "untagged" if untagged else "tagged"}) except ValueError: @@ -356,6 +391,9 @@ def del_vlan_member(db, vid, port, multiple, except_flag): if not clicommon.is_port_vlan_member(db.cfgdb, port, vlan): # TODO: MISSING CONSTRAINT IN YANG MODEL ctx.fail("{} is not a member of {}".format(port, vlan)) + # If port is being made non-L2 port, disable STP + disable_stp_on_vlan_port(db.cfgdb, vlan, port) + try: config_db.set_entry('VLAN_MEMBER', (vlan, port), None) delete_db_entry("DHCPv6_COUNTER_TABLE|{}".format(port), db.db, db.db.STATE_DB) diff --git a/config/vxlan.py b/config/vxlan.py index 71377d5609..ea49c4a34d 100644 --- a/config/vxlan.py +++ b/config/vxlan.py @@ -3,6 +3,7 @@ from jsonpatch import JsonPatchConflict from .validated_config_db_connector import ValidatedConfigDBConnector +from swsscommon.swsscommon import isInterfaceNameValid, IFACE_NAME_MAX_LEN ADHOC_VALIDATION = True # @@ -24,6 +25,8 @@ def add_vxlan(db, vxlan_name, src_ip): if ADHOC_VALIDATION: if not clicommon.is_ipaddress(src_ip): ctx.fail("{} invalid src ip address".format(src_ip)) + if not isInterfaceNameValid(vxlan_name): + ctx.fail("'vxlan_name' length should not exceed {} characters".format(IFACE_NAME_MAX_LEN)) vxlan_keys = db.cfgdb.get_keys('VXLAN_TUNNEL') if not vxlan_keys: @@ -317,4 +320,3 @@ def del_vxlan_map_range(db, vxlan_name, vlan_start, vlan_end, vni_start): config_db.set_entry('VXLAN_TUNNEL_MAP', mapname, None) except JsonPatchConflict as e: ctx.fail("Invalid ConfigDB. 
Error: {}".format(e)) - diff --git a/debug/main.py b/debug/main.py index 069159fc75..1c12dffe85 100755 --- a/debug/main.py +++ b/debug/main.py @@ -4,6 +4,7 @@ import subprocess from shlex import join + def run_command(command, pager=False): command_str = join(command) click.echo(click.style("Command: ", fg='cyan') + click.style(command_str, fg='green')) @@ -25,6 +26,7 @@ def cli(): """SONiC command line - 'debug' command""" pass + prefix_pattern = '^[A-Za-z0-9.:/]*$' p = subprocess.check_output(['sudo', 'vtysh', '-c', 'show version'], text=True) if 'FRRouting' in p: diff --git a/debug/stp.py b/debug/stp.py new file mode 100644 index 0000000000..c154537e2a --- /dev/null +++ b/debug/stp.py @@ -0,0 +1,92 @@ +import click +import utilities_common.cli as clicommon + + +# +# This group houses Spanning_tree commands and subgroups +# +@click.group(cls=clicommon.AliasedGroup, default_if_no_args=False, invoke_without_command=True) +@click.pass_context +def spanning_tree(ctx): + '''debug spanning_tree commands''' + if ctx.invoked_subcommand is None: + command = 'sudo stpctl dbg enable' + clicommon.run_command(command) + + +@spanning_tree.group('dump', cls=clicommon.AliasedGroup, default_if_no_args=False, invoke_without_command=True) +def stp_debug_dump(): + pass + + +@stp_debug_dump.command('global') +def stp_debug_dump_global(): + command = 'sudo stpctl global' + clicommon.run_command(command) + + +@stp_debug_dump.command('vlan') +@click.argument('vlan_id', metavar='', required=True) +def stp_debug_dump_vlan(vlan_id): + command = 'sudo stpctl vlan ' + vlan_id + clicommon.run_command(command) + + +@stp_debug_dump.command('interface') +@click.argument('vlan_id', metavar='', required=True) +@click.argument('interface_name', metavar='', required=True) +def stp_debug_dump_vlan_intf(vlan_id, interface_name): + command = 'sudo stpctl port ' + vlan_id + " " + interface_name + clicommon.run_command(command) + + +@spanning_tree.command('show') +def stp_debug_show(): + command = 'sudo 
stpctl dbg show' + clicommon.run_command(command) + + +@spanning_tree.command('reset') +def stp_debug_reset(): + command = 'sudo stpctl dbg disable' + clicommon.run_command(command) + + +@spanning_tree.command('bpdu') +@click.argument('mode', metavar='{rx|tx}', required=False) +@click.option('-d', '--disable', is_flag=True) +def stp_debug_bpdu(mode, disable): + command = 'sudo stpctl dbg bpdu {}{}'.format( + ('rx-' if mode == 'rx' else 'tx-' if mode == 'tx' else ''), + ('off' if disable else 'on')) + clicommon.run_command(command) + + +@spanning_tree.command('verbose') +@click.option('-d', '--disable', is_flag=True) +def stp_debug_verbose(disable): + command = 'sudo stpctl dbg verbose {}'.format("off" if disable else "on") + clicommon.run_command(command) + + +@spanning_tree.command('event') +@click.option('-d', '--disable', is_flag=True) +def stp_debug_event(disable): + command = 'sudo stpctl dbg event {}'.format("off" if disable else "on") + clicommon.run_command(command) + + +@spanning_tree.command('vlan') +@click.argument('vlan_id', metavar='', required=True) +@click.option('-d', '--disable', is_flag=True) +def stp_debug_vlan(vlan_id, disable): + command = 'sudo stpctl dbg vlan {} {}'.format(vlan_id, "off" if disable else "on") + clicommon.run_command(command) + + +@spanning_tree.command('interface') +@click.argument('interface_name', metavar='', required=True) +@click.option('-d', '--disable', is_flag=True) +def stp_debug_intf(interface_name, disable): + command = 'sudo stpctl dbg port {} {}'.format(interface_name, "off" if disable else "on") + clicommon.run_command(command) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index e9009ef67d..fdff48d5c7 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -225,6 +225,9 @@ * [Static DNS show command](#static-dns-show-command) * [Wake-on-LAN Commands](#wake-on-lan-commands) * [Send Wake-on-LAN Magic Packet command](#send-wake-on-lan-magic-packet-command) +* [Banner 
Commands](#banner-commands) + * [Banner config commands](#banner-config-commands) + * [Banner show command](#banner-show-command) ## Document History @@ -4891,6 +4894,7 @@ Optional argument "-p" specify a period (in seconds) with which to gather counte show interfaces counters rates show interfaces counters rif [-p|--period ] [-i ] show interfaces counters fec-histogram [-i ] + show interfaces counters fec-stats ``` - Example: @@ -5016,29 +5020,37 @@ In a FEC histogram, "bins" represent ranges of errors or specific categories of - Example: ``` admin@str-s6000-acs-11:/usr/bin$ show interface counters fec-histogram -i + Symbol Errors Per Codeword Codewords + -------------------------- --------- + BIN0: 1000000 + BIN1: 900000 + BIN2: 800000 + BIN3: 700000 + BIN4: 600000 + BIN5: 500000 + BIN6: 400000 + BIN7: 300000 + BIN8: 0 + BIN9: 0 + BIN10: 0 + BIN11: 0 + BIN12: 0 + BIN13: 0 + BIN14: 0 + BIN15: 0 + ``` -Symbol Errors Per Codeword Codewords --------------------------- --------- -BIN0: 1000000 -BIN1: 900000 -BIN2: 800000 -BIN3: 700000 -BIN4: 600000 -BIN5: 500000 -BIN6: 400000 -BIN7: 300000 -BIN8: 0 -BIN9: 0 -BIN10: 0 -BIN11: 0 -BIN12: 0 -BIN13: 0 -BIN14: 0 -BIN15: 0 - - ``` - +The "fec-stats" subcommand is used to disply the interface fec related statistic. +- Example: + ``` + admin@ctd615:~$ show interfaces counters fec-stats + IFACE STATE FEC_CORR FEC_UNCORR FEC_SYMBOL_ERR FEC_PRE_BER FEC_POST_BER + ----------- ------- ---------- ------------ ---------------- ------------- -------------- + Ethernet0 U 0 0 0 1.48e-20 0.00e+00 + Ethernet8 U 0 0 0 1.98e-19 0.00e+00 + Ethernet16 U 0 0 0 1.77e-20 0.00e+00 + ``` **show interfaces description** @@ -8604,74 +8616,11 @@ Go Back To [Beginning of the document](#) or [Beginning of this section](#platfo ### Mellanox Platform Specific Commands -There are few commands that are platform specific. Mellanox has used this feature and implemented Mellanox specific commands as follows. 
- -**show platform mlnx sniffer** - -This command shows the SDK sniffer status - -- Usage: - ``` - show platform mlnx sniffer - ``` - -- Example: - ``` - admin@sonic:~$ show platform mlnx sniffer - sdk sniffer is disabled - ``` - -**show platform mlnx sniffer** - -Another show command available on ‘show platform mlnx’ which is the issu status. -This means if ISSU is enabled on this SKU or not. A warm boot command can be executed only when ISSU is enabled on the SKU. - -- Usage: - ``` - show platform mlnx issu - ``` - -- Example: - ``` - admin@sonic:~$ show platform mlnx issu - ISSU is enabled - ``` - -In the case ISSU is disabled and warm-boot is called, the user will get a notification message explaining that the command cannot be invoked. - -- Example: - ``` - admin@sonic:~$ sudo warm-reboot - ISSU is not enabled on this HWSKU - Warm reboot is not supported - ``` - -**config platform mlnx** +config platform mlnx -This command is valid only on mellanox devices. The sub-commands for "config platform" gets populated only on mellanox platforms. -There are no other subcommands on non-Mellanox devices and hence this command appears empty and useless in other platforms. -The platform mellanox command currently includes a single sub command which is the SDK sniffer. -The SDK sniffer is a troubleshooting tool which records the RPC calls from the Mellanox SDK user API library to the sx_sdk task into a .pcap file. -This .pcap file can be replayed afterward to get the exact same configuration state on SDK and FW to reproduce and investigate issues. +This command is valid only on mellanox devices. The sub-commands for "config platform" gets populated only on mellanox platforms. There are no other subcommands on non-Mellanox devices and hence this command appears empty and useless in other platforms. -A new folder will be created to store the sniffer files: "/var/log/mellanox/sniffer/". 
The result file will be stored in a .pcap file, which includes a time stamp of the starting time in the file name, for example, "sx_sdk_sniffer_20180224081306.pcap" -In order to have a complete .pcap file with all the RPC calls, the user should disable the SDK sniffer. Swss service will be restarted and no capturing is taken place from that moment. -It is recommended to review the .pcap file while sniffing is disabled. -Once SDK sniffer is enabled/disabled, the user is requested to approve that swss service will be restarted. -For example: To change SDK sniffer status, swss service will be restarted, continue? [y/N]: -In order to avoid that confirmation the -y / --yes option should be used. - -- Usage: - ``` - config platform mlnx sniffer sdk [-y|--yes] - ``` - -- Example: - ``` - admin@sonic:~$ config platform mlnx sniffer sdk - To change SDK sniffer status, swss service will be restarted, continue? [y/N]: y - NOTE: In order to avoid that confirmation the -y / --yes option should be used. - ``` +The platform mellanox command currently includes no sub command. ### Barefoot Platform Specific Commands @@ -10858,6 +10807,35 @@ This command is used to disable syslog rate limit feature. config syslog rate-limit-feature disable database -n asci0 ``` +**config syslog level** + +This command is used to configure log level for a given log identifier. 
+ +- Usage: + ``` + config syslog level -i -l --container [] --program [] + + config syslog level -i -l --container [] --pid [] + + config syslog level -i -l ---pid [] + ``` + +- Example: + + ``` + # Update the log level without refresh the configuration + config syslog level -i xcvrd -l DEBUG + + # Update the log level and send SIGHUP to xcvrd running in PMON + config syslog level -i xcvrd -l DEBUG --container pmon --program xcvrd + + # Update the log level and send SIGHUP to PID 20 running in PMON + config syslog level -i xcvrd -l DEBUG --container pmon --pid 20 + + # Update the log level and send SIGHUP to PID 20 running in host + config syslog level -i xcvrd -l DEBUG --pid 20 + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#syslog) ## System State @@ -13857,3 +13835,89 @@ Sending 3 magic packet to 11:33:55:77:99:bb via interface Vlan1000 ``` For the 4th example, it specifise 2 target MAC addresses and `count` is 3. So it'll send 6 magic packets in total. + +# Banner Commands + +This sub-section explains the list of the configuration options available for Banner feature. + +## Banner config commands + +- Set banner feature state + +``` +admin@sonic:~$ config banner state +Usage: config config banner state + + Set banner feature state + +Options: + -?, -h, --help Show this message and exit. +``` + +- Set login message + +``` +admin@sonic:~$ config banner login +Usage: config banner login + + Set login message + +Options: + -?, -h, --help Show this message and exit. +``` + +- Set logout message + +``` +admin@sonic:~$ config banner logout +Usage: config banner logout + + Set logout message + +Options: + -?, -h, --help Show this message and exit. +``` + +- Set message of the day + +``` +admin@sonic:~$ config banner motd +Usage: config banner motd + + Set message of the day + +Options: + -?, -h, --help Show this message and exit. 
import base64
import uuid
import socket
import ipaddress
from google.protobuf.message import Message
from dash_api.types_pb2 import Guid, IpAddress, IpPrefix
from google.protobuf.json_format import MessageToDict


def format_ip(node):
    """Render a network-byte-order u32 (as produced by protobuf) as a dotted-quad string."""
    host_order = socket.ntohl(node)
    return str(ipaddress.IPv4Address(host_order))


def format_mac(node):
    """Render a base64-encoded 6-byte MAC as colon-separated lowercase hex."""
    raw = base64.b64decode(node)
    hex_str = raw.hex()
    # 12 hex chars -> 6 colon-separated octets
    return ':'.join(hex_str[i:i + 2] for i in range(0, 12, 2))


def format_guid_dict(node):
    """Render a Guid message dict ({'value': <base64 bytes>}) as a canonical UUID string."""
    raw = base64.b64decode(node['value'])
    return str(uuid.UUID(bytes=raw))


def format_ip_address_dict(node):
    """Render an IpAddress message dict as text.

    Only the 'ipv4' oneof arm is handled; any other arm yields None.
    # NOTE(review): IPv6 addresses are not decoded here — confirm whether
    # callers can ever pass an 'ipv6' arm.
    """
    if 'ipv4' in node:
        return format_ip(node['ipv4'])


def format_ip_prefix(node):
    """Render an IpPrefix message dict ({'ip': ..., 'mask': ...}) in CIDR notation."""
    address = format_ip_address_dict(node['ip'])
    netmask = format_ip_address_dict(node['mask'])
    # strict=False allows host bits to be set in 'ip'
    return str(ipaddress.IPv4Network(f'{address}/{netmask}', strict=False))


def get_decoded_value(pb, pb_data):
    """Parse the b'pb' payload of an APPL_DB hash into *pb* and return it as a
    dict with well-known DASH types (IPs, GUIDs, MACs, prefixes) humanized."""
    pb.ParseFromString(pb_data[b'pb'])
    decoded = MessageToDict(pb, preserving_proto_field_name=True)
    decoded = find_known_types_sec(pb, decoded)
    return decoded


# Fully-qualified names of message types that get custom string rendering.
decode_types = [f"{cls.__module__}.{cls.__name__}" for cls in (IpAddress, Guid, IpPrefix)]
# Dispatch: message class name (or special field name) -> formatter.
decode_fn = {'IpAddress': format_ip_address_dict,
             'Guid': format_guid_dict,
             'mac_address': format_mac,
             'IpPrefix': format_ip_prefix}


def find_known_types_sec(pb2_obj, pb2_dict):
    """Walk *pb2_obj* (a parsed protobuf) alongside its MessageToDict image
    *pb2_dict*, replacing every well-known DASH value in the dict with its
    human-readable string form. Returns the mutated dict."""

    def _handle_singular(msg, node, field_name):
        """Decode or recurse into a non-repeated message-typed field."""
        cls_name = type(msg).__name__
        qualified = f"{type(msg).__module__}.{type(msg).__name__}"
        if qualified in decode_types:
            node[field_name] = decode_fn[cls_name](node[field_name])
        else:
            _walk(msg, node[field_name])

    def _handle_repeated(messages, node, field_name):
        """Decode or recurse into each element of a repeated message field."""
        decoded_items = []
        replaced = False
        for idx, item in enumerate(messages):
            if isinstance(item, Message):
                qualified = f"{type(item).__module__}.{type(item).__name__}"
                if qualified in decode_types:
                    replaced = True
                    decoded_items.append(decode_fn[type(item).__name__](node[field_name][idx]))
                else:
                    # NOTE(review): recursion indexes the top-level dict
                    # (pb2_dict) rather than the current node — preserved
                    # from the original; confirm intent for nested repeats.
                    _walk(item, pb2_dict[field_name][idx])
        if replaced:
            node[field_name] = decoded_items

    def _walk(proto_obj, node=pb2_dict):
        """Visit every set field of *proto_obj*, mutating *node* in place."""
        for descriptor, _ in proto_obj.ListFields():
            field_name = descriptor.name
            if descriptor.type == descriptor.TYPE_MESSAGE:
                child = getattr(proto_obj, field_name)
                if descriptor.label == descriptor.LABEL_REPEATED:
                    _handle_repeated(child, node, field_name)
                else:
                    _handle_singular(child, node, field_name)
            elif field_name in decode_fn:
                # Scalar field with a dedicated formatter (e.g. mac_address).
                node[field_name] = decode_fn[field_name](node[field_name])

    _walk(pb2_obj)
    return pb2_dict
ctx.obj.conn_pool) + collected_info = populate_fv(collected_info, module, namespace, ctx.obj.conn_pool, obj.return_pb2_obj()) for id in vidtorid.keys(): collected_info[id]["ASIC_DB"]["vidtorid"] = vidtorid[id] @@ -145,7 +145,7 @@ def filter_out_dbs(db_list, collected_info): return collected_info -def populate_fv(info, module, namespace, conn_pool): +def populate_fv(info, module, namespace, conn_pool, dash_object): all_dbs = set() for id in info.keys(): for db_name in info[id].keys(): @@ -157,7 +157,9 @@ def populate_fv(info, module, namespace, conn_pool): db_cfg_file.connect(plugins.dump_modules[module].CONFIG_FILE, namespace) else: conn_pool.get(db_name, namespace) - + if dash_object: + conn_pool.get_dash_conn(namespace) + redis_conn = conn_pool.cache.get(namespace, {}).get("DASH_"+CONN, None) db_conn = conn_pool.cache.get(namespace, {}).get(CONN, None) final_info = {} @@ -170,10 +172,17 @@ def populate_fv(info, module, namespace, conn_pool): for key in info[id][db_name]["keys"]: if db_name == "CONFIG_FILE": fv = db_cfg_file.get(db_name, key) + elif dash_object and db_name == "APPL_DB": + try: + from dump.dash_util import get_decoded_value + pb_data = redis_conn.hgetall(key) + fv = get_decoded_value(dash_object, pb_data) + except ModuleNotFoundError: + print("Issue in importing dash module!") + return final_info else: fv = db_conn.get_all(db_name, key) final_info[id][db_name]["keys"].append({key: fv}) - return final_info diff --git a/dump/match_infra.py b/dump/match_infra.py index 8b15f69a05..e3da6c08c3 100644 --- a/dump/match_infra.py +++ b/dump/match_infra.py @@ -6,6 +6,8 @@ from swsscommon.swsscommon import SonicV2Connector, SonicDBConfig from sonic_py_common import multi_asic from utilities_common.constants import DEFAULT_NAMESPACE +import redis + # Constants CONN = "conn" @@ -60,6 +62,7 @@ def __init__(self, **kwargs): self.just_keys = kwargs["just_keys"] if "just_keys" in kwargs else True self.ns = kwargs["ns"] if "ns" in kwargs else "" 
class RedisPySource(SourceAdapter):
    """ Concrete Adaptor Class for connecting to APPL_DB using Redis library"""

    def __init__(self, conn_pool, pb_obj):
        # Raw redis connection is established lazily in connect().
        self.conn = None
        self.pool = conn_pool
        # Protobuf message instance used to decode binary hash values.
        self.pb_obj = pb_obj

    def get_decoded_value(self, pb_obj, key_val):
        """Decode a raw APPL_DB hash into a human-readable dict; None on import failure."""
        try:
            from dump.dash_util import get_decoded_value
        except ModuleNotFoundError as e:
            verbose_print("RedisPySource: decoded value cannot be obtained \
                since dash related library import issues\n" + str(e))
            return
        return get_decoded_value(pb_obj, key_val)

    def connect(self, db, ns):
        """Obtain a cached redis connection for namespace *ns*; False on failure."""
        try:
            self.conn = self.pool.get_dash_conn(ns)
        except Exception as e:
            verbose_print("RedisPySource: Connection Failed\n" + str(e))
            return False
        return True

    def get_separator(self):
        """APPL_DB key separator."""
        return ":"

    def getKeys(self, db, table, key_pattern):
        """Return decoded key names matching table:pattern (redis returns bytes)."""
        pattern = table + self.get_separator() + key_pattern
        return [raw.decode() for raw in self.conn.keys(pattern)]

    def get(self, db, key):
        """Fetch and decode the full hash stored at *key*."""
        raw_hash = self.conn.hgetall(key)
        return self.get_decoded_value(self.pb_obj, raw_hash)

    def hget(self, db, key, field):
        """Fetch one decoded field of the hash stored at *key*."""
        raw_hash = self.conn.hgetall(key)
        decoded = self.get_decoded_value(self.pb_obj, raw_hash)
        return decoded.get(field)

    def hgetall(self, db, key):
        """Fetch and decode the full hash stored at *key*."""
        raw_hash = self.conn.hgetall(key)
        return self.get_decoded_value(self.pb_obj, raw_hash)
is the only one which stores data in protobuf + format which is not obtained fully by the SonicV2Connector + get_all API + """ + # The get_all function for a SonicV2Connector does not support binary data due to which we + # have to use the redis Library. Relevant Issue: https://github.com/sonic-net/sonic-swss-common/issues/886 + return redis.Redis(unix_socket_path=SonicDBConfig.getDbSock("APPL_DB", ns), + db=SonicDBConfig.getDbId("APPL_DB", ns)) + def get(self, db_name, ns, update=False): """ Returns a SonicV2Connector Object and caches it for further requests """ if ns not in self.cache: self.cache[ns] = {} + if CONN not in self.cache[ns]: self.cache[ns][CONN] = self.initialize_connector(ns) + if CONN_TO not in self.cache[ns]: self.cache[ns][CONN_TO] = set() if update or db_name not in self.cache[ns][CONN_TO]: self.cache[ns][CONN].connect(db_name) self.cache[ns][CONN_TO].add(db_name) return self.cache[ns][CONN] + def get_dash_conn(self, ns): + """ Returns a Redis Connection Object and caches it for further requests """ + if ns not in self.cache: + self.cache[ns] = {} + if "DASH_"+CONN not in self.cache[ns]: + self.cache[ns]["DASH_"+CONN] = self.initialize_redis_conn(ns) + return self.cache[ns]["DASH_"+CONN] + def clear(self, namespace=None): if not namespace: self.cache.clear() elif namespace in self.cache: del self.cache[namespace] - def fill(self, ns, conn, connected_to): + def fill(self, ns, conn, connected_to, dash_object=False): """ Update internal cache """ - self.cache[ns] = {CONN: conn, CONN_TO: set(connected_to)} + if ns not in self.cache: + self.cache[ns] = {} + if dash_object: + self.cache[ns]["DASH_"+CONN] = conn + return + self.cache[ns][CONN] = conn + self.cache[ns][CONN_TO] = set(connected_to) class MatchEngine: @@ -293,10 +368,16 @@ def get_redis_source_adapter(self): def get_json_source_adapter(self): return JsonSource() + def get_redis_py_adapter(self, pb_obj): + return RedisPySource(self.conn_pool, pb_obj) + def __get_source_adapter(self, req): 
from dump.helper import create_template_dict
from dump.match_infra import MatchRequest
from swsscommon.swsscommon import SonicDBConfig
from dash_api.acl_group_pb2 import AclGroup
from .executor import Executor


APPL_DB_SEPARATOR = SonicDBConfig.getSeparator("APPL_DB")


class Dash_Acl_Group(Executor):
    """
    Debug Dump Plugin for DASH ACL Group
    """
    ARG_NAME = "dash_acl_group_name"

    def __init__(self, match_engine=None):
        super().__init__(match_engine)
        # Marks this plugin as a DASH object so the dump infra decodes
        # APPL_DB values via the protobuf returned by return_pb2_obj().
        self.is_dash_object = True

    def get_all_args(self, ns=""):
        """Return every ACL group name present in APPL_DB for namespace *ns*."""
        req = MatchRequest(db="APPL_DB", table="DASH_ACL_GROUP_TABLE", key_pattern="*", ns=ns)
        ret = self.match_engine.fetch(req)
        acl_group_keys = ret["keys"]
        # Split on the first separator only so names that themselves contain
        # the APPL_DB separator survive intact; this also matches the sibling
        # plugins (Dash_Acl_In/Out/Rule), which use split(sep, 1)[1].
        return [key.split(APPL_DB_SEPARATOR, 1)[1] for key in acl_group_keys]

    def execute(self, params):
        """Collect the APPL_DB entry for the ACL group named in *params*.

        params: dict with ARG_NAME (group name) and "namespace".
        Returns the populated template dict.
        """
        self.ret_temp = create_template_dict(dbs=["APPL_DB"])
        dash_acl_group_table_name = params[self.ARG_NAME]
        self.ns = params["namespace"]
        self.init_dash_acl_group_table_appl_info(dash_acl_group_table_name)
        return self.ret_temp

    def init_dash_acl_group_table_appl_info(self, dash_acl_group_table_name):
        """Fetch the DASH_ACL_GROUP_TABLE entry and record it in the template."""
        req = MatchRequest(db="APPL_DB", table="DASH_ACL_GROUP_TABLE",
                           key_pattern=dash_acl_group_table_name, ns=self.ns)
        ret = self.match_engine.fetch(req)
        self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"])

    def return_pb2_obj(self):
        """Protobuf message type used to decode this table's values."""
        return AclGroup()
table="DASH_ACL_IN_TABLE", key_pattern=dash_acl_in_table_name, ns=self.ns) + ret = self.match_engine.fetch(req) + self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) + + def return_pb2_obj(self): + return AclIn() diff --git a/dump/plugins/dash_acl_out.py b/dump/plugins/dash_acl_out.py new file mode 100644 index 0000000000..147e8d3fd2 --- /dev/null +++ b/dump/plugins/dash_acl_out.py @@ -0,0 +1,40 @@ +from dump.helper import create_template_dict +from dump.match_infra import MatchRequest +from swsscommon.swsscommon import SonicDBConfig +from dash_api.acl_out_pb2 import AclOut +from .executor import Executor + + +APPL_DB_SEPARATOR = SonicDBConfig.getSeparator("APPL_DB") + + +class Dash_Acl_Out(Executor): + """ + Debug Dump Plugin for DASH ACL Out Rule + """ + ARG_NAME = "dash_acl_out" + + def __init__(self, match_engine=None): + super().__init__(match_engine) + self.is_dash_object = True + + def get_all_args(self, ns=""): + req = MatchRequest(db="APPL_DB", table="DASH_ACL_OUT_TABLE", key_pattern="*", ns=ns) + ret = self.match_engine.fetch(req) + appliance_tables = ret["keys"] + return [key.split(APPL_DB_SEPARATOR, 1)[1] for key in appliance_tables] + + def execute(self, params): + self.ret_temp = create_template_dict(dbs=["APPL_DB"]) + dash_acl_out_table_name = params[self.ARG_NAME] + self.ns = params["namespace"] + self.init_dash_acl_out_table_appl_info(dash_acl_out_table_name) + return self.ret_temp + + def init_dash_acl_out_table_appl_info(self, dash_acl_out_table_name): + req = MatchRequest(db="APPL_DB", table="DASH_ACL_OUT_TABLE", key_pattern=dash_acl_out_table_name, ns=self.ns) + ret = self.match_engine.fetch(req) + self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) + + def return_pb2_obj(self): + return AclOut() diff --git a/dump/plugins/dash_acl_rule.py b/dump/plugins/dash_acl_rule.py new file mode 100644 index 0000000000..1048eccfd7 --- /dev/null +++ b/dump/plugins/dash_acl_rule.py @@ -0,0 +1,40 @@ +from dump.helper 
import create_template_dict +from dump.match_infra import MatchRequest +from swsscommon.swsscommon import SonicDBConfig +from dash_api.acl_rule_pb2 import AclRule +from .executor import Executor + + +APPL_DB_SEPARATOR = SonicDBConfig.getSeparator("APPL_DB") + + +class Dash_Acl_Rule(Executor): + """ + Debug Dump Plugin for DASH ACL Rule + """ + ARG_NAME = "dash_acl_rule_name" + + def __init__(self, match_engine=None): + super().__init__(match_engine) + self.is_dash_object = True + + def get_all_args(self, ns=""): + req = MatchRequest(db="APPL_DB", table="DASH_ACL_RULE_TABLE", key_pattern="*", ns=ns) + ret = self.match_engine.fetch(req) + appliance_tables = ret["keys"] + return [key.split(APPL_DB_SEPARATOR, 1)[1] for key in appliance_tables] + + def execute(self, params): + self.ret_temp = create_template_dict(dbs=["APPL_DB"]) + dash_acl_rule_table_name = params[self.ARG_NAME] + self.ns = params["namespace"] + self.init_dash_acl_rule_table_appl_info(dash_acl_rule_table_name) + return self.ret_temp + + def init_dash_acl_rule_table_appl_info(self, dash_acl_rule_table_name): + req = MatchRequest(db="APPL_DB", table="DASH_ACL_RULE_TABLE", key_pattern=dash_acl_rule_table_name, ns=self.ns) + ret = self.match_engine.fetch(req) + self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) + + def return_pb2_obj(self): + return AclRule() diff --git a/dump/plugins/dash_appliance.py b/dump/plugins/dash_appliance.py new file mode 100644 index 0000000000..a0c299321b --- /dev/null +++ b/dump/plugins/dash_appliance.py @@ -0,0 +1,40 @@ +from dump.helper import create_template_dict +from dump.match_infra import MatchRequest +from swsscommon.swsscommon import SonicDBConfig +from dash_api.appliance_pb2 import Appliance +from .executor import Executor + + +APPL_DB_SEPARATOR = SonicDBConfig.getSeparator("APPL_DB") + + +class Dash_Appliance(Executor): + """ + Debug Dump Plugin for DASH Appliance + """ + ARG_NAME = "dash_appliance_table" + + def __init__(self, 
from dump.helper import create_template_dict
from dump.match_infra import MatchRequest
from swsscommon.swsscommon import SonicDBConfig
from dash_api.eni_pb2 import Eni
from dash_api.vnet_pb2 import Vnet
from .executor import Executor


APPL_DB_SEPARATOR = SonicDBConfig.getSeparator("APPL_DB")


class Dash_Eni(Executor):
    """
    Debug Dump Plugin for DASH ENI
    """
    ARG_NAME = "dash_eni_value"

    def __init__(self, match_engine=None):
        super().__init__(match_engine)
        # DASH plugin: APPL_DB values are protobuf-encoded (see return_pb2_obj).
        self.is_dash_object = True
        # Populated by init_dash_eni_table_appl_info / _asic_info.
        self.vnet = None
        self.underlay_ip = None
        self.eni_oid = None

    def get_all_args(self, ns=""):
        """Return every ENI identifier stored in APPL_DB for namespace *ns*."""
        req = MatchRequest(db="APPL_DB", table="DASH_ENI_TABLE", key_pattern="*", ns=ns)
        self.ns = ns
        ret = self.match_engine.fetch(req)
        eni_keys = ret["keys"]
        # Strip only the leading table name: ENI ids may themselves contain
        # the APPL_DB separator (e.g. MAC-like values), so split once.
        return [key.split(APPL_DB_SEPARATOR, 1)[1] for key in eni_keys]

    def execute(self, params):
        """Collect APPL_DB and ASIC_DB state for the ENI named in *params*."""
        self.ret_temp = create_template_dict(dbs=["APPL_DB", "ASIC_DB"])
        dash_eni_table_name = params[self.ARG_NAME]
        self.ns = params["namespace"]
        self.init_dash_eni_table_appl_info(dash_eni_table_name)
        self.init_dash_eni_table_asic_info()
        return self.ret_temp

    def init_dash_eni_table_appl_info(self, dash_eni_table_name):
        """Fetch the DASH_ENI_TABLE entry; remember its vnet and underlay_ip
        so the ASIC_DB lookup can correlate the matching SAI ENI object."""
        req = MatchRequest(db="APPL_DB", table="DASH_ENI_TABLE",
                           key_pattern=dash_eni_table_name,
                           return_fields=["vnet", "underlay_ip"],
                           ns=self.ns, pb=Eni())
        ret = self.match_engine.fetch(req)
        self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"])
        if ret['keys']:
            self.vnet = ret['return_values'][ret['keys'][0]]['vnet']
            self.underlay_ip = ret['return_values'][ret['keys'][0]]['underlay_ip']

    def init_dash_eni_table_asic_info(self):
        """Resolve the SAI ENI object: APPL vnet -> vni -> SAI VNET oid ->
        SAI ENI whose VM underlay DIP matches the APPL underlay_ip."""
        if not self.vnet:
            return
        # Assume not found until the correlated ENI is located below.
        self.ret_temp["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_ENI")
        req = MatchRequest(db="APPL_DB", table="DASH_VNET_TABLE",
                           key_pattern=self.vnet, return_fields=["vni"],
                           ns=self.ns, pb=Vnet())
        ret = self.match_engine.fetch(req)
        if not ret['keys']:
            return
        vni = ret['return_values'][ret['keys'][0]]['vni']
        req = MatchRequest(db="ASIC_DB", table="ASIC_STATE:SAI_OBJECT_TYPE_VNET",
                           key_pattern="*", field="SAI_VNET_ATTR_VNI",
                           value=str(vni), ns=self.ns)
        ret = self.match_engine.fetch(req)
        if not ret['keys']:
            return
        oid = ret['keys'][0]
        # Reduce the full ASIC key down to its trailing "oid:0x..." part.
        oid_key = "oid" + oid.split("oid")[-1]
        req = MatchRequest(db="ASIC_DB", table="ASIC_STATE",
                           key_pattern="SAI_OBJECT_TYPE_ENI:*",
                           field="SAI_ENI_ATTR_VNET_ID",
                           return_fields=["SAI_ENI_ATTR_VM_UNDERLAY_DIP"],
                           value=str(oid_key), ns=self.ns)
        ret = self.match_engine.fetch(req)
        filtered_keys = []
        eni_key = None
        if not ret["error"] and len(ret['keys']) > 0:
            for key in ret["keys"]:
                underlay_dip = ret.get("return_values", {}).get(key, {}).get("SAI_ENI_ATTR_VM_UNDERLAY_DIP", "")
                if underlay_dip == self.underlay_ip:
                    # Found the correlated ENI — it is no longer "not found".
                    self.ret_temp["ASIC_DB"]["tables_not_found"].remove("ASIC_STATE:SAI_OBJECT_TYPE_ENI")
                    filtered_keys.append(key)
                    eni_key = key
                    break
        self.add_to_ret_template(req.table, req.db, filtered_keys, ret["error"])
        if eni_key:
            # Cached for dependent plugins (dash_route / dash_route_rule).
            self.eni_oid = eni_key.split(":")[-1]

    def return_pb2_obj(self):
        """Protobuf message type used to decode this table's values."""
        return Eni()
0000000000..28372b7808 --- /dev/null +++ b/dump/plugins/dash_qos.py @@ -0,0 +1,42 @@ +from dump.helper import create_template_dict +from dump.match_infra import MatchRequest +from swsscommon.swsscommon import SonicDBConfig +from dash_api.qos_pb2 import Qos +from .executor import Executor + + +APPL_DB_SEPARATOR = SonicDBConfig.getSeparator("APPL_DB") + + +class Dash_Qos(Executor): + """ + Debug Dump Plugin for DASH VNET Mapping + """ + ARG_NAME = "dash_qos" + + def __init__(self, match_engine=None): + super().__init__(match_engine) + self.is_dash_object = True + + def get_all_args(self, ns=""): + req = MatchRequest(db="APPL_DB", table="DASH_QOS_TABLE", key_pattern="*", ns=ns) + ret = self.match_engine.fetch(req) + appliance_tables = ret["keys"] + return [key.split(APPL_DB_SEPARATOR)[-1] for key in appliance_tables] + + def execute(self, params): + self.ret_temp = create_template_dict(dbs=["APPL_DB"]) + dash_qos_table_name = params[self.ARG_NAME] + self.ns = params["namespace"] + self.init_dash_qos_table_appl_info(dash_qos_table_name) + return self.ret_temp + + def init_dash_qos_table_appl_info(self, dash_qos_table_name): + req = MatchRequest(db="APPL_DB", table="DASH_QOS_TABLE", + key_pattern=dash_qos_table_name, + return_fields=["type"], ns=self.ns) + ret = self.match_engine.fetch(req) + self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) + + def return_pb2_obj(self): + return Qos() diff --git a/dump/plugins/dash_route.py b/dump/plugins/dash_route.py new file mode 100644 index 0000000000..08cbddd584 --- /dev/null +++ b/dump/plugins/dash_route.py @@ -0,0 +1,73 @@ +from dump.helper import create_template_dict +from dump.match_infra import MatchRequest +from swsscommon.swsscommon import SonicDBConfig +from dash_api.route_pb2 import Route +from .executor import Executor +from .dash_eni import Dash_Eni + +APPL_DB_SEPARATOR = SonicDBConfig.getSeparator("APPL_DB") + + +def get_route_pattern(dest, eni_oid): + return "*\"destination\":\"" + dest + 
"\",\"eni_id\":\"oid:" + eni_oid + "\"*" + + +class Dash_Route(Executor): + """ + Debug Dump Plugin for DASH Route + """ + ARG_NAME = "dash_route_name" + + def __init__(self, match_engine=None): + super().__init__(match_engine) + self.is_dash_object = True + self.dest = None + if match_engine: + self.eni_obj = Dash_Eni(match_engine) + else: + self.eni_obj = None + + def get_all_args(self, ns=""): + req = MatchRequest(db="APPL_DB", table="DASH_ROUTE_TABLE", key_pattern="*", ns=ns) + ret = self.match_engine.fetch(req) + appliance_tables = ret["keys"] + return [key.split(APPL_DB_SEPARATOR, 1)[1] for key in appliance_tables] + + def execute(self, params): + self.ret_temp = create_template_dict(dbs=["APPL_DB", "ASIC_DB"]) + dash_route_table_name = params[self.ARG_NAME] + self.ns = params["namespace"] + self.init_dash_route_table_appl_info(dash_route_table_name) + self.init_dash_route_table_asic_info() + return self.ret_temp + + def init_dash_route_table_appl_info(self, dash_route_table_name): + req = MatchRequest(db="APPL_DB", table="DASH_ROUTE_TABLE", key_pattern=dash_route_table_name, ns=self.ns) + ret = self.match_engine.fetch(req) + if not ret["error"] and len(ret["keys"]) != 0: + split_key = ret["keys"][0].split(":") + self.dest = split_key[-1] + self.eni = split_key[-2] + self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) + + def init_dash_route_table_asic_info(self): + if not self.dest: + return + if not self.eni_obj: + self.eni_obj = Dash_Eni() + params = {Dash_Eni.ARG_NAME: self.eni, "namespace": self.ns} + self.eni_obj.execute(params) + eni_oid = self.eni_obj.eni_oid + if not eni_oid: + self.ret_temp["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY") + return + req = MatchRequest(db="ASIC_DB", + table="ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY", + key_pattern=get_route_pattern(self.dest, eni_oid), + ns=self.ns) + ret = self.match_engine.fetch(req) + self.add_to_ret_template(req.table, req.db, 
ret["keys"], ret["error"]) + return + + def return_pb2_obj(self): + return Route() diff --git a/dump/plugins/dash_route_rule.py b/dump/plugins/dash_route_rule.py new file mode 100644 index 0000000000..02466e157f --- /dev/null +++ b/dump/plugins/dash_route_rule.py @@ -0,0 +1,94 @@ +from dump.helper import create_template_dict +from dump.match_infra import MatchRequest +from swsscommon.swsscommon import SonicDBConfig +from dash_api.route_rule_pb2 import RouteRule +from .executor import Executor +from .dash_eni import Dash_Eni +import ipaddress + +APPL_DB_SEPARATOR = SonicDBConfig.getSeparator("APPL_DB") + + +def get_route_rule_pattern(cidr_src_ip, eni_oid, vni, prio): + network = ipaddress.IPv4Network(cidr_src_ip) + ip_address = str(network.network_address) + mask = str(network.netmask) + ret_string = ( + f"*\"eni_id\":\"oid:{eni_oid}\"," + f"\"priority\":\"{prio}\"," + f"\"sip\":\"{ip_address}\"," + f"\"sip_mask\":\"{mask}\"," + f"*\"vni\":\"{vni}\"*") + return ret_string + + +class Dash_Route_Rule(Executor): + """ + Debug Dump Plugin for DASH Route Rule + """ + ARG_NAME = "dash_route_rule_name" + + def __init__(self, match_engine=None): + super().__init__(match_engine) + self.is_dash_object = True + self.src_ip = None + self.eni = None + self.vni = None + if match_engine: + self.eni_obj = Dash_Eni(match_engine) + else: + self.eni_obj = None + + def get_all_args(self, ns=""): + req = MatchRequest(db="APPL_DB", table="DASH_ROUTE_RULE_TABLE", key_pattern="*", ns=ns) + ret = self.match_engine.fetch(req) + route_rule_tables = ret["keys"] + return [key.split(APPL_DB_SEPARATOR, 1)[1] for key in route_rule_tables] + + def execute(self, params): + self.ret_temp = create_template_dict(dbs=["APPL_DB", "ASIC_DB"]) + dash_route_rule_table_name = params[self.ARG_NAME] + self.ns = params["namespace"] + self.init_dash_route_table_appl_info(dash_route_rule_table_name) + self.init_dash_route_table_asic_info() + return self.ret_temp + + def init_dash_route_table_appl_info(self, 
dash_route_rule_table_name): + req = MatchRequest(db="APPL_DB", + table="DASH_ROUTE_RULE_TABLE", + key_pattern=dash_route_rule_table_name, + return_fields=["priority"], ns=self.ns, pb=RouteRule()) + ret = self.match_engine.fetch(req) + if not ret["error"] and len(ret["keys"]) != 0: + split_key = ret["keys"][0].split(":") + self.src_ip = split_key[-1] + self.vni = split_key[-2] + self.eni = split_key[-3] + self.priority = str(ret['return_values'][ret['keys'][0]]['priority']) + ret = self.match_engine.fetch(req) + self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) + + def init_dash_route_table_asic_info(self): + if not self.src_ip: + return + print(self.eni) + if not self.eni_obj: + self.eni_obj = Dash_Eni() + params = {Dash_Eni.ARG_NAME: self.eni, "namespace": self.ns} + self.eni_obj.execute(params) + eni_oid = self.eni_obj.eni_oid + if not eni_oid: + self.ret_temp["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_INBOUND_ROUTING_ENTRY") + return + req = MatchRequest(db="ASIC_DB", + table="ASIC_STATE:SAI_OBJECT_TYPE_INBOUND_ROUTING_ENTRY", + key_pattern=get_route_rule_pattern(self.src_ip, + eni_oid, + self.vni, + self.priority), + ns=self.ns) + ret = self.match_engine.fetch(req) + self.add_to_ret_template(req.table, req.db, ret["keys"], ret["error"]) + + def return_pb2_obj(self): + return RouteRule() diff --git a/dump/plugins/dash_vnet.py b/dump/plugins/dash_vnet.py new file mode 100644 index 0000000000..1389451347 --- /dev/null +++ b/dump/plugins/dash_vnet.py @@ -0,0 +1,58 @@ +from dump.helper import create_template_dict +from dump.match_infra import MatchRequest +from swsscommon.swsscommon import SonicDBConfig +from dash_api.vnet_pb2 import Vnet +from .executor import Executor + + +APPL_DB_SEPARATOR = SonicDBConfig.getSeparator("APPL_DB") + + +class Dash_Vnet(Executor): + """ + Debug Dump Plugin for DASH VNET + """ + ARG_NAME = "dash_vnet_name" + + def __init__(self, match_engine=None): + super().__init__(match_engine) + 
from dump.helper import create_template_dict
from dump.match_infra import MatchRequest
from swsscommon.swsscommon import SonicDBConfig
from dash_api.vnet_mapping_pb2 import VnetMapping
from .executor import Executor

APPL_DB_SEPARATOR = SonicDBConfig.getSeparator("APPL_DB")


class Dash_Vnet_mapping(Executor):
    """
    Debug Dump Plugin for DASH VNET Mapping
    """
    ARG_NAME = "dash_vnet_mapping"

    def __init__(self, match_engine=None):
        super().__init__(match_engine)
        # DASH plugin: APPL_DB values are protobuf-encoded (see return_pb2_obj).
        self.is_dash_object = True

    def get_all_args(self, ns=""):
        """List every VNET mapping identifier stored in APPL_DB for *ns*."""
        request = MatchRequest(db="APPL_DB", table="DASH_VNET_MAPPING_TABLE", key_pattern="*", ns=ns)
        result = self.match_engine.fetch(request)
        # Drop only the table-name prefix; the remainder is the mapping id.
        return [key.split(APPL_DB_SEPARATOR, 1)[1] for key in result["keys"]]

    def execute(self, params):
        """Collect the APPL_DB entry for the mapping named by params[ARG_NAME]."""
        self.ret_temp = create_template_dict(dbs=["APPL_DB"])
        mapping_name = params[self.ARG_NAME]
        self.ns = params["namespace"]
        self.init_dash_vnet_mapping_table_appl_info(mapping_name)
        return self.ret_temp

    def init_dash_vnet_mapping_table_appl_info(self, dash_vnet_mapping_table_name):
        """Fetch the DASH_VNET_MAPPING_TABLE entry and fold it into the template."""
        request = MatchRequest(db="APPL_DB",
                               table="DASH_VNET_MAPPING_TABLE",
                               key_pattern=dash_vnet_mapping_table_name,
                               ns=self.ns)
        result = self.match_engine.fetch(request)
        self.add_to_ret_template(request.table, request.db, result["keys"], result["error"])

    def return_pb2_obj(self):
        """Protobuf message type used to decode this table's values."""
        return VnetMapping()
os.path.dirname(os.path.realpath(__file__)) UPDATER_CONF_FILE = f"{SCRIPT_DIR}/gcu_services_validator.conf.json" @@ -137,7 +138,7 @@ def _report_mismatch(self, run_data, upd_data): str(jsondiff.diff(run_data, upd_data))[0:40])) def apply(self, change): - run_data = self._get_running_config() + run_data = get_config_db_as_json(self.scope) upd_data = prune_empty_table(change.apply(copy.deepcopy(run_data))) upd_keys = defaultdict(dict) @@ -146,7 +147,7 @@ def apply(self, change): ret = self._services_validate(run_data, upd_data, upd_keys) if not ret: - run_data = self._get_running_config() + run_data = get_config_db_as_json(self.scope) self.remove_backend_tables_from_config(upd_data) self.remove_backend_tables_from_config(run_data) if upd_data != run_data: @@ -159,31 +160,3 @@ def apply(self, change): def remove_backend_tables_from_config(self, data): for key in self.backend_tables: data.pop(key, None) - - def _get_running_config(self): - _, fname = tempfile.mkstemp(suffix="_changeApplier") - - if self.scope: - cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.scope] - else: - cmd = ['sonic-cfggen', '-d', '--print-data'] - - with open(fname, "w") as file: - result = subprocess.Popen(cmd, stdout=file, stderr=subprocess.PIPE, text=True) - _, err = result.communicate() - - return_code = result.returncode - if return_code: - os.remove(fname) - raise GenericConfigUpdaterError( - f"Failed to get running config for scope: {self.scope}," + - f"Return code: {return_code}, Error: {err}") - - run_data = {} - try: - with open(fname, "r") as file: - run_data = json.load(file) - finally: - if os.path.isfile(fname): - os.remove(fname) - return run_data diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index 75a2d03a00..9084a5ee96 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -22,7 
+22,7 @@ "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40","Mellanox-SN4700-O32","Mellanox-SN4700-V64", "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], - "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "Mellanox-SN5600-C256X1", "ACS-SN5400" ], + "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "Mellanox-SN5600-C256S1", "ACS-SN5400", "Mellanox-SN5600-C224O8" ], "spc5": ["ACS-SN5640"] }, "broadcom_asics": { diff --git a/generic_config_updater/generic_updater.py b/generic_config_updater/generic_updater.py index 8ce27455bb..e8bb021808 100644 --- a/generic_config_updater/generic_updater.py +++ b/generic_config_updater/generic_updater.py @@ -17,9 +17,11 @@ def extract_scope(path): if not path: - raise Exception("Wrong patch with empty path.") + raise GenericConfigUpdaterError("Wrong patch with empty path.") pointer = jsonpointer.JsonPointer(path) - parts = pointer.parts + + # Re-escapes + parts = [jsonpointer.escape(part) for part in pointer.parts] if not parts: raise GenericConfigUpdaterError("Wrong patch with empty path.") if parts[0].startswith("asic"): diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index 452bad1ee7..7821557e71 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -53,6 +53,28 @@ def __eq__(self, other): return self.patch == other.patch return False + +def get_config_db_as_json(scope=None): + text = get_config_db_as_text(scope=scope) + 
config_db_json = json.loads(text) + config_db_json.pop("bgpraw", None) + return config_db_json + + +def get_config_db_as_text(scope=None): + if scope is not None and scope != multi_asic.DEFAULT_NAMESPACE: + cmd = ['sonic-cfggen', '-d', '--print-data', '-n', scope] + else: + cmd = ['sonic-cfggen', '-d', '--print-data'] + result = subprocess.Popen(cmd, shell=False, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + text, err = result.communicate() + return_code = result.returncode + if return_code: + raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {scope}," + f" Return code: {return_code}, Error: {err}") + return text + + class ConfigWrapper: def __init__(self, yang_dir=YANG_DIR, scope=multi_asic.DEFAULT_NAMESPACE): self.scope = scope @@ -60,24 +82,10 @@ def __init__(self, yang_dir=YANG_DIR, scope=multi_asic.DEFAULT_NAMESPACE): self.sonic_yang_with_loaded_models = None def get_config_db_as_json(self): - text = self._get_config_db_as_text() - config_db_json = json.loads(text) - config_db_json.pop("bgpraw", None) - return config_db_json + return get_config_db_as_json(self.scope) def _get_config_db_as_text(self): - if self.scope is not None and self.scope != multi_asic.DEFAULT_NAMESPACE: - cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.scope] - else: - cmd = ['sonic-cfggen', '-d', '--print-data'] - - result = subprocess.Popen(cmd, shell=False, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - text, err = result.communicate() - return_code = result.returncode - if return_code: # non-zero means failure - raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.scope}," - f" Return code: {return_code}, Error: {err}") - return text + return get_config_db_as_text(self.scope) def get_sonic_yang_as_json(self): config_db_json = self.get_config_db_as_json() @@ -239,8 +247,7 @@ def validate_lanes(self, config_db): for port in port_to_lanes_map: lanes = port_to_lanes_map[port] for lane in lanes: 
- # default lane would be 0, it does not need validate duplication. - if lane in existing and lane != '0': + if lane in existing: return False, f"'{lane}' lane is used multiple times in PORT: {set([port, existing[lane]])}" existing[lane] = port return True, None diff --git a/scripts/config_validator.py b/scripts/config_validator.py new file mode 100755 index 0000000000..ee5789e95a --- /dev/null +++ b/scripts/config_validator.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 +import json +import argparse +import sonic_yang + +from sonic_py_common import logger + +YANG_MODELS_DIR = "/usr/local/yang-models" +SYSLOG_IDENTIFIER = 'config_validator' + +# Global logger instance +log = logger.Logger(SYSLOG_IDENTIFIER) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('-c', + dest='config', + metavar='config file', + type=str, + required=True, + help='the config file to be validated', + default=None) + + args = parser.parse_args() + config_file = args.config + with open(config_file) as fp: + config = json.load(fp) + # Run yang validation + yang_parser = sonic_yang.SonicYang(YANG_MODELS_DIR) + yang_parser.loadYangModel() + try: + yang_parser.loadData(configdbJson=config) + yang_parser.validate_data_tree() + except sonic_yang.SonicYangException as e: + log.log_error("Yang validation failed: " + str(e)) + raise + if len(yang_parser.tablesWithOutYang): + log.log_error("Tables without yang models: " + str(yang_parser.tablesWithOutYang)) + raise Exception("Tables without yang models: " + str(yang_parser.tablesWithOutYang)) + + +if __name__ == "__main__": + main() diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index fcfce83f64..7f699d6892 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -6,6 +6,7 @@ import sys import traceback import re +import subprocess from sonic_py_common import device_info, logger from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, SonicDBConfig @@ -1301,6 +1302,34 @@ def migrate(self): 
version = next_version # Perform common migration ops self.common_migration_ops() + # Perform yang validation + self.validate() + + def validate(self): + config = self.configDB.get_config() + # Fix table key in tuple + for table_name, table in config.items(): + new_table = {} + hit = False + for table_key, table_val in table.items(): + if isinstance(table_key, tuple): + new_key = "|".join(table_key) + new_table[new_key] = table_val + hit = True + else: + new_table[table_key] = table_val + if hit: + config[table_name] = new_table + config_file = "/tmp/validate.json" + with open(config_file, 'w') as fp: + json.dump(config, fp) + process = subprocess.Popen(["config_validator.py", "-c", config_file]) + # Check validation result for unit test + # Check validation result for end to end test + mark_file = "/etc/sonic/mgmt_test_mark" + if os.environ.get("UTILITIES_UNIT_TESTING", "0") == "2" or os.path.exists(mark_file): + ret = process.wait() + assert ret == 0, "Yang validation failed" def main(): try: diff --git a/scripts/express-reboot b/scripts/express-reboot new file mode 120000 index 0000000000..c912fdc7e5 --- /dev/null +++ b/scripts/express-reboot @@ -0,0 +1 @@ +fast-reboot \ No newline at end of file diff --git a/scripts/fast-reboot b/scripts/fast-reboot index b43f604e0d..24b818a25d 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -170,7 +170,7 @@ function init_warm_reboot_states() # If the current running instance was booted up with warm reboot. Then # the current DB contents will likely mark warm reboot is done. # Clear these states so that the next boot up image won't get confused. 
- if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then + if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "express-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then sonic-db-cli STATE_DB eval " for _, key in ipairs(redis.call('keys', 'WARM_RESTART_TABLE|*')) do redis.call('hdel', key, 'state') @@ -196,8 +196,13 @@ function request_pre_shutdown() debug "Requesting platform reboot pre-check ..." ${DEVPATH}/${PLATFORM}/${PLATFORM_REBOOT_PRE_CHECK} ${REBOOT_TYPE} fi - debug "Requesting pre-shutdown ..." - STATE=$(timeout 5s docker exec syncd /usr/bin/syncd_request_shutdown --pre &> /dev/null; if [[ $? == 124 ]]; then echo "timed out"; fi) + if [[ "$REBOOT_TYPE" = "express-reboot" ]]; then + debug "Requesting express boot pre-shutdown ..." + STATE=$(timeout 5s docker exec syncd /usr/bin/syncd_request_shutdown --pxe &> /dev/null; if [[ $? == 124 ]]; then echo "timed out"; fi) + else + debug "Requesting pre-shutdown ..." + STATE=$(timeout 5s docker exec syncd /usr/bin/syncd_request_shutdown --pre &> /dev/null; if [[ $? == 124 ]]; then echo "timed out"; fi) + fi if [[ x"${STATE}" == x"timed out" ]]; then error "Failed to request pre-shutdown" fi @@ -244,7 +249,7 @@ function backup_database() { debug "Backing up database ..." 
- if [[ "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then + if [[ "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "express-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then # Advanced reboot: dump state to host disk sonic-db-cli ASIC_DB FLUSHDB > /dev/null sonic-db-cli COUNTERS_DB FLUSHDB > /dev/null @@ -379,27 +384,58 @@ function setup_reboot_variables() { # Kernel and initrd image HWSKU=$(show platform summary --json | python -c 'import sys, json; print(json.load(sys.stdin)["hwsku"])') + CURR_SONIC_IMAGE=$(sonic-installer list | grep "Current: " | cut -d ' ' -f 2) NEXT_SONIC_IMAGE=$(sonic-installer list | grep "Next: " | cut -d ' ' -f 2) IMAGE_PATH="/host/image-${NEXT_SONIC_IMAGE#SONiC-OS-}" + if [ "$NEXT_SONIC_IMAGE" = "$CURR_SONIC_IMAGE" ]; then + if [[ -f ${DEVPATH}/${PLATFORM}/installer.conf ]]; then + . ${DEVPATH}/${PLATFORM}/installer.conf + fi + else + tmp_dir=`mktemp -d` + mount -o ro $IMAGE_PATH/fs.squashfs $tmp_dir + if [[ -f $tmp_dir/${DEVPATH}/${PLATFORM}/installer.conf ]]; then + . 
$tmp_dir/${DEVPATH}/${PLATFORM}/installer.conf + fi + umount $tmp_dir + rm -rf $tmp_dir + fi + if grep -q aboot_platform= /host/machine.conf; then if is_secureboot; then KERNEL_IMAGE="" BOOT_OPTIONS="SONIC_BOOT_TYPE=${BOOT_TYPE_ARG} secure_boot_enable=1" else KERNEL_IMAGE="$(ls $IMAGE_PATH/boot/vmlinuz-*)" - BOOT_OPTIONS="$(cat "$IMAGE_PATH/kernel-cmdline" | tr '\n' ' ') SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" + BOOT_OPTIONS="$(cat "$IMAGE_PATH/kernel-cmdline" | tr '\n' ' ') ${KEXEC_LOAD_EXTRA_CMDLINE_LINUX} SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" fi INITRD=$(echo $KERNEL_IMAGE | sed 's/vmlinuz/initrd.img/g') elif grep -q onie_platform= /host/machine.conf; then if [ -r /host/grub/grub.cfg ]; then KERNEL_OPTIONS=$(cat /host/grub/grub.cfg | sed "/$NEXT_SONIC_IMAGE'/,/}/"'!'"g" | grep linux) KERNEL_IMAGE="/host$(echo $KERNEL_OPTIONS | cut -d ' ' -f 2)" - BOOT_OPTIONS="$(echo $KERNEL_OPTIONS | sed -e 's/\s*linux\s*/BOOT_IMAGE=/') SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" + BOOT_OPTIONS="$(echo $KERNEL_OPTIONS | sed -e 's/\s*linux\s*/BOOT_IMAGE=/') ${KEXEC_LOAD_EXTRA_CMDLINE_LINUX} SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" INITRD=$(echo $KERNEL_IMAGE | sed 's/vmlinuz/initrd.img/g') # Handle architectures supporting Device Tree elif [ -f /sys/firmware/devicetree/base/chosen/bootargs ]; then KERNEL_IMAGE="$(ls $IMAGE_PATH/boot/vmlinuz-*)" - BOOT_OPTIONS="$(cat /sys/firmware/devicetree/base/chosen/bootargs | sed 's/.$//') SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" + # Fetch next_boot variable + SONIC_IMAGE_NAME="$( fw_printenv boot_next | cut -d '=' -f 2- )" + SUFFIX="" + if [[ ${SONIC_IMAGE_NAME} == "run sonic_image_2" ]]; then + SUFFIX="_old" + fi + SONIC_BOOTARGS="$(fw_printenv sonic_bootargs${SUFFIX} | cut -d '=' -f 2- )" + if [[ ! 
-z "${SONIC_BOOTARGS}" ]]; then + LINUX_BOOTARGS="$( fw_printenv linuxargs${SUFFIX} | cut -d '=' -f 2- )" + BAUDRATE="$( fw_printenv baudrate | cut -d '=' -f 2- )" + BOOT_OPTIONS="$(echo $SONIC_BOOTARGS | sed -e "s/\${baudrate}/$BAUDRATE/g")" + BOOT_OPTIONS="$(echo $BOOT_OPTIONS | sed -e "s@\${linuxargs$SUFFIX}@$LINUX_BOOTARGS@g")" + BOOT_OPTIONS="$(echo $BOOT_OPTIONS | sed -e 's/.$//') ${KEXEC_LOAD_EXTRA_CMDLINE_LINUX} SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" + else + # Fetch bootargs from device tree of the current image + BOOT_OPTIONS="$(cat /sys/firmware/devicetree/base/chosen/bootargs | sed 's/.$//') ${KEXEC_LOAD_EXTRA_CMDLINE_LINUX} SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" + fi INITRD=$(echo $KERNEL_IMAGE | sed 's/vmlinuz/initrd.img/g') # If initrd is a U-Boot uImage, remove the uImage header @@ -421,6 +457,12 @@ function setup_reboot_variables() local fstype=$(blkid -o value -s TYPE ${sonic_dev}) BOOT_OPTIONS="${BOOT_OPTIONS} ssd-upgrader-part=${sonic_dev},${fstype}" fi + + if [[ "$sonic_asic_type" == "mellanox" ]]; then + # Set governor to performance to speed up boot process. + # The governor is reset back to kernel default in warmboot-finalizer script. + BOOT_OPTIONS="${BOOT_OPTIONS} cpufreq.default_governor=performance" + fi } function check_docker_exec() @@ -437,7 +479,7 @@ function check_docker_exec() function check_db_integrity() { - if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then + if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "express-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then CHECK_DB_INTEGRITY=0 /usr/local/bin/check_db_integrity.py || CHECK_DB_INTEGRITY=$? 
if [[ CHECK_DB_INTEGRITY -ne 0 ]]; then @@ -482,7 +524,7 @@ function reboot_pre_check() # Make sure ASIC configuration has not changed between images ASIC_CONFIG_CHECK_SCRIPT="/usr/local/bin/asic_config_check" ASIC_CONFIG_CHECK_SUCCESS=0 - if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; then + if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "express-reboot" ]]; then ASIC_CONFIG_CHECK_EXIT_CODE=0 ${ASIC_CONFIG_CHECK_SCRIPT} || ASIC_CONFIG_CHECK_EXIT_CODE=$? @@ -566,6 +608,9 @@ function check_conflict_boot_in_fw_update() { "warm-reboot") FW_AU_TASK_FILE_EXP="${FIRMWARE_AU_STATUS_DIR}/warm_fw_au_task" ;; + "express-reboot") + FW_AU_TASK_FILE_EXP="${FIRMWARE_AU_STATUS_DIR}/express_fw_au_task" + ;; esac FW_AU_TASK_FILE=$(compgen -G ${FW_AU_TASK_FILE_REGEX}) || true if [[ -n "${FW_AU_TASK_FILE}" ]] && [[ ! -f "${FW_AU_TASK_FILE_EXP}" ]]; then @@ -593,12 +638,15 @@ if [[ x"${DETACH}" == x"yes" && x"${ALREADY_DETACHED}" == x"" ]]; then exit $? 
fi +sonic_asic_type=$(sonic-cfggen -y /etc/sonic/sonic_version.yml -v asic_type) +if [[ "$REBOOT_TYPE" = "express-reboot" ]] && [[ "$sonic_asic_type" != "cisco-8000" ]]; then + echo "eXpress Boot is not supported" + exit "${EXIT_FAILURE}" +fi check_conflict_boot_in_fw_update -sonic_asic_type=$(sonic-cfggen -y /etc/sonic/sonic_version.yml -v asic_type) - # Check reboot type supported BOOT_TYPE_ARG="cold" case "$REBOOT_TYPE" in @@ -624,6 +672,12 @@ case "$REBOOT_TYPE" in trap clear_boot EXIT HUP INT QUIT TERM KILL ABRT ALRM config warm_restart enable system ;; + "express-reboot") + check_warm_restart_in_progress + BOOT_TYPE_ARG="express" + trap clear_boot EXIT HUP INT QUIT TERM KILL ABRT ALRM + config warm_restart enable system + ;; *) error "Not supported reboot type: $REBOOT_TYPE" exit "${EXIT_NOT_SUPPORTED}" @@ -672,7 +726,6 @@ if [[ "$sonic_asic_type" == "mellanox" ]]; then fi fi - if is_secureboot && grep -q aboot_machine= /host/machine.conf; then load_aboot_secureboot_kernel else @@ -693,7 +746,7 @@ init_warm_reboot_states setup_control_plane_assistant TEAMD_INCREASE_RETRY_COUNT=0 -if [[ "${REBOOT_TYPE}" = "warm-reboot" || "${REBOOT_TYPE}" = "fastfast-reboot" ]]; then +if [[ "${REBOOT_TYPE}" = "warm-reboot" || "${REBOOT_TYPE}" = "fastfast-reboot" || "$REBOOT_TYPE" = "express-reboot" ]]; then TEAMD_RETRY_COUNT_PROBE_RC=0 /usr/local/bin/teamd_increase_retry_count.py --probe-only || TEAMD_RETRY_COUNT_PROBE_RC=$? 
if [[ ${TEAMD_RETRY_COUNT_PROBE_RC} -ne 0 ]]; then @@ -708,7 +761,7 @@ if [[ "${REBOOT_TYPE}" = "warm-reboot" || "${REBOOT_TYPE}" = "fastfast-reboot" ] fi fi -if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then +if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "express-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then # Freeze orchagent for warm restart # Ask orchagent_restart_check to try freeze 5 times with interval of 2 seconds, # it is possible that the orchagent is in transient state and no opportunity to freeze @@ -725,7 +778,7 @@ if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" || "$ fi fi -if [[ ( "${REBOOT_TYPE}" = "warm-reboot" || "${REBOOT_TYPE}" = "fastfast-reboot" ) && "${TEAMD_INCREASE_RETRY_COUNT}" -eq 1 ]]; then +if [[ ( "${REBOOT_TYPE}" = "warm-reboot" || "${REBOOT_TYPE}" = "fastfast-reboot" || "${REBOOT_TYPE}" = "express-reboot" ) && "${TEAMD_INCREASE_RETRY_COUNT}" -eq 1 ]]; then /usr/local/bin/teamd_increase_retry_count.py fi @@ -768,6 +821,10 @@ for timer in ${TIMERS}; do debug "Stopped ${timer} ..." 
done +if [[ "${REBOOT_TYPE}" == "express-reboot" ]]; then + SHUTDOWN_ORDER_FILE="/etc/sonic/warm-reboot_order" +fi + if [[ -f ${SHUTDOWN_ORDER_FILE} ]]; then SERVICES_TO_STOP="$(cat ${SHUTDOWN_ORDER_FILE})" else @@ -804,7 +861,7 @@ for service in ${SERVICES_TO_STOP}; do debug "Stopped ${service}" if [[ "${service}" = "swss" ]]; then - if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; then + if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "express-reboot" ]]; then # Pre-shutdown syncd initialize_pre_shutdown diff --git a/scripts/generate_dump b/scripts/generate_dump index 38774c4a37..6011344fe3 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1082,18 +1082,24 @@ save_file() { fi if $do_gzip; then - gz_path="${gz_path}.gz" - tar_path="${tar_path}.gz" - if $NOOP; then + if [ ! -d "$path" ]; then + gz_path="${gz_path}.gz" + tar_path="${tar_path}.gz" + + if $NOOP; then echo "gzip -c $orig_path > $gz_path" - else + else gzip -c $orig_path > $gz_path - fi - else - if $NOOP; then - echo "cp $orig_path $gz_path" + fi else - cp $orig_path $gz_path + gz_path="${gz_path}.tar.gz" + tar_path="${tar_path}.tar.gz" + + if $NOOP; then + echo "tar -czvf $gz_path -C $(dirname $orig_path) $(basename $orig_path)" + else + tar -czvf "$gz_path" -C "$(dirname "$orig_path")" "$(basename "$orig_path")" + fi fi fi @@ -1241,6 +1247,18 @@ collect_mellanox() { fi fi + # collect the sdk dump + local sdk_dbg_folder="/var/log/sdk_dbg" + for file in $(find $sdk_dbg_folder -name "sx_sdk_*") + do + if [[ $file != *.gz ]] + then + save_file $file sai_sdk_dump true + else + save_file $file sai_sdk_dump false + fi + done + # run 'hw-management-generate-dump.sh' script and save the result file HW_DUMP_FILE=/usr/bin/hw-management-generate-dump.sh if [ -f "$HW_DUMP_FILE" ]; then @@ -1613,7 +1631,7 @@ collect_cisco_8000() { } ############################################################################## -# 
collect_innovium +# collect_marvell_teralynx # Globals: # None # Arguments: @@ -1621,7 +1639,7 @@ collect_cisco_8000() { # Retuens: # None ############################################################################## -collect_innovium() { +collect_marvell_teralynx() { save_cmd "ivmcmd 'show techsupport -i /innovium/show_techsupport_infile'" "show_techsupport_op_ifcs.log" save_cmd "ivmcmd 'show techsupport -i /innovium/show_techsupport_infile_iSAI'" "show_techsupport_op_iSAI.log" } @@ -1676,6 +1694,45 @@ collect_nvidia_bluefield() { fi } +############################################################################### +# Collect Pensando specific information +# Globals: +# MKDIR +# V +# NOOP +# RM +# Arguments: +# None +# Returns: +# None +############################################################################### +collect_pensando() { + trap 'handle_error $? $LINENO' ERR + platform=$(grep 'onie_platform=' /host/machine.conf | cut -d '=' -f 2) + pipeline=`cat /usr/share/sonic/device/${platform}/default_pipeline` + if [ ${pipeline} = "polaris" ]; then + dpu_container_name="polaris" + else + dpu_container_name="dpu" + fi + local dpu_dump_folder="/root/dpu_dump" + $MKDIR $V -p $dpu_dump_folder + if $NOOP; then + echo "docker exec ${dpu_container_name} /nic/tools/collect_techsupport.sh" + else + output=$(docker exec ${dpu_container_name} /nic/tools/collect_techsupport.sh 2>&1) + if echo "${output}" | grep -q "Techsupport collected at"; then + file_path=$(echo "${output}" | grep -oP '(?<=Techsupport collected at ).*') + file_name=$(basename "${file_path}") + copy_from_docker ${dpu_container_name} ${file_path} ${dpu_dump_folder} + save_file ${dpu_dump_folder}/${file_name} ${dpu_container_name}_techsupport false + else + echo "Failed to collect ${dpu_container_name} container techsupport..." 
+ fi + fi + $RM $V -rf $dpu_dump_folder +} + ############################################################################### # Save log file # Globals: @@ -2034,6 +2091,12 @@ main() { fi wait + save_cmd "stpctl all" "stp.log" + save_cmd "show spanning_tree" "stp.show" + save_cmd "show spanning_tree statistics" "stp.stats" + save_cmd "show spanning_tree bpdu_guard" "stp.bg" + save_cmd "show spanning_tree root_guard" "stp.rg" + save_cmd "ps aux" "ps.aux" & save_cmd "top -b -n 1" "top" & save_cmd "free" "free" & @@ -2102,14 +2165,18 @@ main() { collect_nvidia_bluefield fi - if [ "$asic" = "innovium" ]; then - collect_innovium + if [ "$asic" = "marvell-teralynx" ]; then + collect_marvell_teralynx fi if [ "$asic" = "marvell" ]; then collect_marvell fi + if [ "$asic" = "pensando" ]; then + collect_pensando + fi + # 2nd counter snapshot late. Need 2 snapshots to make sense of counters trend. save_counter_snapshot $asic 2 @@ -2168,6 +2235,9 @@ finalize() { else echo "WARNING: gzip operation appears to have failed." >&2 fi + # sometimes gzip takes more than 20 sec to finish, causing file time create validation + # to fail. touching the tarfile created to refresh modify time. + touch ${TARFILE} fi # Invoke the TechSupport Cleanup Hook diff --git a/scripts/route_check.py b/scripts/route_check.py index a1abd3c352..56c845424c 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -46,6 +46,7 @@ import signal import traceback import subprocess +import concurrent.futures from ipaddress import ip_network from swsscommon import swsscommon @@ -338,10 +339,18 @@ def is_suppress_fib_pending_enabled(namespace): return state == 'enabled' -def get_frr_routes(namespace): +def fetch_routes(cmd): """ - Read routes from zebra through CLI command - :return frr routes dictionary + Fetch routes using the given command. 
+ """ + output = subprocess.check_output(cmd, text=True) + return json.loads(output) + + +def get_frr_routes_parallel(namespace): + """ + Read routes from zebra through CLI command for IPv4 and IPv6 in parallel + :return combined IPv4 and IPv6 routes dictionary. """ if namespace == multi_asic.DEFAULT_NAMESPACE: v4_route_cmd = ['show', 'ip', 'route', 'json'] @@ -350,12 +359,18 @@ def get_frr_routes(namespace): v4_route_cmd = ['show', 'ip', 'route', '-n', namespace, 'json'] v6_route_cmd = ['show', 'ipv6', 'route', '-n', namespace, 'json'] - output = subprocess.check_output(v4_route_cmd, text=True) - routes = json.loads(output) - output = subprocess.check_output(v6_route_cmd, text=True) - routes.update(json.loads(output)) - print_message(syslog.LOG_DEBUG, "FRR Routes: namespace={}, routes={}".format(namespace, routes)) - return routes + with concurrent.futures.ThreadPoolExecutor() as executor: + future_v4 = executor.submit(fetch_routes, v4_route_cmd) + future_v6 = executor.submit(fetch_routes, v6_route_cmd) + + # Wait for both results to complete + v4_routes = future_v4.result() + v6_routes = future_v6.result() + + # Combine both IPv4 and IPv6 routes + v4_routes.update(v6_routes) + print_message(syslog.LOG_DEBUG, "FRR Routes: namespace={}, routes={}".format(namespace, v4_routes)) + return v4_routes def get_interfaces(namespace): @@ -556,7 +571,7 @@ def check_frr_pending_routes(namespace): retries = FRR_CHECK_RETRIES for i in range(retries): missed_rt = [] - frr_routes = get_frr_routes(namespace) + frr_routes = get_frr_routes_parallel(namespace) for _, entries in frr_routes.items(): for entry in entries: @@ -689,8 +704,9 @@ def _filter_out_neigh_route(routes, neighs): return rt_appl_miss, rt_asic_miss -def check_routes(namespace): +def check_routes_for_namespace(namespace): """ + Process a Single Namespace: The heart of this script which runs the checks. Read APPL-DB & ASIC-DB, the relevant tables for route checking. 
Checkout routes in ASIC-DB to match APPL-DB, discounting local & @@ -708,98 +724,113 @@ def check_routes(namespace): :return (0, None) on sucess, else (-1, results) where results holds the unjustifiable entries. """ - namespace_list = [] - if namespace is not multi_asic.DEFAULT_NAMESPACE and namespace in multi_asic.get_namespace_list(): - namespace_list.append(namespace) - else: - namespace_list = multi_asic.get_namespace_list() - print_message(syslog.LOG_INFO, "Checking routes for namespaces: ", namespace_list) results = {} - adds = {} - deletes = {} - for namespace in namespace_list: - intf_appl_miss = [] - rt_appl_miss = [] - rt_asic_miss = [] - rt_frr_miss = [] - adds[namespace] = [] - deletes[namespace] = [] + adds = [] + deletes = [] + intf_appl_miss = [] + rt_appl_miss = [] + rt_asic_miss = [] + rt_frr_miss = [] - selector, subs, rt_asic = get_asicdb_routes(namespace) + selector, subs, rt_asic = get_asicdb_routes(namespace) - rt_appl = get_appdb_routes(namespace) - intf_appl = get_interfaces(namespace) + rt_appl = get_appdb_routes(namespace) + intf_appl = get_interfaces(namespace) - # Diff APPL-DB routes & ASIC-DB routes - rt_appl_miss, rt_asic_miss = diff_sorted_lists(rt_appl, rt_asic) + # Diff APPL-DB routes & ASIC-DB routes + rt_appl_miss, rt_asic_miss = diff_sorted_lists(rt_appl, rt_asic) - # Check missed ASIC routes against APPL-DB INTF_TABLE - _, rt_asic_miss = diff_sorted_lists(intf_appl, rt_asic_miss) - rt_asic_miss = filter_out_default_routes(rt_asic_miss) - rt_asic_miss = filter_out_vnet_routes(namespace, rt_asic_miss) - rt_asic_miss = filter_out_standalone_tunnel_routes(namespace, rt_asic_miss) - rt_asic_miss = filter_out_soc_ip_routes(namespace, rt_asic_miss) + # Check missed ASIC routes against APPL-DB INTF_TABLE + _, rt_asic_miss = diff_sorted_lists(intf_appl, rt_asic_miss) + rt_asic_miss = filter_out_default_routes(rt_asic_miss) + rt_asic_miss = filter_out_vnet_routes(namespace, rt_asic_miss) + rt_asic_miss = 
filter_out_standalone_tunnel_routes(namespace, rt_asic_miss) + rt_asic_miss = filter_out_soc_ip_routes(namespace, rt_asic_miss) + # Check APPL-DB INTF_TABLE with ASIC table route entries + intf_appl_miss, _ = diff_sorted_lists(intf_appl, rt_asic) - # Check APPL-DB INTF_TABLE with ASIC table route entries - intf_appl_miss, _ = diff_sorted_lists(intf_appl, rt_asic) + if rt_appl_miss: + rt_appl_miss = filter_out_local_interfaces(namespace, rt_appl_miss) - if rt_appl_miss: - rt_appl_miss = filter_out_local_interfaces(namespace, rt_appl_miss) + if rt_appl_miss: + rt_appl_miss = filter_out_voq_neigh_routes(namespace, rt_appl_miss) - if rt_appl_miss: - rt_appl_miss = filter_out_voq_neigh_routes(namespace, rt_appl_miss) + # NOTE: On dualtor environment, ignore any route miss for the + # neighbors learned from the vlan subnet. + if rt_appl_miss or rt_asic_miss: + rt_appl_miss, rt_asic_miss = filter_out_vlan_neigh_route_miss(namespace, rt_appl_miss, rt_asic_miss) - # NOTE: On dualtor environment, ignore any route miss for the - # neighbors learned from the vlan subnet. 
- if rt_appl_miss or rt_asic_miss: - rt_appl_miss, rt_asic_miss = filter_out_vlan_neigh_route_miss(namespace, rt_appl_miss, rt_asic_miss) + if rt_appl_miss or rt_asic_miss: + # Look for subscribe updates for a second + adds, deletes = get_subscribe_updates(selector, subs) - if rt_appl_miss or rt_asic_miss: - # Look for subscribe updates for a second - adds[namespace], deletes[namespace] = get_subscribe_updates(selector, subs) + # Drop all those for which SET received + rt_appl_miss, _ = diff_sorted_lists(rt_appl_miss, adds) - # Drop all those for which SET received - rt_appl_miss, _ = diff_sorted_lists(rt_appl_miss, adds[namespace]) + # Drop all those for which DEL received + rt_asic_miss, _ = diff_sorted_lists(rt_asic_miss, deletes) - # Drop all those for which DEL received - rt_asic_miss, _ = diff_sorted_lists(rt_asic_miss, deletes[namespace]) + if rt_appl_miss: + results["missed_ROUTE_TABLE_routes"] = rt_appl_miss - if rt_appl_miss: - if namespace not in results: - results[namespace] = {} - results[namespace]["missed_ROUTE_TABLE_routes"] = rt_appl_miss + if intf_appl_miss: + results["missed_INTF_TABLE_entries"] = intf_appl_miss - if intf_appl_miss: - if namespace not in results: - results[namespace] = {} - results[namespace]["missed_INTF_TABLE_entries"] = intf_appl_miss + if rt_asic_miss: + results["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss - if rt_asic_miss: - if namespace not in results: - results[namespace] = {} - results[namespace]["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss + rt_frr_miss = check_frr_pending_routes(namespace) - rt_frr_miss = check_frr_pending_routes(namespace) + if rt_frr_miss: + results["missed_FRR_routes"] = rt_frr_miss - if rt_frr_miss: - if namespace not in results: - results[namespace] = {} - results[namespace]["missed_FRR_routes"] = rt_frr_miss + if results: + if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: + print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} \ + but all routes 
in APPL_DB and ASIC_DB are in sync".format(namespace)) + if is_suppress_fib_pending_enabled(namespace): + mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl) + + return results, adds, deletes - if results: - if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: - print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} \ - but all routes in APPL_DB and ASIC_DB are in sync".format(namespace)) - if is_suppress_fib_pending_enabled(namespace): - mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl) + +def check_routes(namespace): + """ + Main function to parallelize route checks across all namespaces. + """ + namespace_list = [] + if namespace is not multi_asic.DEFAULT_NAMESPACE and namespace in multi_asic.get_namespace_list(): + namespace_list.append(namespace) + else: + namespace_list = multi_asic.get_namespace_list() + print_message(syslog.LOG_INFO, "Checking routes for namespaces: ", namespace_list) + + results = {} + all_adds = {} + all_deletes = {} + + # Use ThreadPoolExecutor to parallelize the check for each namespace + with concurrent.futures.ThreadPoolExecutor() as executor: + futures = {executor.submit(check_routes_for_namespace, ns): ns for ns in namespace_list} + + for future in concurrent.futures.as_completed(futures): + ns = futures[future] + try: + result, adds, deletes = future.result() + if result: + results[ns] = result + all_adds[ns] = adds + all_deletes[ns] = deletes + except Exception as e: + print_message(syslog.LOG_ERR, "Error processing namespace {}: {}".format(ns, e)) if results: print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}") print_message(syslog.LOG_WARNING, "Failed. 
Look at reported mismatches above") - print_message(syslog.LOG_WARNING, "add: ", json.dumps(adds, indent=4)) - print_message(syslog.LOG_WARNING, "del: ", json.dumps(deletes, indent=4)) + print_message(syslog.LOG_WARNING, "add: ", json.dumps(all_adds, indent=4)) + print_message(syslog.LOG_WARNING, "del: ", json.dumps(all_deletes, indent=4)) return -1, results else: print_message(syslog.LOG_INFO, "All good!") @@ -862,6 +893,5 @@ def main(): return ret, res - if __name__ == "__main__": sys.exit(main()[0]) diff --git a/scripts/vnet_route_check.py b/scripts/vnet_route_check.py index d925427d40..c747bf7efb 100755 --- a/scripts/vnet_route_check.py +++ b/scripts/vnet_route_check.py @@ -74,7 +74,7 @@ def print_message(lvl, *args): def check_vnet_cfg(): ''' Returns True if VNET is configured in APP_DB or False if no VNET configuration. ''' - db = swsscommon.DBConnector('APPL_DB', 0) + db = swsscommon.DBConnector('APPL_DB', 0, True) vnet_db_keys = swsscommon.Table(db, 'VNET_TABLE').getKeys() @@ -85,7 +85,7 @@ def get_vnet_intfs(): ''' Returns dictionary of VNETs and related VNET interfaces. Format: { : [ ] } ''' - db = swsscommon.DBConnector('APPL_DB', 0) + db = swsscommon.DBConnector('APPL_DB', 0, True) intfs_table = swsscommon.Table(db, 'INTF_TABLE') intfs_keys = swsscommon.Table(db, 'INTF_TABLE').getKeys() @@ -109,7 +109,7 @@ def get_all_rifs_oids(): ''' Returns dictionary of all router interfaces and their OIDs. Format: { : } ''' - db = swsscommon.DBConnector('COUNTERS_DB', 0) + db = swsscommon.DBConnector('COUNTERS_DB', 0, True) rif_table = swsscommon.Table(db, 'COUNTERS_RIF_NAME_MAP') rif_name_oid_map = dict(rif_table.get('')[1]) @@ -140,7 +140,7 @@ def get_vrf_entries(): ''' Returns dictionary of VNET interfaces and corresponding VRF OIDs. 
Format: { : } ''' - db = swsscommon.DBConnector('ASIC_DB', 0) + db = swsscommon.DBConnector('ASIC_DB', 0, True) rif_table = swsscommon.Table(db, 'ASIC_STATE') vnet_rifs_oids = get_vnet_rifs_oids() @@ -162,7 +162,7 @@ def filter_out_vnet_ip2me_routes(vnet_routes): ''' Filters out IP2ME routes from the provided dictionary with VNET routes Format: { : { 'routes': [ ], 'vrf_oid': } } ''' - db = swsscommon.DBConnector('APPL_DB', 0) + db = swsscommon.DBConnector('APPL_DB', 0, True) all_rifs_db_keys = swsscommon.Table(db, 'INTF_TABLE').getKeys() vnet_intfs = get_vnet_intfs() @@ -198,7 +198,7 @@ def get_vnet_routes_from_app_db(): ''' Returns dictionary of VNET routes configured per each VNET in APP_DB. Format: { : { 'routes': [ ], 'vrf_oid': } } ''' - db = swsscommon.DBConnector('APPL_DB', 0) + db = swsscommon.DBConnector('APPL_DB', 0, True) vnet_intfs = get_vnet_intfs() vnet_vrfs = get_vrf_entries() @@ -245,7 +245,7 @@ def get_vnet_routes_from_asic_db(): ''' Returns dictionary of VNET routes configured per each VNET in ASIC_DB. 
Format: { : { 'routes': [ ], 'vrf_oid': } } ''' - db = swsscommon.DBConnector('ASIC_DB', 0) + db = swsscommon.DBConnector('ASIC_DB', 0, True) tbl = swsscommon.Table(db, 'ASIC_STATE') @@ -363,7 +363,7 @@ def main(): # Don't run VNET routes consistancy logic if there is no VNET configuration if not check_vnet_cfg(): return rc - asic_db = swsscommon.DBConnector('ASIC_DB', 0) + asic_db = swsscommon.DBConnector('ASIC_DB', 0, True) virtual_router = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER') if virtual_router.getKeys() != []: global default_vrf_oid diff --git a/setup.py b/setup.py index dc5fa4a9b4..4a11624a87 100644 --- a/setup.py +++ b/setup.py @@ -116,6 +116,7 @@ 'scripts/buffershow', 'scripts/coredump-compress', 'scripts/configlet', + 'scripts/config_validator.py', 'scripts/db_migrator.py', 'scripts/decode-syseeprom', 'scripts/dropcheck', @@ -127,6 +128,7 @@ 'scripts/dump_nat_entries.py', 'scripts/debug_voq_chassis_packet_drops.sh', 'scripts/ecnconfig', + 'scripts/express-reboot', 'scripts/fabricstat', 'scripts/fanshow', 'scripts/fast-reboot', @@ -237,6 +239,7 @@ 'filelock>=3.0.12', 'enlighten>=1.8.0', 'ipaddress>=1.0.23', + 'protobuf', 'jinja2>=2.11.3', 'jsondiff>=1.2.0', 'jsonpatch>=1.32.0', diff --git a/sfputil/main.py b/sfputil/main.py index 58c6855abe..80a5bcb3f2 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -1591,7 +1591,9 @@ def download_firmware(port_name, filepath): 1 = Hitless Reset to Inactive Image (Default)\n \ 2 = Attempt non-hitless Reset to Running Image\n \ 3 = Attempt Hitless Reset to Running Image\n") -def run(port_name, mode): +@click.option('--delay', metavar='', type=click.IntRange(0, 10), default=5, + help="Delay time before updating firmware information to STATE_DB") +def run(port_name, mode, delay): """Run the firmware with default mode=0""" if is_port_type_rj45(port_name): @@ -1607,6 +1609,11 @@ def run(port_name, mode): click.echo('Failed to run firmware in mode={}! 
CDB status: {}'.format(mode, status)) sys.exit(EXIT_FAIL) + # The cable firmware can be still under initialization immediately after run_firmware + # We put a delay here to avoid potential error message in accessing the cable EEPROM + if delay: + time.sleep(delay) + update_firmware_info_to_state_db(port_name) click.echo("Firmware run in mode={} success".format(mode)) diff --git a/show/main.py b/show/main.py index b7e75b24cf..971a3bc1cc 100755 --- a/show/main.py +++ b/show/main.py @@ -67,6 +67,7 @@ from . import syslog from . import dns from . import bgp_cli +from . import stp # Global Variables PLATFORM_JSON = 'platform.json' @@ -318,6 +319,7 @@ def cli(ctx): cli.add_command(system_health.system_health) cli.add_command(warm_restart.warm_restart) cli.add_command(dns.dns) +cli.add_command(stp.spanning_tree) # syslog module cli.add_command(syslog.syslog) @@ -1143,6 +1145,111 @@ def route_map(route_map_name, verbose): cmd[-1] += ' {}'.format(route_map_name) run_command(cmd, display_cmd=verbose) + +# +# 'vrrp' group ("show vrrp ...") +# +@cli.group(cls=clicommon.AliasedGroup, invoke_without_command="true") +@click.pass_context +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp(ctx, verbose): + """Show vrrp commands""" + if ctx.invoked_subcommand is not None: + return + + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp'] + run_command(cmd, display_cmd=verbose) + + +# 'interface' command +@vrrp.command('interface') +@click.pass_context +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrid', metavar='', required=False) +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp_interface(ctx, interface_name, vrid, verbose): + """show vrrp interface """ + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp'] + if vrid is not None: + cmd[-1] += ' interface {} {}'.format(interface_name, vrid) + else: + cmd[-1] += ' interface {}'.format(interface_name) + run_command(cmd, 
display_cmd=verbose) + + +# 'vrid' command +@vrrp.command('vrid') +@click.pass_context +@click.argument('vrid', metavar='', required=True) +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp_vrid(ctx, vrid, verbose): + """show vrrp vrid """ + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp {}'.format(vrid)] + run_command(cmd, display_cmd=verbose) + + +# 'summary' command +@vrrp.command('summary') +@click.pass_context +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp_summary(ctx, verbose): + """show vrrp summary""" + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp summary'] + run_command(cmd, display_cmd=verbose) + + +# +# 'vrrp6' group ("show vrrp6 ...") +# +@cli.group(cls=clicommon.AliasedGroup, invoke_without_command="true") +@click.pass_context +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp6(ctx, verbose): + """Show vrrp6 commands""" + if ctx.invoked_subcommand is not None: + return + + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp6'] + run_command(cmd, display_cmd=verbose) + + +# 'interface' command +@vrrp6.command('interface') +@click.pass_context +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrid', metavar='', required=False) +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp6_interface(ctx, interface_name, vrid, verbose): + """show vrrp6 interface """ + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp6'] + if vrid is not None: + cmd[-1] += ' interface {} {}'.format(interface_name, vrid) + else: + cmd[-1] += ' interface {}'.format(interface_name) + run_command(cmd, display_cmd=verbose) + + +# 'vrid' command +@vrrp6.command('vrid') +@click.pass_context +@click.argument('vrid', metavar='', required=True) +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp6_vrid(ctx, vrid, verbose): + """show vrrp6 vrid """ + cmd = ['sudo', 
constants.RVTYSH_COMMAND, '-c', 'show vrrp6 {}'.format(vrid)] + run_command(cmd, display_cmd=verbose) + + +# 'summary' command +@vrrp6.command('summary') +@click.pass_context +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp6_summary(ctx, verbose): + """show vrrp6 summary""" + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp6 summary'] + run_command(cmd, display_cmd=verbose) + + # # 'ip' group ("show ip ...") # @@ -1887,6 +1994,16 @@ def syslog(verbose): click.echo(tabulate(body, header, tablefmt="simple", stralign="left", missingval="")) +# 'spanning-tree' subcommand ("show runningconfiguration spanning_tree") +@runningconfiguration.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def spanning_tree(verbose): + """Show spanning_tree running configuration""" + stp_list = ["STP", "STP_PORT", "STP_VLAN", "STP_VLAN_PORT"] + for key in stp_list: + cmd = ['sudo', 'sonic-cfggen', '-d', '--var-json', key] + run_command(cmd, display_cmd=verbose) + # # 'startupconfiguration' group ("show startupconfiguration ...") # @@ -2303,8 +2420,8 @@ def bmp_neighbor_table(db): values["peer_addr"], values["peer_asn"], values["peer_rd"], - values["peer_port"], - values["local_addr"], + values["remote_port"], + values["local_ip"], values["local_asn"], values["local_port"], values["sent_cap"], @@ -2616,6 +2733,26 @@ def ssh(db): click.echo(tabulate(configuration, headers=hdrs, tablefmt='simple', missingval='')) +# +# 'banner' command group ("show banner ...") +# +@cli.group('banner', invoke_without_command=True) +@clicommon.pass_db +def banner(db): + """Show banner messages""" + + banner_table = db.cfgdb.get_entry('BANNER_MESSAGE', 'global') + + hdrs = ['state', 'login', 'motd', 'logout'] + data = [] + + for key in hdrs: + data.append(banner_table.get(key, '').replace('\\n', '\n')) + + messages = [data] + click.echo(tabulate(messages, headers=hdrs, tablefmt='simple', missingval='')) + + # Load plugins and register 
them helper = util_base.UtilHelper() helper.load_and_register_plugins(plugins, cli) diff --git a/show/plugins/mlnx.py b/show/plugins/mlnx.py index 04d6a78b0a..09eacbc70a 100644 --- a/show/plugins/mlnx.py +++ b/show/plugins/mlnx.py @@ -132,20 +132,6 @@ def is_issu_status_enabled(): return issu_enabled - -@mlnx.command('sniffer') -def sniffer_status(): - """ Show sniffer status """ - components = ['sdk'] - env_variable_strings = [ENV_VARIABLE_SX_SNIFFER] - for index in range(len(components)): - enabled = sniffer_status_get(env_variable_strings[index]) - if enabled is True: - click.echo(components[index] + " sniffer is enabled") - else: - click.echo(components[index] + " sniffer is disabled") - - @mlnx.command('issu') def issu_status(): """ Show ISSU status """ diff --git a/show/stp.py b/show/stp.py new file mode 100644 index 0000000000..a64d9764f5 --- /dev/null +++ b/show/stp.py @@ -0,0 +1,403 @@ +import re +import click +# import subprocess +import utilities_common.cli as clicommon +from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector + + +############################################################################## +# 'spanning_tree' group ("show spanning_tree ...") +############################################################################### +# STP show commands:- +# show spanning_tree +# show spanning_tree vlan +# show spanning_tree vlan interface +# show spanning_tree bpdu_guard +# show spanning_tree statistics +# show spanning_tree statistics vlan +# +############################################################################### +g_stp_vlanid = 0 +# +# Utility API's +# + + +def is_stp_docker_running(): + return True +# running_docker = subprocess.check_output('docker ps', shell=True) +# if running_docker.find("docker-stp".encode()) == -1: +# return False +# else: +# return True + + +def connect_to_cfg_db(): + config_db = ConfigDBConnector() + config_db.connect() + return config_db + + +def connect_to_appl_db(): + appl_db = 
SonicV2Connector(host="127.0.0.1") + appl_db.connect(appl_db.APPL_DB) + return appl_db + + +# Redis DB only supports limiter pattern search wildcards. +# check https://redis.io/commands/KEYS before using this api +# Redis-db uses glob-style patterns not regex +def stp_get_key_from_pattern(db_connect, db, pattern): + keys = db_connect.keys(db, pattern) + if keys: + return keys[0] + else: + return None + + +# get_all doesnt accept regex patterns, it requires exact key +def stp_get_all_from_pattern(db_connect, db, pattern): + key = stp_get_key_from_pattern(db_connect, db, pattern) + if key: + entry = db_connect.get_all(db, key) + return entry + + +def stp_is_port_fast_enabled(ifname): + app_db_entry = stp_get_all_from_pattern( + g_stp_appl_db, g_stp_appl_db.APPL_DB, "*STP_PORT_TABLE:{}".format(ifname)) + if (not app_db_entry or not ('port_fast' in app_db_entry) or app_db_entry['port_fast'] == 'no'): + return False + return True + + +def stp_is_uplink_fast_enabled(ifname): + entry = g_stp_cfg_db.get_entry("STP_PORT", ifname) + if (entry and ('uplink_fast' in entry) and entry['uplink_fast'] == 'true'): + return True + return False + + +def stp_get_entry_from_vlan_tb(db, vlanid): + entry = stp_get_all_from_pattern(db, db.APPL_DB, "*STP_VLAN_TABLE:Vlan{}".format(vlanid)) + if not entry: + return entry + + if 'bridge_id' not in entry: + entry['bridge_id'] = 'NA' + if 'max_age' not in entry: + entry['max_age'] = '0' + if 'hello_time' not in entry: + entry['hello_time'] = '0' + if 'forward_delay' not in entry: + entry['forward_delay'] = '0' + if 'hold_time' not in entry: + entry['hold_time'] = '0' + if 'last_topology_change' not in entry: + entry['last_topology_change'] = '0' + if 'topology_change_count' not in entry: + entry['topology_change_count'] = '0' + if 'root_bridge_id' not in entry: + entry['root_bridge_id'] = 'NA' + if 'root_path_cost' not in entry: + entry['root_path_cost'] = '0' + if 'desig_bridge_id' not in entry: + entry['desig_bridge_id'] = 'NA' + if 
'root_port' not in entry: + entry['root_port'] = 'NA' + if 'root_max_age' not in entry: + entry['root_max_age'] = '0' + if 'root_hello_time' not in entry: + entry['root_hello_time'] = '0' + if 'root_forward_delay' not in entry: + entry['root_forward_delay'] = '0' + if 'stp_instance' not in entry: + entry['stp_instance'] = '65535' + + return entry + + +def stp_get_entry_from_vlan_intf_tb(db, vlanid, ifname): + entry = stp_get_all_from_pattern(db, db.APPL_DB, "*STP_VLAN_PORT_TABLE:Vlan{}:{}".format(vlanid, ifname)) + if not entry: + return entry + + if 'port_num' not in entry: + entry['port_num'] = 'NA' + if 'priority' not in entry: + entry['priority'] = '0' + if 'path_cost' not in entry: + entry['path_cost'] = '0' + if 'root_guard' not in entry: + entry['root_guard'] = 'NA' + if 'bpdu_guard' not in entry: + entry['bpdu_guard'] = 'NA' + if 'port_state' not in entry: + entry['port_state'] = 'NA' + if 'desig_cost' not in entry: + entry['desig_cost'] = '0' + if 'desig_root' not in entry: + entry['desig_root'] = 'NA' + if 'desig_bridge' not in entry: + entry['desig_bridge'] = 'NA' + + return entry + + +# +# This group houses Spanning_tree commands and subgroups +@click.group(cls=clicommon.AliasedGroup, invoke_without_command=True) +@click.pass_context +def spanning_tree(ctx): + """Show spanning_tree commands""" + global g_stp_appl_db + global g_stp_cfg_db + + if not is_stp_docker_running(): + ctx.fail("STP docker is not running") + + g_stp_appl_db = connect_to_appl_db() + g_stp_cfg_db = connect_to_cfg_db() + + global_cfg = g_stp_cfg_db.get_entry("STP", "GLOBAL") + if not global_cfg: + click.echo("Spanning-tree is not configured") + return + + global g_stp_mode + if 'pvst' == global_cfg['mode']: + g_stp_mode = 'PVST' + + if ctx.invoked_subcommand is None: + keys = g_stp_appl_db.keys(g_stp_appl_db.APPL_DB, "*STP_VLAN_TABLE:Vlan*") + if not keys: + return + vlan_list = [] + for key in keys: + result = re.search('.STP_VLAN_TABLE:Vlan(.*)', key) + vlanid = result.group(1) + 
vlan_list.append(int(vlanid)) + vlan_list.sort() + for vlanid in vlan_list: + ctx.invoke(show_stp_vlan, vlanid=vlanid) + + +@spanning_tree.group('vlan', cls=clicommon.AliasedGroup, invoke_without_command=True) +@click.argument('vlanid', metavar='', required=True, type=int) +@click.pass_context +def show_stp_vlan(ctx, vlanid): + """Show spanning_tree vlan information""" + global g_stp_vlanid + g_stp_vlanid = vlanid + + vlan_tb_entry = stp_get_entry_from_vlan_tb(g_stp_appl_db, g_stp_vlanid) + if not vlan_tb_entry: + return + + global g_stp_mode + if g_stp_mode: + click.echo("Spanning-tree Mode: {}".format(g_stp_mode)) + # reset so we dont print again + g_stp_mode = '' + + click.echo("") + click.echo("VLAN {} - STP instance {}".format(g_stp_vlanid, vlan_tb_entry['stp_instance'])) + click.echo("--------------------------------------------------------------------") + click.echo("STP Bridge Parameters:") + + click.echo("{:17}{:7}{:7}{:7}{:6}{:13}{}".format( + "Bridge", "Bridge", "Bridge", "Bridge", "Hold", "LastTopology", "Topology")) + click.echo("{:17}{:7}{:7}{:7}{:6}{:13}{}".format( + "Identifier", "MaxAge", "Hello", "FwdDly", "Time", "Change", "Change")) + click.echo("{:17}{:7}{:7}{:7}{:6}{:13}{}".format("hex", "sec", "sec", "sec", "sec", "sec", "cnt")) + click.echo("{:17}{:7}{:7}{:7}{:6}{:13}{}".format( + vlan_tb_entry['bridge_id'], + vlan_tb_entry['max_age'], + vlan_tb_entry['hello_time'], + vlan_tb_entry['forward_delay'], + vlan_tb_entry['hold_time'], + vlan_tb_entry['last_topology_change'], + vlan_tb_entry['topology_change_count'])) + + click.echo("") + click.echo("{:17}{:10}{:18}{:19}{:4}{:4}{}".format( + "RootBridge", "RootPath", "DesignatedBridge", "RootPort", "Max", "Hel", "Fwd")) + click.echo("{:17}{:10}{:18}{:19}{:4}{:4}{}".format("Identifier", "Cost", "Identifier", "", "Age", "lo", "Dly")) + click.echo("{:17}{:10}{:18}{:19}{:4}{:4}{}".format("hex", "", "hex", "", "sec", "sec", "sec")) + click.echo("{:17}{:10}{:18}{:19}{:4}{:4}{}".format( + 
vlan_tb_entry['root_bridge_id'], + vlan_tb_entry['root_path_cost'], + vlan_tb_entry['desig_bridge_id'], + vlan_tb_entry['root_port'], + vlan_tb_entry['root_max_age'], + vlan_tb_entry['root_hello_time'], + vlan_tb_entry['root_forward_delay'])) + + click.echo("") + click.echo("STP Port Parameters:") + click.echo("{:17}{:5}{:10}{:5}{:7}{:14}{:12}{:17}{}".format( + "Port", "Prio", "Path", "Port", "Uplink", "State", "Designated", "Designated", "Designated")) + click.echo("{:17}{:5}{:10}{:5}{:7}{:14}{:12}{:17}{}".format( + "Name", "rity", "Cost", "Fast", "Fast", "", "Cost", "Root", "Bridge")) + if ctx.invoked_subcommand is None: + keys = g_stp_appl_db.keys(g_stp_appl_db.APPL_DB, "*STP_VLAN_PORT_TABLE:Vlan{}:*".format(vlanid)) + if not keys: + return + intf_list = [] + for key in keys: + result = re.search('.STP_VLAN_PORT_TABLE:Vlan{}:(.*)'.format(vlanid), key) + ifname = result.group(1) + intf_list.append(ifname) + eth_list = [ifname[len("Ethernet"):] for ifname in intf_list if ifname.startswith("Ethernet")] + po_list = [ifname[len("PortChannel"):] for ifname in intf_list if ifname.startswith("PortChannel")] + + eth_list.sort() + po_list.sort() + for port_num in eth_list: + ctx.invoke(show_stp_interface, ifname="Ethernet"+str(port_num)) + for port_num in po_list: + ctx.invoke(show_stp_interface, ifname="PortChannel"+port_num) + + +@show_stp_vlan.command('interface') +@click.argument('ifname', metavar='', required=True) +@click.pass_context +def show_stp_interface(ctx, ifname): + """Show spanning_tree vlan interface information""" + + vlan_intf_tb_entry = stp_get_entry_from_vlan_intf_tb(g_stp_appl_db, g_stp_vlanid, ifname) + if not vlan_intf_tb_entry: + return + + click.echo("{:17}{:5}{:10}{:5}{:7}{:14}{:12}{:17}{}".format( + ifname, + vlan_intf_tb_entry['priority'], + vlan_intf_tb_entry['path_cost'], + 'Y' if (stp_is_port_fast_enabled(ifname)) else 'N', + 'Y' if (stp_is_uplink_fast_enabled(ifname)) else 'N', + vlan_intf_tb_entry['port_state'], + 
vlan_intf_tb_entry['desig_cost'], + vlan_intf_tb_entry['desig_root'], + vlan_intf_tb_entry['desig_bridge'] + )) + + +@spanning_tree.command('bpdu_guard') +@click.pass_context +def show_stp_bpdu_guard(ctx): + """Show spanning_tree bpdu_guard""" + + print_header = 1 + ifname_all = g_stp_cfg_db.get_keys("STP_PORT") + for ifname in ifname_all: + cfg_entry = g_stp_cfg_db.get_entry("STP_PORT", ifname) + if cfg_entry['bpdu_guard'] == 'true' and cfg_entry['enabled'] == 'true': + if print_header: + click.echo("{:17}{:13}{}".format("PortNum", "Shutdown", "Port Shut")) + click.echo("{:17}{:13}{}".format("", "Configured", "due to BPDU guard")) + click.echo("-------------------------------------------") + print_header = 0 + + if cfg_entry['bpdu_guard_do_disable'] == 'true': + disabled = 'No' + keys = g_stp_appl_db.keys(g_stp_appl_db.APPL_DB, "*STP_PORT_TABLE:{}".format(ifname)) + # only 1 key per ifname is expected in BPDU_GUARD_TABLE. + if keys: + appdb_entry = g_stp_appl_db.get_all(g_stp_appl_db.APPL_DB, keys[0]) + if appdb_entry and 'bpdu_guard_shutdown' in appdb_entry: + if appdb_entry['bpdu_guard_shutdown'] == 'yes': + disabled = 'Yes' + click.echo("{:17}{:13}{}".format(ifname, "Yes", disabled)) + else: + click.echo("{:17}{:13}{}".format(ifname, "No", "NA")) + + +@spanning_tree.command('root_guard') +@click.pass_context +def show_stp_root_guard(ctx): + """Show spanning_tree root_guard""" + + print_header = 1 + ifname_all = g_stp_cfg_db.get_keys("STP_PORT") + for ifname in ifname_all: + entry = g_stp_cfg_db.get_entry("STP_PORT", ifname) + if entry['root_guard'] == 'true' and entry['enabled'] == 'true': + if print_header: + global_entry = g_stp_cfg_db.get_entry("STP", "GLOBAL") + click.echo("Root guard timeout: {} secs".format(global_entry['rootguard_timeout'])) + click.echo("") + click.echo("{:17}{:7}{}".format("Port", "VLAN", "Current State")) + click.echo("-------------------------------------------") + print_header = 0 + + state = '' + vlanid = '' + keys = 
g_stp_appl_db.keys(g_stp_appl_db.APPL_DB, "*STP_VLAN_PORT_TABLE:*:{}".format(ifname)) + if keys: + for key in keys: + entry = g_stp_appl_db.get_all(g_stp_appl_db.APPL_DB, key) + if entry and 'root_guard_timer' in entry: + if entry['root_guard_timer'] == '0': + state = 'Consistent state' + else: + state = 'Inconsistent state ({} seconds left on timer)'.format(entry['root_guard_timer']) + + vlanid = re.search(':Vlan(.*):', key) + if vlanid: + click.echo("{:17}{:7}{}".format(ifname, vlanid.group(1), state)) + else: + click.echo("{:17}{:7}{}".format(ifname, vlanid, state)) + + +@spanning_tree.group('statistics', cls=clicommon.AliasedGroup, invoke_without_command=True) +@click.pass_context +def show_stp_statistics(ctx): + """Show spanning_tree statistics""" + + if ctx.invoked_subcommand is None: + keys = g_stp_appl_db.keys(g_stp_appl_db.APPL_DB, "*STP_VLAN_TABLE:Vlan*") + if not keys: + return + + vlan_list = [] + for key in keys: + result = re.search('.STP_VLAN_TABLE:Vlan(.*)', key) + vlanid = result.group(1) + vlan_list.append(int(vlanid)) + vlan_list.sort() + for vlanid in vlan_list: + ctx.invoke(show_stp_vlan_statistics, vlanid=vlanid) + + +@show_stp_statistics.command('vlan') +@click.argument('vlanid', metavar='', required=True, type=int) +@click.pass_context +def show_stp_vlan_statistics(ctx, vlanid): + """Show spanning_tree statistics vlan""" + + stp_inst_entry = stp_get_all_from_pattern( + g_stp_appl_db, g_stp_appl_db.APPL_DB, "*STP_VLAN_TABLE:Vlan{}".format(vlanid)) + if not stp_inst_entry: + return + + click.echo("VLAN {} - STP instance {}".format(vlanid, stp_inst_entry['stp_instance'])) + click.echo("--------------------------------------------------------------------") + click.echo("{:17}{:15}{:15}{:15}{}".format("PortNum", "BPDU Tx", "BPDU Rx", "TCN Tx", "TCN Rx")) + keys = g_stp_appl_db.keys(g_stp_appl_db.APPL_DB, "*STP_VLAN_PORT_TABLE:Vlan{}:*".format(vlanid)) + if keys: + for key in keys: + result = re.search('.STP_VLAN_PORT_TABLE:Vlan(.*):(.*)', key) + 
ifname = result.group(2) + entry = g_stp_appl_db.get_all(g_stp_appl_db.APPL_DB, key) + if entry: + if 'bpdu_sent' not in entry: + entry['bpdu_sent'] = '-' + if 'bpdu_received' not in entry: + entry['bpdu_received'] = '-' + if 'tc_sent' not in entry: + entry['tc_sent'] = '-' + if 'tc_received' not in entry: + entry['tc_received'] = '-' + + click.echo("{:17}{:15}{:15}{:15}{}".format( + ifname, entry['bpdu_sent'], entry['bpdu_received'], entry['tc_sent'], entry['tc_received'])) diff --git a/sonic_installer/main.py b/sonic_installer/main.py index c5d3a256f2..d85e3731aa 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -763,6 +763,7 @@ def cleanup(): "radv", "restapi", "sflow", + "stp", "snmp", "swss", "syncd", diff --git a/sonic_package_manager/manager.py b/sonic_package_manager/manager.py index a052479607..b6a3be50c3 100644 --- a/sonic_package_manager/manager.py +++ b/sonic_package_manager/manager.py @@ -1017,8 +1017,10 @@ def _get_installed_packages_except(self, package: Package) -> Dict[str, Package] def _stop_feature(self, package: Package): self._systemctl_action(package, 'stop') + self._systemctl_action(package, 'disable') def _start_feature(self, package: Package): + self._systemctl_action(package, 'enable') self._systemctl_action(package, 'start') def _systemctl_action(self, package: Package, action: str): diff --git a/ssdutil/main.py b/ssdutil/main.py index 7b6f2c1ca1..460c7f769a 100755 --- a/ssdutil/main.py +++ b/ssdutil/main.py @@ -6,21 +6,61 @@ # try: - import argparse import os import sys + import argparse + import psutil + from blkinfo import BlkDiskInfo from sonic_py_common import device_info, logger except ImportError as e: raise ImportError("%s - required module not found" % str(e)) -DEFAULT_DEVICE="/dev/sda" +DEFAULT_DEVICE = "/dev/sda" SYSLOG_IDENTIFIER = "ssdutil" +DISK_TYPE_SSD = "sata" # Global logger instance log = logger.Logger(SYSLOG_IDENTIFIER) +def get_default_disk(): + """Check default disk""" + default_device = 
DEFAULT_DEVICE + host_mnt = '/host' + host_partition = None + partitions = psutil.disk_partitions() + + if partitions is None: + return (default_device, None) + + for parts in partitions: + if parts.mountpoint == host_mnt: + host_partition = parts + break + + disk_major = os.major(os.stat(host_partition.device).st_rdev) + filters = { + 'maj:min': '{}:0'.format(disk_major) + } + + myblkd = BlkDiskInfo() + my_filtered_disks = myblkd.get_disks(filters) + + if my_filtered_disks is None: + return (default_device, None) + + json_output = my_filtered_disks[0] + blkdev = json_output['name'] + disk_type = json_output['tran'] + default_device = os.path.join("/dev/", blkdev) + + # Disk Type Support for eMMC devices + disk_type = 'eMMC' if len(disk_type) == 0 and 'mmcblk' in host_partition.device else disk_type # noqa: E501 + + return default_device, disk_type + + def import_ssd_api(diskdev): """ Loads platform specific or generic ssd_util module from source @@ -37,15 +77,16 @@ def import_ssd_api(diskdev): sys.path.append(os.path.abspath(platform_plugins_path)) from ssd_util import SsdUtil except ImportError as e: - log.log_warning("Platform specific SsdUtil module not found. Falling down to the generic implementation") + log.log_warning("Platform specific SsdUtil module not found. Falling down to the generic implementation") # noqa: E501 try: from sonic_platform_base.sonic_storage.ssd import SsdUtil except ImportError as e: - log.log_error("Failed to import default SsdUtil. Error: {}".format(str(e)), True) + log.log_error("Failed to import default SsdUtil. 
Error: {}".format(str(e)), True) # noqa: E501 raise e return SsdUtil(diskdev) + def is_number(s): try: float(s) @@ -53,6 +94,7 @@ def is_number(s): except ValueError: return False + # ==================== Entry point ==================== def ssdutil(): if os.geteuid() != 0: @@ -60,21 +102,24 @@ def ssdutil(): sys.exit(1) parser = argparse.ArgumentParser() - parser.add_argument("-d", "--device", help="Device name to show health info", default=DEFAULT_DEVICE) - parser.add_argument("-v", "--verbose", action="store_true", default=False, help="Show verbose output (some additional parameters)") - parser.add_argument("-e", "--vendor", action="store_true", default=False, help="Show vendor output (extended output if provided by platform vendor)") + (default_device, disk_type) = get_default_disk() + parser.add_argument("-d", "--device", help="Device name to show health info", default=default_device) # noqa: E501 + parser.add_argument("-v", "--verbose", action="store_true", default=False, help="Show verbose output (some additional parameters)") # noqa: E501 + parser.add_argument("-e", "--vendor", action="store_true", default=False, help="Show vendor output (extended output if provided by platform vendor)") # noqa: E501 args = parser.parse_args() + print("Disk Type : {0}".format(disk_type.upper())) ssd = import_ssd_api(args.device) print("Device Model : {}".format(ssd.get_model())) if args.verbose: print("Firmware : {}".format(ssd.get_firmware())) print("Serial : {}".format(ssd.get_serial())) - print("Health : {}{}".format(ssd.get_health(), "%" if is_number(ssd.get_health()) else "")) - print("Temperature : {}{}".format(ssd.get_temperature(), "C" if is_number(ssd.get_temperature()) else "")) + print("Health : {}{}".format(ssd.get_health(), "%" if is_number(ssd.get_health()) else "")) # noqa: E501 + print("Temperature : {}{}".format(ssd.get_temperature(), "C" if is_number(ssd.get_temperature()) else "")) # noqa: E501 if args.vendor: print(ssd.get_vendor_output()) + if __name__ 
== '__main__': ssdutil() diff --git a/tests/bmp_input/bmp.json b/tests/bmp_input/bmp.json new file mode 100644 index 0000000000..6f3583f549 --- /dev/null +++ b/tests/bmp_input/bmp.json @@ -0,0 +1,9 @@ +{ + "BMP": { + "table": { + "bgp_neighbor_table": "false", + "bgp_rib_in_table": "false", + "bgp_rib_out_table": "false" + } + } +} diff --git a/tests/bmp_input/bmp_invalid.json b/tests/bmp_input/bmp_invalid.json new file mode 100644 index 0000000000..87a4f937da --- /dev/null +++ b/tests/bmp_input/bmp_invalid.json @@ -0,0 +1,6 @@ +{ + "BMP": { + "table": { + } + } +} diff --git a/tests/config_mlnx_test.py b/tests/config_mlnx_test.py deleted file mode 100644 index 0cf2e117b4..0000000000 --- a/tests/config_mlnx_test.py +++ /dev/null @@ -1,47 +0,0 @@ -import sys -import click -import pytest -import config.plugins.mlnx as config -from unittest.mock import patch, Mock -from click.testing import CliRunner -from utilities_common.db import Db - - -@patch('config.plugins.mlnx.sniffer_env_variable_set', Mock(return_value=False)) -@patch('config.plugins.mlnx.sniffer_filename_generate', Mock(return_value="sdk_file_name")) -class TestConfigMlnx(object): - def setup(self): - print('SETUP') - - - @patch('config.plugins.mlnx.restart_swss', Mock(return_value=0)) - def test_config_sniffer_enable(self): - db = Db() - runner = CliRunner() - result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["enable"],["-y"]) - assert "SDK sniffer is Enabled, recording file is sdk_file_name." in result.output - - @patch('config.plugins.mlnx.restart_swss', Mock(return_value=0)) - def test_config_sniffer_disble(self): - db = Db() - runner = CliRunner() - result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["disable"],["-y"]) - assert "SDK sniffer is Disabled." 
in result.output - - @patch('config.plugins.mlnx.restart_swss', Mock(return_value=1)) - def test_config_sniffer_enable_fail(self): - db = Db() - runner = CliRunner() - result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["enable"],["-y"]) - assert "SDK sniffer is Enabled, recording file is sdk_file_name." not in result.output - - @patch('config.plugins.mlnx.restart_swss', Mock(return_value=1)) - def test_config_sniffer_disble_fail(self): - db = Db() - runner = CliRunner() - result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["disable"],["-y"]) - assert "SDK sniffer is Disabled." not in result.output - - def teardown(self): - print('TEARDOWN') - diff --git a/tests/config_test.py b/tests/config_test.py index 21eb095789..6763fb7723 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -40,6 +40,9 @@ # Config Reload input Path mock_db_path = os.path.join(test_path, "config_reload_input") +mock_bmp_db_path = os.path.join(test_path, "bmp_input") + + # Load minigraph input Path load_minigraph_input_path = os.path.join(test_path, "load_minigraph_input") load_minigraph_platform_path = os.path.join(load_minigraph_input_path, "platform") @@ -702,6 +705,51 @@ def teardown_class(cls): dbconnector.load_namespace_config() +class TestBMPConfig(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "1" + yield + print("TEARDOWN") + os.environ["UTILITIES_UNIT_TESTING"] = "0" + + @pytest.mark.parametrize("table_name", [ + "bgp-neighbor-table", + "bgp-rib-in-table", + "bgp-rib-out-table" + ]) + @pytest.mark.parametrize("enabled", ["true", "false"]) + @pytest.mark.parametrize("filename", ["bmp_invalid.json", "bmp.json"]) + def test_enable_disable_table( + self, + get_cmd_module, + setup_single_broadcom_asic, + table_name, + enabled, + filename): + (config, show) = get_cmd_module + jsonfile_config = os.path.join(mock_bmp_db_path, filename) + config.DEFAULT_CONFIG_DB_FILE = 
jsonfile_config + runner = CliRunner() + db = Db() + + # Enable table + result = runner.invoke(config.config.commands["bmp"].commands["enable"], + [table_name], obj=db) + assert result.exit_code == 0 + + # Disable table + result = runner.invoke(config.config.commands["bmp"].commands["disable"], + [table_name], obj=db) + assert result.exit_code == 0 + + # Enable table again + result = runner.invoke(config.config.commands["bmp"].commands["enable"], + [table_name], obj=db) + assert result.exit_code == 0 + + class TestConfigReloadMasic(object): @classmethod def setup_class(cls): @@ -1384,6 +1432,34 @@ def test_reload_yang_config(self, get_cmd_module, assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ == RELOAD_YANG_CFG_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) + def test_reload_config_fails_yang_validation(self, get_cmd_module, setup_single_broadcom_asic): + with open(self.dummy_cfg_file, 'w') as f: + device_metadata = { + "DEVICE_METADATA": { + "localhost": { + "invalid_hwsku": "some_hwsku" + } + } + } + f.write(json.dumps(device_metadata)) + + with mock.patch( + "utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect) + ): + (config, _) = get_cmd_module + runner = CliRunner() + + result = runner.invoke( + config.config.commands["reload"], + [self.dummy_cfg_file, '-y', '-f']) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert "fails YANG validation! 
Error" in result.output + @classmethod def teardown_class(cls): os.environ['UTILITIES_UNIT_TESTING'] = "0" @@ -2771,6 +2847,13 @@ def test_add_loopback_with_invalid_name_adhoc_validation(self): assert result.exit_code != 0 assert "Error: Loopbax1 is invalid, name should have prefix 'Loopback' and suffix '<0-999>'" in result.output + result = runner.invoke(config.config.commands["loopback"].commands["add"], ["Loopback0000"], obj=obj) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Error: Loopback0000 is invalid, name should have prefix 'Loopback' and suffix '<0-999>' and " \ + "should not exceed 15 characters" in result.output + def test_del_nonexistent_loopback_adhoc_validation(self): config.ADHOC_VALIDATION = True runner = CliRunner() @@ -3877,3 +3960,63 @@ def teardown_class(cls): from .mock_tables import mock_single_asic importlib.reload(mock_single_asic) dbconnector.load_database_config() + + +class TestConfigBanner(object): + @classmethod + def setup_class(cls): + print('SETUP') + import config.main + importlib.reload(config.main) + + @patch('utilities_common.cli.run_command', + mock.MagicMock(side_effect=mock_run_command_side_effect)) + def test_banner_state(self): + runner = CliRunner() + obj = {'db': Db().cfgdb} + + result = runner.invoke( + config.config.commands['banner'].commands['state'], + ['enabled'], obj=obj) + + assert result.exit_code == 0 + + @patch('utilities_common.cli.run_command', + mock.MagicMock(side_effect=mock_run_command_side_effect)) + def test_banner_login(self): + runner = CliRunner() + obj = {'db': Db().cfgdb} + + result = runner.invoke( + config.config.commands['banner'].commands['login'], + ['Login message'], obj=obj) + + assert result.exit_code == 0 + + @patch('utilities_common.cli.run_command', + mock.MagicMock(side_effect=mock_run_command_side_effect)) + def test_banner_logout(self): + runner = CliRunner() + obj = {'db': Db().cfgdb} + + result = runner.invoke( + 
config.config.commands['banner'].commands['logout'], + ['Logout message'], obj=obj) + + assert result.exit_code == 0 + + @patch('utilities_common.cli.run_command', + mock.MagicMock(side_effect=mock_run_command_side_effect)) + def test_banner_motd(self): + runner = CliRunner() + obj = {'db': Db().cfgdb} + + result = runner.invoke( + config.config.commands['banner'].commands['motd'], + ['Motd message'], obj=obj) + + assert result.exit_code == 0 + + @classmethod + def teardown_class(cls): + print('TEARDOWN') diff --git a/tests/db_migrator_input/config_db/cross_branch_upgrade_flex_counters_expected.json b/tests/db_migrator_input/config_db/cross_branch_upgrade_flex_counters_expected.json index 68d08afce3..648d343c0d 100644 --- a/tests/db_migrator_input/config_db/cross_branch_upgrade_flex_counters_expected.json +++ b/tests/db_migrator_input/config_db/cross_branch_upgrade_flex_counters_expected.json @@ -3,14 +3,14 @@ "VERSION": "version_202411_01" }, "FLEX_COUNTER_TABLE|ACL": { - "FLEX_COUNTER_STATUS": "true", + "FLEX_COUNTER_STATUS": "enable", "POLL_INTERVAL": "10000" }, "FLEX_COUNTER_TABLE|QUEUE": { - "FLEX_COUNTER_STATUS": "true", + "FLEX_COUNTER_STATUS": "enable", "POLL_INTERVAL": "10000" }, "FLEX_COUNTER_TABLE|PG_WATERMARK": { - "FLEX_COUNTER_STATUS": "false" + "FLEX_COUNTER_STATUS": "disable" } } diff --git a/tests/db_migrator_input/config_db/cross_branch_upgrade_flex_counters_input.json b/tests/db_migrator_input/config_db/cross_branch_upgrade_flex_counters_input.json index 07ce763683..e2d8d04588 100644 --- a/tests/db_migrator_input/config_db/cross_branch_upgrade_flex_counters_input.json +++ b/tests/db_migrator_input/config_db/cross_branch_upgrade_flex_counters_input.json @@ -3,16 +3,16 @@ "VERSION": "version_1_0_1" }, "FLEX_COUNTER_TABLE|ACL": { - "FLEX_COUNTER_STATUS": "true", + "FLEX_COUNTER_STATUS": "enable", "FLEX_COUNTER_DELAY_STATUS": "true", "POLL_INTERVAL": "10000" }, "FLEX_COUNTER_TABLE|QUEUE": { - "FLEX_COUNTER_STATUS": "true", + "FLEX_COUNTER_STATUS": 
"enable", "FLEX_COUNTER_DELAY_STATUS": "false", "POLL_INTERVAL": "10000" }, "FLEX_COUNTER_TABLE|PG_WATERMARK": { - "FLEX_COUNTER_STATUS": "false" + "FLEX_COUNTER_STATUS": "disable" } } diff --git a/tests/db_migrator_input/config_db/portchannel-expected.json b/tests/db_migrator_input/config_db/portchannel-expected.json index 2644e5f4e9..f380c75363 100644 --- a/tests/db_migrator_input/config_db/portchannel-expected.json +++ b/tests/db_migrator_input/config_db/portchannel-expected.json @@ -1,28 +1,24 @@ { "PORTCHANNEL|PortChannel0": { "admin_status": "up", - "members@": "Ethernet0,Ethernet4", "min_links": "2", "mtu": "9100", "lacp_key": "auto" }, "PORTCHANNEL|PortChannel1": { "admin_status": "up", - "members@": "Ethernet8,Ethernet12", "min_links": "2", "mtu": "9100", "lacp_key": "auto" }, "PORTCHANNEL|PortChannel0123": { "admin_status": "up", - "members@": "Ethernet16", "min_links": "1", "mtu": "9100", "lacp_key": "auto" }, "PORTCHANNEL|PortChannel0011": { "admin_status": "up", - "members@": "Ethernet20,Ethernet24", "min_links": "2", "mtu": "9100", "lacp_key": "auto" diff --git a/tests/db_migrator_input/config_db/portchannel-input.json b/tests/db_migrator_input/config_db/portchannel-input.json index 753a88601d..43a9fabdb5 100644 --- a/tests/db_migrator_input/config_db/portchannel-input.json +++ b/tests/db_migrator_input/config_db/portchannel-input.json @@ -1,25 +1,21 @@ { "PORTCHANNEL|PortChannel0": { "admin_status": "up", - "members@": "Ethernet0,Ethernet4", "min_links": "2", "mtu": "9100" }, "PORTCHANNEL|PortChannel1": { "admin_status": "up", - "members@": "Ethernet8,Ethernet12", "min_links": "2", "mtu": "9100" }, "PORTCHANNEL|PortChannel0123": { "admin_status": "up", - "members@": "Ethernet16", "min_links": "1", "mtu": "9100" }, "PORTCHANNEL|PortChannel0011": { "admin_status": "up", - "members@": "Ethernet20,Ethernet24", "min_links": "2", "mtu": "9100" }, diff --git a/tests/db_migrator_input/config_db/qos_map_table_expected.json 
b/tests/db_migrator_input/config_db/qos_map_table_expected.json index 47381ec550..f84c1a900b 100644 --- a/tests/db_migrator_input/config_db/qos_map_table_expected.json +++ b/tests/db_migrator_input/config_db/qos_map_table_expected.json @@ -29,6 +29,14 @@ "pfc_to_queue_map": "AZURE", "tc_to_pg_map": "AZURE", "tc_to_queue_map": "AZURE" - } + }, + "TC_TO_QUEUE_MAP|AZURE": {"0": "0"}, + "TC_TO_PRIORITY_GROUP_MAP|AZURE": {"0": "0"}, + "MAP_PFC_PRIORITY_TO_QUEUE|AZURE": {"0": "0"}, + "DSCP_TO_TC_MAP|AZURE": {"0": "0"}, + "PORT|Ethernet0": {"lanes": "0", "speed": "1000"}, + "PORT|Ethernet92": {"lanes": "92", "speed": "1000"}, + "PORT|Ethernet96": {"lanes": "96", "speed": "1000"}, + "PORT|Ethernet100": {"lanes": "100", "speed": "1000"} } diff --git a/tests/db_migrator_input/config_db/qos_map_table_input.json b/tests/db_migrator_input/config_db/qos_map_table_input.json index c62e293daf..3c288b9534 100644 --- a/tests/db_migrator_input/config_db/qos_map_table_input.json +++ b/tests/db_migrator_input/config_db/qos_map_table_input.json @@ -27,5 +27,13 @@ "pfc_to_queue_map": "AZURE", "tc_to_pg_map": "AZURE", "tc_to_queue_map": "AZURE" - } + }, + "TC_TO_QUEUE_MAP|AZURE": {"0": "0"}, + "TC_TO_PRIORITY_GROUP_MAP|AZURE": {"0": "0"}, + "MAP_PFC_PRIORITY_TO_QUEUE|AZURE": {"0": "0"}, + "DSCP_TO_TC_MAP|AZURE": {"0": "0"}, + "PORT|Ethernet0": {"lanes": "0", "speed": "1000"}, + "PORT|Ethernet92": {"lanes": "92", "speed": "1000"}, + "PORT|Ethernet96": {"lanes": "96", "speed": "1000"}, + "PORT|Ethernet100": {"lanes": "100", "speed": "1000"} } diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-expected.json b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-expected.json index 5181daa057..b969575c78 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-expected.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-expected.json @@ -12,7 +12,7 @@ "profile": "NULL" }, 
"BUFFER_PG|Ethernet8|3-4": { - "profile": "customized_lossless_profile" + "profile": "customized_ingress_lossless_profile" }, "BUFFER_PG|Ethernet12|0": { "profile": "ingress_lossy_profile" @@ -103,6 +103,11 @@ "BUFFER_PORT_INGRESS_PROFILE_LIST|Ethernet24": { "profile_list": "ingress_lossless_profile,ingress_lossy_profile" }, + "BUFFER_PROFILE|customized_egress_lossless_profile": { + "dynamic_th": "7", + "pool": "egress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|egress_lossless_profile": { "dynamic_th": "7", "pool": "egress_lossless_pool", @@ -118,6 +123,11 @@ "pool": "ingress_lossless_pool", "size": "0" }, + "BUFFER_PROFILE|customized_ingress_lossless_profile": { + "dynamic_th": "7", + "pool": "ingress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|ingress_lossy_profile": { "dynamic_th": "3", "pool": "ingress_lossy_pool", diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-input.json b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-input.json index d8deef194f..d3337ccadb 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-input.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-input.json @@ -3,7 +3,7 @@ "profile": "NULL" }, "BUFFER_PG|Ethernet8|3-4": { - "profile": "customized_lossless_profile" + "profile": "customized_ingress_lossless_profile" }, "BUFFER_PG|Ethernet12|0": { "profile": "ingress_lossy_profile" @@ -55,6 +55,11 @@ "BUFFER_PORT_INGRESS_PROFILE_LIST|Ethernet24": { "profile_list": "ingress_lossless_profile,ingress_lossy_profile" }, + "BUFFER_PROFILE|customized_egress_lossless_profile": { + "dynamic_th": "7", + "pool": "egress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|egress_lossless_profile": { "dynamic_th": "7", "pool": "egress_lossless_pool", @@ -65,6 +70,11 @@ "pool": "egress_lossy_pool", "size": "9216" }, + "BUFFER_PROFILE|customized_ingress_lossless_profile": { + "dynamic_th": "7", + "pool": 
"ingress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|ingress_lossless_profile": { "dynamic_th": "7", "pool": "ingress_lossless_pool", diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-expected.json b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-expected.json index 278a40bc0a..3572be8b69 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-expected.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-expected.json @@ -12,7 +12,7 @@ "profile": "NULL" }, "BUFFER_PG|Ethernet8|3-4": { - "profile": "customized_lossless_profile" + "profile": "customized_ingress_lossless_profile" }, "BUFFER_PG|Ethernet12|0": { "profile": "ingress_lossy_profile" @@ -99,6 +99,11 @@ "BUFFER_PORT_INGRESS_PROFILE_LIST|Ethernet24": { "profile_list": "ingress_lossless_profile" }, + "BUFFER_PROFILE|customized_egress_lossless_profile": { + "dynamic_th": "7", + "pool": "egress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|egress_lossless_profile": { "dynamic_th": "7", "pool": "egress_lossless_pool", @@ -109,6 +114,11 @@ "pool": "egress_lossy_pool", "size": "9216" }, + "BUFFER_PROFILE|customized_ingress_lossless_profile": { + "dynamic_th": "7", + "pool": "ingress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|ingress_lossless_profile": { "dynamic_th": "7", "pool": "ingress_lossless_pool", diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-input.json b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-input.json index b3bda32f23..60f4455cad 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-input.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-input.json @@ -3,7 +3,7 @@ "profile": "NULL" }, "BUFFER_PG|Ethernet8|3-4": { - "profile": "customized_lossless_profile" + "profile": "customized_ingress_lossless_profile" }, "BUFFER_PG|Ethernet12|0": { 
"profile": "ingress_lossy_profile" @@ -51,6 +51,11 @@ "BUFFER_PORT_INGRESS_PROFILE_LIST|Ethernet24": { "profile_list": "ingress_lossless_profile" }, + "BUFFER_PROFILE|customized_egress_lossless_profile": { + "dynamic_th": "7", + "pool": "egress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|egress_lossless_profile": { "dynamic_th": "7", "pool": "egress_lossless_pool", @@ -61,6 +66,11 @@ "pool": "egress_lossy_pool", "size": "9216" }, + "BUFFER_PROFILE|customized_ingress_lossless_profile": { + "dynamic_th": "7", + "pool": "ingress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|ingress_lossless_profile": { "dynamic_th": "7", "pool": "ingress_lossless_pool", diff --git a/tests/dump_input/dash/appl_db.json b/tests/dump_input/dash/appl_db.json new file mode 100644 index 0000000000..8dd7745e8e --- /dev/null +++ b/tests/dump_input/dash/appl_db.json @@ -0,0 +1,53 @@ +{ + "DASH_ACL_GROUP_TABLE:group1": { + "pb":"CAESEgoQ6roHCSZkQ9GIMjmqpWE/IQ==" + }, + "DASH_ACL_RULE_TABLE:group1:rule1":{ + "pb": + "EAIYASoOCgUNAAAAABIFDQAAAAAyDgoFDQAAAAASBQ0AAAAAOgIIUEIDCOsp" + } + , + "DASH_ENI_TABLE:eni0":{ + "pb": + "CiQzODZlMTJjNi1lMGFiLTNiODgtYTg3ZS00OGIzNjBkN2I2YWMSBgAAAACqABoGcW9zMTAwIgUNCgABAigCMgVWbmV0NQ==" + }, + "DASH_ENI_TABLE:F4939FEFC47E":{ + "pb": + "CiQ0OTdmMjNkNy1mMGFjLTRjOTktYTk4Zi01OWI0NzBlOGM3YmQSBvSTn+/EfhoGcW9zMTAwIgUNCgABAigCMgVWbmV0MQ==" + }, + "DASH_ROUTE_TABLE:eni0:12.1.1.0/24":{ + "pb": + "CAISBVZuZXQx" + }, + "DASH_ROUTE_TABLE:F4939FEFC47E:20.2.2.0/24":{ + "pb": + "CAISBVZuZXQy" + }, + "DASH_ACL_OUT_TABLE:ENI0:1":{ + "pb":"CgZncm91cDE=" + }, + "DASH_QOS_TABLE:qos100":{ + "pb":"CgMxMDAQkE4Y6AcgCg==" + }, + "DASH_VNET_MAPPING_TABLE:Vnet1:12.1.1.1":{ + "pb":"CAQSBQ0KAAICGgYAAAAAqgEwAQ==" + }, + "DASH_VNET_TABLE:Vnet1":{ + "pb":"COgHEhIKEFWcbOgmq0GTuUbMxuj5MLI=" + }, + "DASH_VNET_TABLE:Vnet2":{ + "pb":"CNAPEhIKEGWcbOgmq0GTuUbMxuj5MLI=" + }, + "DASH_APPLIANCE_TABLE:123":{ + "pb":"CgUNCgEAIBBl" + }, + "DASH_ROUTE_RULE_TABLE:F4939FEFC47E:2000:10.0.2.0/24":{ + 
"pb":"CAQQASIFVm5ldDIoAQ==" + }, + "DASH_ACL_IN_TABLE:F4939FEFC47E:1":{ + "pb":"ChFkZWZhdWx0X2FjbF9ncm91cA==" + }, + "DASH_PREFIX_TAG_TABLE:AclTagScale1798":{ + "pb":"CAESDgoFDQgAAGsSBQ3/////" + } +} diff --git a/tests/dump_input/dash/asic_db.json b/tests/dump_input/dash/asic_db.json new file mode 100644 index 0000000000..b61ea14a78 --- /dev/null +++ b/tests/dump_input/dash/asic_db.json @@ -0,0 +1,19 @@ +{ + "ASIC_STATE:SAI_OBJECT_TYPE_VNET:oid:0x7a000000000021": { + "SAI_VNET_ATTR_VNI": "1000" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_ENI:oid:0x73000000000023": { + "SAI_ENI_ATTR_VNET_ID": "oid:0x7a000000000021", + "SAI_ENI_ATTR_ADMIN_STATE": "true", + "SAI_ENI_ATTR_VM_UNDERLAY_DIP": "10.0.1.2", + "SAI_ENI_ATTR_VM_VNI": "101" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_INBOUND_ROUTING_ENTRY:{\"eni_id\":\"oid:0x73000000000023\",\"priority\":\"1\",\"sip\":\"10.0.2.0\",\"sip_mask\":\"255.255.255.0\",\"switch_id\":\"oid:0x21000000000000\",\"vni\":\"2000\"}": { + "SAI_INBOUND_ROUTING_ENTRY_ATTR_ACTION": "SAI_INBOUND_ROUTING_ENTRY_ACTION_VXLAN_DECAP_PA_VALIDATE", + "SAI_INBOUND_ROUTING_ENTRY_ATTR_SRC_VNET_ID": "oid:0x7a000000000022" + }, + "ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY:{\"destination\":\"20.2.2.0/24\",\"eni_id\":\"oid:0x73000000000023\",\"switch_id\":\"oid:0x21000000000000\"}": { + "SAI_OUTBOUND_ROUTING_ENTRY_ATTR_ACTION": "SAI_OUTBOUND_ROUTING_ENTRY_ACTION_ROUTE_VNET", + "SAI_OUTBOUND_ROUTING_ENTRY_ATTR_DST_VNET_ID": "oid:0x7a000000000022" + } +} diff --git a/tests/dump_tests/dash_object_test.py b/tests/dump_tests/dash_object_test.py new file mode 100644 index 0000000000..300f421e21 --- /dev/null +++ b/tests/dump_tests/dash_object_test.py @@ -0,0 +1,483 @@ +import os +import pytest +from dump.match_infra import MatchEngine, MatchRequest, ConnectionPool, CONN +try: + from dump.plugins.dash_acl_out import Dash_Acl_Out + from dump.plugins.dash_acl_in import Dash_Acl_In + from dump.plugins.dash_acl_group import Dash_Acl_Group + from dump.plugins.dash_prefix_tag import 
Dash_Prefix_Tag + from dump.plugins.dash_acl_rule import Dash_Acl_Rule + from dump.plugins.dash_appliance import Dash_Appliance + from dump.plugins.dash_eni import Dash_Eni + from dump.plugins.dash_qos import Dash_Qos + from dump.plugins.dash_vnet import Dash_Vnet + from dump.plugins.dash_vnet_mapping import Dash_Vnet_mapping + from dump.plugins.dash_route import Dash_Route +except ModuleNotFoundError: + pytest.skip("Skipping Dash tests since it is not supported in this Platform", allow_module_level=True) +from utilities_common.constants import DEFAULT_NAMESPACE +from dump.helper import populate_mock +from .mock_redis import RedisMock +from click.testing import CliRunner +from .dump_state_test import compare_json_output +import dump.main as dump + +from swsscommon.swsscommon import SonicV2Connector +from ..mock_tables import dbconnector + +test_path = os.path.join(os.path.dirname(__file__), "../") +dump_test_input = os.path.join(test_path, "dump_input") + + +@pytest.fixture(scope="module", autouse=True) +def match_engine(): + print("SETUP") + os.environ["VERBOSE"] = "1" + dbconnector.load_namespace_config() + + dump_input = os.path.join(os.path.dirname(__file__), "../dump_input/") + dedicated_dbs = {} + redisMock = RedisMock() + + conn = SonicV2Connector() + # popualate the db ,with mock data + db_names = list(dedicated_dbs.keys()) + try: + populate_mock(conn, db_names, dedicated_dbs) + except Exception as e: + assert False, "Mock initialization failed: " + str(e) + conn_pool = ConnectionPool() + dedicated_dbs['APPL_DB'] = os.path.join(dump_input, "dash/appl_db.json") + dedicated_dbs['ASIC_DB'] = os.path.join(dump_input, "dash/asic_db.json") + redisMock.load_file(dedicated_dbs['APPL_DB']) + conn_pool.fill(DEFAULT_NAMESPACE, conn_pool.initialize_connector(DEFAULT_NAMESPACE), list(dedicated_dbs.keys())) + conn_pool.fill(DEFAULT_NAMESPACE, redisMock, None, dash_object=True) + populate_mock(conn_pool.cache[DEFAULT_NAMESPACE][CONN], list(dedicated_dbs.keys()), 
dedicated_dbs) + match_engine = MatchEngine(conn_pool) + yield match_engine + print("TEARDOWN") + + +@pytest.mark.usefixtures("match_engine") +class TestMatchEngineDash: + + def test_acl_out(self, match_engine): + req = MatchRequest(db="APPL_DB", table="DASH_ACL_OUT_TABLE", key_pattern="*", pb=Dash_Acl_Out()) + ret = match_engine.fetch(req) + assert ret["error"] == "" + assert len(ret["keys"]) == 1 + assert "DASH_ACL_OUT_TABLE:ENI0:1" in ret['keys'] + runner = CliRunner() + result = runner.invoke(dump.state, ["dash_acl_out", "all"], obj=match_engine) + assert result.exit_code == 0, ( + "exit code: {}, Exception: {}, Traceback: {}".format + (result.exit_code, result.exception, result.exc_info) + ) + expected = {"ENI0:1": + { + "APPL_DB": + {"keys": + [{"DASH_ACL_OUT_TABLE:ENI0:1": + {"v4_acl_group_id": "group1"}}], + "tables_not_found": [] + } + } + } + ddiff = compare_json_output(expected, result.output) + assert not ddiff, ddiff + + def test_acl_in(self, match_engine): + req = MatchRequest(db="APPL_DB", table="DASH_ACL_IN_TABLE", key_pattern="*", pb=Dash_Acl_In()) + ret = match_engine.fetch(req) + assert ret["error"] == "" + assert len(ret["keys"]) == 1 + assert "DASH_ACL_IN_TABLE:F4939FEFC47E:1" in ret['keys'] + runner = CliRunner() + result = runner.invoke(dump.state, ["dash_acl_in", "all"], obj=match_engine) + assert result.exit_code == 0, ( + "exit code: {}, Exception: {}, Traceback: {}".format + (result.exit_code, result.exception, result.exc_info) + ) + expected = {"F4939FEFC47E:1": + { + "APPL_DB": + {"keys": + [{"DASH_ACL_IN_TABLE:F4939FEFC47E:1": + {"v4_acl_group_id": "default_acl_group"}}], + "tables_not_found": [] + } + } + } + ddiff = compare_json_output(expected, result.output) + assert not ddiff, ddiff + + def test_acl_group(self, match_engine): + req = MatchRequest(db="APPL_DB", table="DASH_ACL_GROUP_TABLE", key_pattern="*", pb=Dash_Acl_Group()) + ret = match_engine.fetch(req) + assert ret["error"] == "" + assert len(ret["keys"]) == 1 + assert 
"DASH_ACL_GROUP_TABLE:group1" in ret['keys'] + runner = CliRunner() + result = runner.invoke(dump.state, ["dash_acl_group", "all"], obj=match_engine) + assert result.exit_code == 0, ( + "exit code: {}, Exception: {}, Traceback: {}".format + (result.exit_code, result.exception, result.exc_info) + ) + expected = {"group1": + { + "APPL_DB": + {"keys": + [{"DASH_ACL_GROUP_TABLE:group1": + {"ip_version": "IP_VERSION_IPV4", + "guid": "eaba0709-2664-43d1-8832-39aaa5613f21" + }}], + "tables_not_found": [] + } + } + } + ddiff = compare_json_output(expected, result.output) + assert not ddiff, ddiff + + def test_acl_rule(self, match_engine): + req = MatchRequest(db="APPL_DB", table="DASH_ACL_RULE_TABLE", key_pattern="*", pb=Dash_Acl_Rule()) + ret = match_engine.fetch(req) + assert ret["error"] == "" + assert len(ret["keys"]) == 1 + assert "DASH_ACL_RULE_TABLE:group1:rule1" in ret['keys'] + runner = CliRunner() + result = runner.invoke(dump.state, ["dash_acl_rule", "all"], obj=match_engine) + assert result.exit_code == 0, ( + "exit code: {}, Exception: {}, Traceback: {}".format + (result.exit_code, result.exception, result.exc_info) + ) + expected = {"group1:rule1": + { + "APPL_DB": + { + "keys": + [ + { + "DASH_ACL_RULE_TABLE:group1:rule1": + { + "action": "ACTION_PERMIT", + "terminating": True, + "src_addr": ["0.0.0.0/0"], + "dst_addr": ["0.0.0.0/0"], + "src_port": [{"value": 80}], + "dst_port": [{"value": 5355}], + } + } + ], + "tables_not_found": [] + } + } + } + ddiff = compare_json_output(expected, result.output) + assert not ddiff, ddiff + + def test_appliance(self, match_engine): + req = MatchRequest(db="APPL_DB", table="DASH_APPLIANCE_TABLE", key_pattern="*", pb=Dash_Appliance()) + ret = match_engine.fetch(req) + assert ret["error"] == "" + assert len(ret["keys"]) == 1 + assert "DASH_APPLIANCE_TABLE:123" in ret['keys'] + runner = CliRunner() + result = runner.invoke(dump.state, ["dash_appliance", "all"], obj=match_engine) + assert result.exit_code == 0, ( + "exit 
code: {}, Exception: {}, Traceback: {}".format + (result.exit_code, result.exception, result.exc_info) + ) + expected = {"123": + { + "APPL_DB": + { + "keys": + [ + { + "DASH_APPLIANCE_TABLE:123": + { + "sip": "10.1.0.32", + "vm_vni": 101, + } + } + ], + "tables_not_found": [] + } + } + } + ddiff = compare_json_output(expected, result.output) + assert not ddiff, ddiff + + def test_eni(self, match_engine): + req = MatchRequest(db="APPL_DB", table="DASH_ENI_TABLE", key_pattern="*", pb=Dash_Eni()) + ret = match_engine.fetch(req) + assert ret["error"] == "" + assert len(ret["keys"]) == 2 + assert "DASH_ENI_TABLE:eni0" in ret['keys'] + assert "DASH_ENI_TABLE:F4939FEFC47E" in ret['keys'] + runner = CliRunner() + result = runner.invoke(dump.state, ["dash_eni", "all"], obj=match_engine) + assert result.exit_code == 0, ( + "exit code: {}, Exception: {}, Traceback: {}".format + (result.exit_code, result.exception, result.exc_info) + ) + expected = {"eni0": + { + "APPL_DB": + {"keys": + [ + {"DASH_ENI_TABLE:eni0": + {"eni_id": "386e12c6-e0ab-3b88-a87e-48b360d7b6ac", + "mac_address": "00:00:00:00:aa:00", + "qos": "qos100", + "underlay_ip": "10.0.1.2", + "admin_state": "STATE_ENABLED", + "vnet": "Vnet5", + }}], + "tables_not_found": [] + }, + "ASIC_DB": + {"keys": + [], + "tables_not_found": ["ASIC_STATE:SAI_OBJECT_TYPE_ENI"], + } + }, + "F4939FEFC47E": + { + "APPL_DB": + {"keys": + [ + {"DASH_ENI_TABLE:F4939FEFC47E": + {"eni_id": "497f23d7-f0ac-4c99-a98f-59b470e8c7bd", + "mac_address": "f4:93:9f:ef:c4:7e", + "qos": "qos100", + "underlay_ip": "10.0.1.2", + "admin_state": "STATE_ENABLED", + "vnet": "Vnet1", + }}], + "tables_not_found": [] + }, + "ASIC_DB": + {"keys": + [ + {"ASIC_STATE:SAI_OBJECT_TYPE_ENI:oid:0x73000000000023": + {"SAI_ENI_ATTR_ADMIN_STATE": "true", + "SAI_ENI_ATTR_VM_UNDERLAY_DIP": "10.0.1.2", + "SAI_ENI_ATTR_VM_VNI": "101", + "SAI_ENI_ATTR_VNET_ID": "oid:0x7a000000000021", + }}], + "tables_not_found": [], + "vidtorid": + { + "oid:0x73000000000023": "Real ID 
Not Found" + } + } + } + } + ddiff = compare_json_output(expected, result.output) + assert not ddiff, ddiff + + def test_qos(self, match_engine): + req = MatchRequest(db="APPL_DB", table="DASH_QOS_TABLE", key_pattern="*", pb=Dash_Qos()) + ret = match_engine.fetch(req) + assert ret["error"] == "" + assert len(ret["keys"]) == 1 + assert "DASH_QOS_TABLE:qos100" in ret['keys'] + runner = CliRunner() + result = runner.invoke(dump.state, ["dash_qos", "all"], obj=match_engine) + assert result.exit_code == 0, ( + "exit code: {}, Exception: {}, Traceback: {}".format + (result.exit_code, result.exception, result.exc_info) + ) + expected = {"qos100": + { + "APPL_DB": + {"keys": + [ + {"DASH_QOS_TABLE:qos100": + {"qos_id": "100", + "bw": 10000, + "cps": 1000, + "flows": 10, + }}], + "tables_not_found": [] + } + } + } + ddiff = compare_json_output(expected, result.output) + assert not ddiff, ddiff + + def test_route(self, match_engine): + req = MatchRequest(db="APPL_DB", table="DASH_ROUTE_TABLE", key_pattern="*", pb=Dash_Route()) + ret = match_engine.fetch(req) + assert ret["error"] == "" + assert len(ret["keys"]) == 2 + assert "DASH_ROUTE_TABLE:eni0:12.1.1.0/24" in ret['keys'] + assert "DASH_ROUTE_TABLE:F4939FEFC47E:20.2.2.0/24" in ret['keys'] + runner = CliRunner() + result = runner.invoke(dump.state, ["dash_route", "all"], obj=match_engine) + assert result.exit_code == 0, ( + "exit code: {}, Exception: {}, Traceback: {}".format + (result.exit_code, result.exception, result.exc_info) + ) + expected = {"eni0:12.1.1.0/24": + { + "APPL_DB": + {"keys": + [ + {"DASH_ROUTE_TABLE:eni0:12.1.1.0/24": + {"action_type": "ROUTING_TYPE_VNET", + "vnet": "Vnet1", + }}], + "tables_not_found": [] + }, + "ASIC_DB": + {"keys": + [], + "tables_not_found": ["ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY"], + } + }, + "F4939FEFC47E:20.2.2.0/24": + { + "APPL_DB": + {"keys": + [ + {"DASH_ROUTE_TABLE:F4939FEFC47E:20.2.2.0/24": + {"action_type": "ROUTING_TYPE_VNET", + "vnet": "Vnet2", + }}], + 
"tables_not_found": [] + }, + "ASIC_DB": + {"keys": + [ + {"ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY:" + "{\"destination\":\"20.2.2.0/24\",\"eni_id\":\"oid:0x73000000000023\"," + "\"switch_id\":\"oid:0x21000000000000\"}": { + "SAI_OUTBOUND_ROUTING_ENTRY_ATTR_ACTION": + "SAI_OUTBOUND_ROUTING_ENTRY_ACTION_ROUTE_VNET", + "SAI_OUTBOUND_ROUTING_ENTRY_ATTR_DST_VNET_ID": "oid:0x7a000000000022" + }} + ], + "tables_not_found": [], + } + } + } + ddiff = compare_json_output(expected, result.output) + assert not ddiff, ddiff + + def test_vnet_mapping(self, match_engine): + req = MatchRequest(db="APPL_DB", table="DASH_VNET_MAPPING_TABLE", key_pattern="*", pb=Dash_Vnet_mapping()) + ret = match_engine.fetch(req) + assert ret["error"] == "" + assert len(ret["keys"]) == 1 + assert "DASH_VNET_MAPPING_TABLE:Vnet1:12.1.1.1" in ret['keys'] + runner = CliRunner() + result = runner.invoke(dump.state, ["dash_vnet_mapping", "all"], obj=match_engine) + assert result.exit_code == 0, ( + "exit code: {}, Exception: {}, Traceback: {}".format + (result.exit_code, result.exception, result.exc_info) + ) + expected = {"Vnet1:12.1.1.1": + { + "APPL_DB": + {"keys": + [ + {"DASH_VNET_MAPPING_TABLE:Vnet1:12.1.1.1": + {"action_type": "ROUTING_TYPE_VNET_ENCAP", + "underlay_ip": "10.0.2.2", + "mac_address": "00:00:00:00:aa:01", + "use_dst_vni": True, + }}], + "tables_not_found": [] + } + } + } + ddiff = compare_json_output(expected, result.output) + assert not ddiff, ddiff + + def test_acl_prefix_tag(self, match_engine): + req = MatchRequest(db="APPL_DB", table="DASH_PREFIX_TAG_TABLE", key_pattern="*", pb=Dash_Prefix_Tag()) + ret = match_engine.fetch(req) + assert ret["error"] == "" + assert len(ret["keys"]) == 1 + assert "DASH_PREFIX_TAG_TABLE:AclTagScale1798" in ret['keys'] + runner = CliRunner() + result = runner.invoke(dump.state, ["dash_prefix_tag", "all"], obj=match_engine) + assert result.exit_code == 0, ( + "exit code: {}, Exception: {}, Traceback: {}".format + (result.exit_code, 
result.exception, result.exc_info) + ) + expected = {"AclTagScale1798": + { + "APPL_DB": + {"keys": + [ + {"DASH_PREFIX_TAG_TABLE:AclTagScale1798": + {"ip_version": "IP_VERSION_IPV4", + "prefix_list": [ + "8.0.0.107/32" + ] + }}], + "tables_not_found": [] + } + } + } + ddiff = compare_json_output(expected, result.output) + assert not ddiff, ddiff + + def test_vnet(self, match_engine): + req = MatchRequest(db="APPL_DB", table="DASH_VNET_TABLE", key_pattern="*", pb=Dash_Vnet()) + ret = match_engine.fetch(req) + assert ret["error"] == "" + assert len(ret["keys"]) == 2 + assert "DASH_VNET_TABLE:Vnet1" in ret['keys'] + runner = CliRunner() + result = runner.invoke(dump.state, ["dash_vnet", "all"], obj=match_engine) + assert result.exit_code == 0, ( + "exit code: {}, Exception: {}, Traceback: {}".format + (result.exit_code, result.exception, result.exc_info) + ) + expected = {"Vnet1": + { + "APPL_DB": + {"keys": + [ + {"DASH_VNET_TABLE:Vnet1": + {"vni": 1000, + "guid": "559c6ce8-26ab-4193-b946-ccc6e8f930b2", + }}], + "tables_not_found": [] + }, + "ASIC_DB": + {"keys": + [ + {"ASIC_STATE:SAI_OBJECT_TYPE_VNET:oid:0x7a000000000021": + {"SAI_VNET_ATTR_VNI": "1000", + }}], + "tables_not_found": [], + "vidtorid": { + "oid:0x7a000000000021": "Real ID Not Found" + } + } + }, + "Vnet2": + { + "APPL_DB": + {"keys": + [ + {"DASH_VNET_TABLE:Vnet2": + {"vni": 2000, + "guid": "659c6ce8-26ab-4193-b946-ccc6e8f930b2", + }}], + "tables_not_found": [] + }, + "ASIC_DB": + {"keys": + [], + "tables_not_found": ["ASIC_STATE:SAI_OBJECT_TYPE_VNET"], + } + } + } + ddiff = compare_json_output(expected, result.output) + assert not ddiff, ddiff diff --git a/tests/dump_tests/mock_redis.py b/tests/dump_tests/mock_redis.py new file mode 100644 index 0000000000..32c4900f4a --- /dev/null +++ b/tests/dump_tests/mock_redis.py @@ -0,0 +1,35 @@ +import json +import fnmatch +import base64 + + +class RedisMock(): + + def __init__(self, host="None", port=0, db=0): + return + + def load_file(self, file_name): + 
with open(file_name) as fp: + try: + self.data = json.load(fp) + except json.JSONDecodeError: + print("Json decode error") + self.data = self.encode_data(self.data) + + def encode_data(self, json_data): + new_data = {} + for key, value in json_data.items(): + new_value = {} + for pb, bin_data in value.items(): + bin_pb = pb.encode() + new_value[bin_pb] = base64.b64decode(bin_data.encode()) + new_data[key] = new_value + return new_data + + def hgetall(self, key): + return self.data[key] + + def keys(self, match): + kp = match.replace("[^", "[!") + kys = fnmatch.filter(self.data.keys(), kp) + return [ky.encode() for ky in kys] diff --git a/tests/dump_tests/module_tests/dash_acl_group_test.py b/tests/dump_tests/module_tests/dash_acl_group_test.py new file mode 100644 index 0000000000..ecf2c166e6 --- /dev/null +++ b/tests/dump_tests/module_tests/dash_acl_group_test.py @@ -0,0 +1,78 @@ +import os +import pytest +from deepdiff import DeepDiff +from dump.helper import create_template_dict, populate_mock +try: + from dump.plugins.dash_acl_group import Dash_Acl_Group +except ModuleNotFoundError: + pytest.skip("Skipping Dash tests since it is not supported in this Platform", allow_module_level=True) +from dump.match_infra import MatchEngine, ConnectionPool +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.constants import DEFAULT_NAMESPACE + +# Location for dedicated db's used for UT +module_tests_path = os.path.dirname(__file__) +dump_tests_path = os.path.join(module_tests_path, "../") +tests_path = os.path.join(dump_tests_path, "../") +dump_test_input = os.path.join(tests_path, "dump_input") +dash_input_files_path = os.path.join(dump_test_input, "dash") + +# Define the mock files to read from +dedicated_dbs = {} +dedicated_dbs['APPL_DB'] = os.path.join(dash_input_files_path, "appl_db.json") +dedicated_dbs['ASIC_DB'] = os.path.join(dash_input_files_path, "asic_db.json") + + +@pytest.fixture(scope="class", autouse=True) +def match_engine(): + + 
print("SETUP") + os.environ["VERBOSE"] = "1" + + # Monkey Patch the SonicV2Connector Object + db = SonicV2Connector() + + # popualate the db with mock data + db_names = list(dedicated_dbs.keys()) + try: + populate_mock(db, db_names, dedicated_dbs) + except Exception as e: + assert False, "Mock initialization failed: " + str(e) + + # Initialize connection pool + conn_pool = ConnectionPool() + conn_pool.fill(DEFAULT_NAMESPACE, db, db_names) + + # Initialize match_engine + match_engine = MatchEngine(conn_pool) + yield match_engine + print("TEARDOWN") + os.environ["VERBOSE"] = "0" + + +@pytest.mark.usefixtures("match_engine") +class TestDashAclGroupModule: + def test_working_state(self, match_engine): + """ + Scenario: When the appl info is properly applied and propagated + """ + params = {Dash_Acl_Group.ARG_NAME: "group1", "namespace": ""} + m_dash_acl_group = Dash_Acl_Group(match_engine) + returned = m_dash_acl_group.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["keys"].append("DASH_ACL_GROUP_TABLE:group1") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff + + def test_not_working_state(self, match_engine): + """ + Scenario: Missing Keys + """ + params = {Dash_Acl_Group.ARG_NAME: "group1:rule2", "namespace": ""} + m_dash_acl_group = Dash_Acl_Group(match_engine) + returned = m_dash_acl_group.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["tables_not_found"].append("DASH_ACL_GROUP_TABLE") + ddiff = DeepDiff(returned, expect, ignore_order=True) + print(returned) + assert not ddiff, ddiff diff --git a/tests/dump_tests/module_tests/dash_acl_in_test.py b/tests/dump_tests/module_tests/dash_acl_in_test.py new file mode 100644 index 0000000000..d2344cd126 --- /dev/null +++ b/tests/dump_tests/module_tests/dash_acl_in_test.py @@ -0,0 +1,78 @@ +import os +import pytest +from deepdiff import DeepDiff +from dump.helper import create_template_dict, populate_mock +try: + from 
dump.plugins.dash_acl_in import Dash_Acl_In +except ModuleNotFoundError: + pytest.skip("Skipping Dash tests since it is not supported in this Platform", allow_module_level=True) +from dump.match_infra import MatchEngine, ConnectionPool +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.constants import DEFAULT_NAMESPACE + +# Location for dedicated db's used for UT +module_tests_path = os.path.dirname(__file__) +dump_tests_path = os.path.join(module_tests_path, "../") +tests_path = os.path.join(dump_tests_path, "../") +dump_test_input = os.path.join(tests_path, "dump_input") +dash_input_files_path = os.path.join(dump_test_input, "dash") + +# Define the mock files to read from +dedicated_dbs = {} +dedicated_dbs['APPL_DB'] = os.path.join(dash_input_files_path, "appl_db.json") +dedicated_dbs['ASIC_DB'] = os.path.join(dash_input_files_path, "asic_db.json") + + +@pytest.fixture(scope="class", autouse=True) +def match_engine(): + + print("SETUP") + os.environ["VERBOSE"] = "1" + + # Monkey Patch the SonicV2Connector Object + db = SonicV2Connector() + + # popualate the db with mock data + db_names = list(dedicated_dbs.keys()) + try: + populate_mock(db, db_names, dedicated_dbs) + except Exception as e: + assert False, "Mock initialization failed: " + str(e) + + # Initialize connection pool + conn_pool = ConnectionPool() + conn_pool.fill(DEFAULT_NAMESPACE, db, db_names) + + # Initialize match_engine + match_engine = MatchEngine(conn_pool) + yield match_engine + print("TEARDOWN") + os.environ["VERBOSE"] = "0" + + +@pytest.mark.usefixtures("match_engine") +class TestDashAclInModule: + def test_working_state(self, match_engine): + """ + Scenario: When the appl info is properly applied and propagated + """ + params = {Dash_Acl_In.ARG_NAME: "F4939FEFC47E:1", "namespace": ""} + m_dash_acl_in = Dash_Acl_In(match_engine) + returned = m_dash_acl_in.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + 
expect["APPL_DB"]["keys"].append("DASH_ACL_IN_TABLE:F4939FEFC47E:1") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff + + def test_not_working_state(self, match_engine): + """ + Scenario: Missing Keys + """ + params = {Dash_Acl_In.ARG_NAME: "F4939FEFC47E:2", "namespace": ""} + m_dash_acl_in = Dash_Acl_In(match_engine) + returned = m_dash_acl_in.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["tables_not_found"].append("DASH_ACL_IN_TABLE") + ddiff = DeepDiff(returned, expect, ignore_order=True) + print(returned) + assert not ddiff, ddiff diff --git a/tests/dump_tests/module_tests/dash_acl_out_test.py b/tests/dump_tests/module_tests/dash_acl_out_test.py new file mode 100644 index 0000000000..cbbde5a6d7 --- /dev/null +++ b/tests/dump_tests/module_tests/dash_acl_out_test.py @@ -0,0 +1,78 @@ +import os +import pytest +from deepdiff import DeepDiff +from dump.helper import create_template_dict, populate_mock +try: + from dump.plugins.dash_acl_out import Dash_Acl_Out +except ModuleNotFoundError: + pytest.skip("Skipping Dash tests since it is not supported in this Platform", allow_module_level=True) +from dump.match_infra import MatchEngine, ConnectionPool +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.constants import DEFAULT_NAMESPACE + +# Location for dedicated db's used for UT +module_tests_path = os.path.dirname(__file__) +dump_tests_path = os.path.join(module_tests_path, "../") +tests_path = os.path.join(dump_tests_path, "../") +dump_test_input = os.path.join(tests_path, "dump_input") +dash_input_files_path = os.path.join(dump_test_input, "dash") + +# Define the mock files to read from +dedicated_dbs = {} +dedicated_dbs['APPL_DB'] = os.path.join(dash_input_files_path, "appl_db.json") +dedicated_dbs['ASIC_DB'] = os.path.join(dash_input_files_path, "asic_db.json") + + +@pytest.fixture(scope="class", autouse=True) +def match_engine(): + + print("SETUP") + 
os.environ["VERBOSE"] = "1" + + # Monkey Patch the SonicV2Connector Object + db = SonicV2Connector() + + # popualate the db with mock data + db_names = list(dedicated_dbs.keys()) + try: + populate_mock(db, db_names, dedicated_dbs) + except Exception as e: + assert False, "Mock initialization failed: " + str(e) + + # Initialize connection pool + conn_pool = ConnectionPool() + conn_pool.fill(DEFAULT_NAMESPACE, db, db_names) + + # Initialize match_engine + match_engine = MatchEngine(conn_pool) + yield match_engine + print("TEARDOWN") + os.environ["VERBOSE"] = "0" + + +@pytest.mark.usefixtures("match_engine") +class TestDashAclOutModule: + def test_working_state(self, match_engine): + """ + Scenario: When the appl info is properly applied and propagated + """ + params = {Dash_Acl_Out.ARG_NAME: "ENI0:1", "namespace": ""} + m_dash_acl_out = Dash_Acl_Out(match_engine) + returned = m_dash_acl_out.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["keys"].append("DASH_ACL_OUT_TABLE:ENI0:1") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff + + def test_not_working_state(self, match_engine): + """ + Scenario: Missing Keys + """ + params = {Dash_Acl_Out.ARG_NAME: "ENI0:2", "namespace": ""} + m_dash_acl_out = Dash_Acl_Out(match_engine) + returned = m_dash_acl_out.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["tables_not_found"].append("DASH_ACL_OUT_TABLE") + ddiff = DeepDiff(returned, expect, ignore_order=True) + print(returned) + assert not ddiff, ddiff diff --git a/tests/dump_tests/module_tests/dash_acl_rule_test.py b/tests/dump_tests/module_tests/dash_acl_rule_test.py new file mode 100644 index 0000000000..f2a1365560 --- /dev/null +++ b/tests/dump_tests/module_tests/dash_acl_rule_test.py @@ -0,0 +1,81 @@ +import os +import pytest +from deepdiff import DeepDiff +from dump.helper import create_template_dict, populate_mock +try: + from dump.plugins.dash_acl_rule import 
Dash_Acl_Rule +except ModuleNotFoundError: + pytest.skip("Skipping Dash tests since it is not supported in this Platform", allow_module_level=True) +from dump.match_infra import MatchEngine, ConnectionPool +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.constants import DEFAULT_NAMESPACE +import redis + +# Location for dedicated db's used for UT +module_tests_path = os.path.dirname(__file__) +dump_tests_path = os.path.join(module_tests_path, "../") +tests_path = os.path.join(dump_tests_path, "../") +dump_test_input = os.path.join(tests_path, "dump_input") +dash_input_files_path = os.path.join(dump_test_input, "dash") + +# Define the mock files to read from +dedicated_dbs = {} +dedicated_dbs['APPL_DB'] = os.path.join(dash_input_files_path, "appl_db.json") +dedicated_dbs['ASIC_DB'] = os.path.join(dash_input_files_path, "asic_db.json") + + +@pytest.fixture(scope="class", autouse=True) +def match_engine(): + + print("SETUP") + os.environ["VERBOSE"] = "1" + + # Monkey Patch the SonicV2Connector Object + db = SonicV2Connector() + from ...dump_tests import mock_redis + redis.Redis = mock_redis.RedisMock + + # popualate the db with mock data + db_names = list(dedicated_dbs.keys()) + try: + populate_mock(db, db_names, dedicated_dbs) + except Exception as e: + assert False, "Mock initialization failed: " + str(e) + + # Initialize connection pool + conn_pool = ConnectionPool() + conn_pool.fill(DEFAULT_NAMESPACE, db, db_names) + + # Initialize match_engine + match_engine = MatchEngine(conn_pool) + yield match_engine + print("TEARDOWN") + os.environ["VERBOSE"] = "0" + + +@pytest.mark.usefixtures("match_engine") +class TestDashAclRuleModule: + def test_working_state(self, match_engine): + """ + Scenario: When the appl info is properly applied and propagated + """ + params = {Dash_Acl_Rule.ARG_NAME: "group1:rule1", "namespace": ""} + m_dash_acl_rule = Dash_Acl_Rule(match_engine) + returned = m_dash_acl_rule.execute(params) + expect = 
create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["keys"].append("DASH_ACL_RULE_TABLE:group1:rule1") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff + + def test_not_working_state(self, match_engine): + """ + Scenario: Missing Keys + """ + params = {Dash_Acl_Rule.ARG_NAME: "group1:rule2", "namespace": ""} + m_dash_acl_rule = Dash_Acl_Rule(match_engine) + returned = m_dash_acl_rule.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["tables_not_found"].append("DASH_ACL_RULE_TABLE") + ddiff = DeepDiff(returned, expect, ignore_order=True) + print(returned) + assert not ddiff, ddiff diff --git a/tests/dump_tests/module_tests/dash_appliance_test.py b/tests/dump_tests/module_tests/dash_appliance_test.py new file mode 100644 index 0000000000..0563b09836 --- /dev/null +++ b/tests/dump_tests/module_tests/dash_appliance_test.py @@ -0,0 +1,81 @@ +import os +import pytest +from deepdiff import DeepDiff +from dump.helper import create_template_dict, populate_mock +try: + from dump.plugins.dash_appliance import Dash_Appliance +except ModuleNotFoundError: + pytest.skip("Skipping Dash tests since it is not supported in this Platform", allow_module_level=True) +from dump.match_infra import MatchEngine, ConnectionPool +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.constants import DEFAULT_NAMESPACE +import redis + +# Location for dedicated db's used for UT +module_tests_path = os.path.dirname(__file__) +dump_tests_path = os.path.join(module_tests_path, "../") +tests_path = os.path.join(dump_tests_path, "../") +dump_test_input = os.path.join(tests_path, "dump_input") +dash_input_files_path = os.path.join(dump_test_input, "dash") + +# Define the mock files to read from +dedicated_dbs = {} +dedicated_dbs['APPL_DB'] = os.path.join(dash_input_files_path, "appl_db.json") +dedicated_dbs['ASIC_DB'] = os.path.join(dash_input_files_path, "asic_db.json") + + +@pytest.fixture(scope="class", 
autouse=True) +def match_engine(): + + print("SETUP") + os.environ["VERBOSE"] = "1" + + # Monkey Patch the SonicV2Connector Object + db = SonicV2Connector() + from ...dump_tests import mock_redis + redis.Redis = mock_redis.RedisMock + + # popualate the db with mock data + db_names = list(dedicated_dbs.keys()) + try: + populate_mock(db, db_names, dedicated_dbs) + except Exception as e: + assert False, "Mock initialization failed: " + str(e) + + # Initialize connection pool + conn_pool = ConnectionPool() + conn_pool.fill(DEFAULT_NAMESPACE, db, db_names) + + # Initialize match_engine + match_engine = MatchEngine(conn_pool) + yield match_engine + print("TEARDOWN") + os.environ["VERBOSE"] = "0" + + +@pytest.mark.usefixtures("match_engine") +class TestDashAppplianceModule: + def test_working_state(self, match_engine): + """ + Scenario: When the appl info is properly applied and propagated + """ + params = {Dash_Appliance.ARG_NAME: "123", "namespace": ""} + m_dash_appliance = Dash_Appliance(match_engine) + returned = m_dash_appliance.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["keys"].append("DASH_APPLIANCE_TABLE:123") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff + + def test_not_working_state(self, match_engine): + """ + Scenario: Missing Keys + """ + params = {Dash_Appliance.ARG_NAME: "144", "namespace": ""} + m_dash_appliance = Dash_Appliance(match_engine) + returned = m_dash_appliance.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["tables_not_found"].append("DASH_APPLIANCE_TABLE") + ddiff = DeepDiff(returned, expect, ignore_order=True) + print(returned) + assert not ddiff, ddiff diff --git a/tests/dump_tests/module_tests/dash_eni_test.py b/tests/dump_tests/module_tests/dash_eni_test.py new file mode 100644 index 0000000000..b11e00485a --- /dev/null +++ b/tests/dump_tests/module_tests/dash_eni_test.py @@ -0,0 +1,96 @@ +import os +import pytest +from 
deepdiff import DeepDiff +from dump.helper import create_template_dict, populate_mock +try: + from dump.plugins.dash_eni import Dash_Eni +except ModuleNotFoundError: + pytest.skip("Skipping Dash tests since it is not supported in this Platform", allow_module_level=True) +from dump.match_infra import MatchEngine, ConnectionPool +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.constants import DEFAULT_NAMESPACE + +# Location for dedicated db's used for UT +module_tests_path = os.path.dirname(__file__) +dump_tests_path = os.path.join(module_tests_path, "../") +tests_path = os.path.join(dump_tests_path, "../") +dump_test_input = os.path.join(tests_path, "dump_input") +dash_input_files_path = os.path.join(dump_test_input, "dash") + +# Define the mock files to read from +dedicated_dbs = {} +dedicated_dbs['APPL_DB'] = os.path.join(dash_input_files_path, "appl_db.json") +dedicated_dbs['ASIC_DB'] = os.path.join(dash_input_files_path, "asic_db.json") + + +@pytest.fixture(scope="class", autouse=True) +def match_engine(): + + print("SETUP") + os.environ["VERBOSE"] = "1" + + # Monkey Patch the SonicV2Connector Object + db = SonicV2Connector() + from ...dump_tests import mock_redis + redis_obj = mock_redis.RedisMock() + redis_obj.load_file(dedicated_dbs['APPL_DB']) + + # popualate the db with mock data + db_names = list(dedicated_dbs.keys()) + try: + populate_mock(db, db_names, dedicated_dbs) + except Exception as e: + assert False, "Mock initialization failed: " + str(e) + + # Initialize connection pool + conn_pool = ConnectionPool() + conn_pool.fill(DEFAULT_NAMESPACE, db, db_names) + conn_pool.fill(DEFAULT_NAMESPACE, redis_obj, None, dash_object=True) + + # Initialize match_engine + match_engine = MatchEngine(conn_pool) + yield match_engine + print("TEARDOWN") + os.environ["VERBOSE"] = "0" + + +@pytest.mark.usefixtures("match_engine") +class TestDashEniModule: + def test_working_state(self, match_engine): + """ + Scenario: When the appl info is 
properly applied and propagated to ASIC_DB + """ + params = {Dash_Eni.ARG_NAME: "F4939FEFC47E", "namespace": ""} + m_dash_vnet = Dash_Eni(match_engine) + returned = m_dash_vnet.execute(params) + expect = create_template_dict(dbs=["APPL_DB", "ASIC_DB"]) + expect["APPL_DB"]["keys"].append("DASH_ENI_TABLE:F4939FEFC47E") + expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_ENI:oid:0x73000000000023") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff + + def test_missing_asic_state(self, match_engine): + """ + Scenario: When the appl info is properly applied but not propagated to ASIC_DB + """ + params = {Dash_Eni.ARG_NAME: "eni0", "namespace": ""} + m_dash_vnet = Dash_Eni(match_engine) + returned = m_dash_vnet.execute(params) + expect = create_template_dict(dbs=["APPL_DB", "ASIC_DB"]) + expect["APPL_DB"]["keys"].append("DASH_ENI_TABLE:eni0") + expect["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_ENI") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff + + def test_not_working_state(self, match_engine): + """ + Scenario: Missing Keys + """ + params = {Dash_Eni.ARG_NAME: "Vnet2", "namespace": ""} + m_dash_vnet = Dash_Eni(match_engine) + returned = m_dash_vnet.execute(params) + expect = create_template_dict(dbs=["APPL_DB", "ASIC_DB"]) + expect["APPL_DB"]["tables_not_found"].append("DASH_ENI_TABLE") + ddiff = DeepDiff(returned, expect, ignore_order=True) + print(returned) + assert not ddiff, ddiff diff --git a/tests/dump_tests/module_tests/dash_prefix_test.py b/tests/dump_tests/module_tests/dash_prefix_test.py new file mode 100644 index 0000000000..f2c5e3d350 --- /dev/null +++ b/tests/dump_tests/module_tests/dash_prefix_test.py @@ -0,0 +1,78 @@ +import os +import pytest +from deepdiff import DeepDiff +from dump.helper import create_template_dict, populate_mock +from dump.match_infra import MatchEngine, ConnectionPool +from swsscommon.swsscommon import SonicV2Connector +try: + from 
dump.plugins.dash_prefix_tag import Dash_Prefix_Tag +except ModuleNotFoundError: + pytest.skip("Skipping Dash tests since it is not supported in this Platform", allow_module_level=True) +from utilities_common.constants import DEFAULT_NAMESPACE + +# Location for dedicated db's used for UT +module_tests_path = os.path.dirname(__file__) +dump_tests_path = os.path.join(module_tests_path, "../") +tests_path = os.path.join(dump_tests_path, "../") +dump_test_input = os.path.join(tests_path, "dump_input") +dash_input_files_path = os.path.join(dump_test_input, "dash") + +# Define the mock files to read from +dedicated_dbs = {} +dedicated_dbs['APPL_DB'] = os.path.join(dash_input_files_path, "appl_db.json") +dedicated_dbs['ASIC_DB'] = os.path.join(dash_input_files_path, "asic_db.json") + + +@pytest.fixture(scope="class", autouse=True) +def match_engine(): + + print("SETUP") + os.environ["VERBOSE"] = "1" + + # Monkey Patch the SonicV2Connector Object + db = SonicV2Connector() + + # popualate the db with mock data + db_names = list(dedicated_dbs.keys()) + try: + populate_mock(db, db_names, dedicated_dbs) + except Exception as e: + assert False, "Mock initialization failed: " + str(e) + + # Initialize connection pool + conn_pool = ConnectionPool() + conn_pool.fill(DEFAULT_NAMESPACE, db, db_names) + + # Initialize match_engine + match_engine = MatchEngine(conn_pool) + yield match_engine + print("TEARDOWN") + os.environ["VERBOSE"] = "0" + + +@pytest.mark.usefixtures("match_engine") +class TestDashAclOutModule: + def test_working_state(self, match_engine): + """ + Scenario: When the appl info is properly applied and propagated + """ + params = {Dash_Prefix_Tag.ARG_NAME: "AclTagScale1798", "namespace": ""} + m_dash_prefix_tag = Dash_Prefix_Tag(match_engine) + returned = m_dash_prefix_tag.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["keys"].append("DASH_PREFIX_TAG_TABLE:AclTagScale1798") + ddiff = DeepDiff(returned, expect, ignore_order=True) + 
assert not ddiff, ddiff + + def test_not_working_state(self, match_engine): + """ + Scenario: Missing Keys + """ + params = {Dash_Prefix_Tag.ARG_NAME: "AclTagScale1795", "namespace": ""} + m_dash_prefix_tag = Dash_Prefix_Tag(match_engine) + returned = m_dash_prefix_tag.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["tables_not_found"].append("DASH_PREFIX_TAG_TABLE") + ddiff = DeepDiff(returned, expect, ignore_order=True) + print(returned) + assert not ddiff, ddiff diff --git a/tests/dump_tests/module_tests/dash_qos_test.py b/tests/dump_tests/module_tests/dash_qos_test.py new file mode 100644 index 0000000000..9079488a49 --- /dev/null +++ b/tests/dump_tests/module_tests/dash_qos_test.py @@ -0,0 +1,81 @@ +import os +import pytest +from deepdiff import DeepDiff +from dump.helper import create_template_dict, populate_mock +try: + from dump.plugins.dash_qos import Dash_Qos +except ModuleNotFoundError: + pytest.skip("Skipping Dash tests since it is not supported in this Platform", allow_module_level=True) +from dump.match_infra import MatchEngine, ConnectionPool +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.constants import DEFAULT_NAMESPACE +import redis + +# Location for dedicated db's used for UT +module_tests_path = os.path.dirname(__file__) +dump_tests_path = os.path.join(module_tests_path, "../") +tests_path = os.path.join(dump_tests_path, "../") +dump_test_input = os.path.join(tests_path, "dump_input") +dash_input_files_path = os.path.join(dump_test_input, "dash") + +# Define the mock files to read from +dedicated_dbs = {} +dedicated_dbs['APPL_DB'] = os.path.join(dash_input_files_path, "appl_db.json") +dedicated_dbs['ASIC_DB'] = os.path.join(dash_input_files_path, "asic_db.json") + + +@pytest.fixture(scope="class", autouse=True) +def match_engine(): + + print("SETUP") + os.environ["VERBOSE"] = "1" + + # Monkey Patch the SonicV2Connector Object + db = SonicV2Connector() + from ...dump_tests import 
mock_redis + redis.Redis = mock_redis.RedisMock + + # popualate the db with mock data + db_names = list(dedicated_dbs.keys()) + try: + populate_mock(db, db_names, dedicated_dbs) + except Exception as e: + assert False, "Mock initialization failed: " + str(e) + + # Initialize connection pool + conn_pool = ConnectionPool() + conn_pool.fill(DEFAULT_NAMESPACE, db, db_names) + + # Initialize match_engine + match_engine = MatchEngine(conn_pool) + yield match_engine + print("TEARDOWN") + os.environ["VERBOSE"] = "0" + + +@pytest.mark.usefixtures("match_engine") +class TestDashQosModule: + def test_working_state(self, match_engine): + """ + Scenario: When the appl info is properly applied and propagated + """ + params = {Dash_Qos.ARG_NAME: "qos100", "namespace": ""} + m_dash_qos = Dash_Qos(match_engine) + returned = m_dash_qos.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["keys"].append("DASH_QOS_TABLE:qos100") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff + + def test_not_working_state(self, match_engine): + """ + Scenario: Missing Keys + """ + params = {Dash_Qos.ARG_NAME: "qos2", "namespace": ""} + m_dash_qos = Dash_Qos(match_engine) + returned = m_dash_qos.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["tables_not_found"].append("DASH_QOS_TABLE") + ddiff = DeepDiff(returned, expect, ignore_order=True) + print(returned) + assert not ddiff, ddiff diff --git a/tests/dump_tests/module_tests/dash_route_rule_test.py b/tests/dump_tests/module_tests/dash_route_rule_test.py new file mode 100644 index 0000000000..3ff67b436e --- /dev/null +++ b/tests/dump_tests/module_tests/dash_route_rule_test.py @@ -0,0 +1,88 @@ +import os +import pytest +from deepdiff import DeepDiff +from dump.helper import create_template_dict, populate_mock +try: + from dump.plugins.dash_route_rule import Dash_Route_Rule +except ModuleNotFoundError: + pytest.skip("Skipping Dash tests since it is 
not supported in this Platform", allow_module_level=True) +from dump.match_infra import MatchEngine, ConnectionPool +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.constants import DEFAULT_NAMESPACE + +# Location for dedicated db's used for UT +module_tests_path = os.path.dirname(__file__) +dump_tests_path = os.path.join(module_tests_path, "../") +tests_path = os.path.join(dump_tests_path, "../") +dump_test_input = os.path.join(tests_path, "dump_input") +dash_input_files_path = os.path.join(dump_test_input, "dash") + +# Define the mock files to read from +dedicated_dbs = {} +dedicated_dbs['APPL_DB'] = os.path.join(dash_input_files_path, "appl_db.json") +dedicated_dbs['ASIC_DB'] = os.path.join(dash_input_files_path, "asic_db.json") + + +@pytest.fixture(scope="class", autouse=True) +def match_engine(): + + print("SETUP") + os.environ["VERBOSE"] = "1" + + # Monkey Patch the SonicV2Connector Object + db = SonicV2Connector() + from ...dump_tests import mock_redis + redis_obj = mock_redis.RedisMock() + redis_obj.load_file(dedicated_dbs['APPL_DB']) + + # popualate the db with mock data + db_names = list(dedicated_dbs.keys()) + try: + populate_mock(db, db_names, dedicated_dbs) + except Exception as e: + assert False, "Mock initialization failed: " + str(e) + + # Initialize connection pool + conn_pool = ConnectionPool() + conn_pool.fill(DEFAULT_NAMESPACE, db, db_names) + conn_pool.fill(DEFAULT_NAMESPACE, redis_obj, None, dash_object=True) + + # Initialize match_engine + match_engine = MatchEngine(conn_pool) + yield match_engine + print("TEARDOWN") + os.environ["VERBOSE"] = "0" + + +@pytest.mark.usefixtures("match_engine") +class TestDashRouteRuleModule: + + def test_working_state(self, match_engine): + """ + Scenario: When the appl info is properly applied and propagated + """ + params = {Dash_Route_Rule.ARG_NAME: "F4939FEFC47E:2000:10.0.2.0/24", "namespace": ""} + m_dash_route_rule = Dash_Route_Rule(match_engine) + returned = 
m_dash_route_rule.execute(params) + print(returned) + expect = create_template_dict(dbs=["APPL_DB", "ASIC_DB"]) + expect["APPL_DB"]["keys"].append("DASH_ROUTE_RULE_TABLE:F4939FEFC47E:2000:10.0.2.0/24") + expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_INBOUND_ROUTING_ENTRY:" + "{\"eni_id\":\"oid:0x73000000000023\",\"priority\":" + "\"1\",\"sip\":\"10.0.2.0\",\"sip_mask\":\"255.255.255.0\"" + ",\"switch_id\":\"oid:0x21000000000000\",\"vni\":\"2000\"}") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff + + def test_not_working_state(self, match_engine): + """ + Scenario: Missing Keys + """ + params = {Dash_Route_Rule.ARG_NAME: "F4939FEFC47E:2000:10.0.5.0/24", "namespace": ""} + m_dash_route_rule = Dash_Route_Rule(match_engine) + returned = m_dash_route_rule.execute(params) + expect = create_template_dict(dbs=["APPL_DB", "ASIC_DB"]) + expect["APPL_DB"]["tables_not_found"].append("DASH_ROUTE_RULE_TABLE") + ddiff = DeepDiff(returned, expect, ignore_order=True) + print(returned) + assert not ddiff, ddiff diff --git a/tests/dump_tests/module_tests/dash_route_test.py b/tests/dump_tests/module_tests/dash_route_test.py new file mode 100644 index 0000000000..0140a2db02 --- /dev/null +++ b/tests/dump_tests/module_tests/dash_route_test.py @@ -0,0 +1,99 @@ +import os +import pytest +from deepdiff import DeepDiff +from dump.helper import create_template_dict, populate_mock +try: + from dump.plugins.dash_route import Dash_Route +except ModuleNotFoundError: + pytest.skip("Skipping Dash tests since it is not supported in this Platform", allow_module_level=True) +from dump.match_infra import MatchEngine, ConnectionPool +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.constants import DEFAULT_NAMESPACE + + +# Location for dedicated db's used for UT +module_tests_path = os.path.dirname(__file__) +dump_tests_path = os.path.join(module_tests_path, "../") +tests_path = os.path.join(dump_tests_path, "../") 
+dump_test_input = os.path.join(tests_path, "dump_input") +dash_input_files_path = os.path.join(dump_test_input, "dash") + +# Define the mock files to read from +dedicated_dbs = {} +dedicated_dbs['APPL_DB'] = os.path.join(dash_input_files_path, "appl_db.json") +dedicated_dbs['ASIC_DB'] = os.path.join(dash_input_files_path, "asic_db.json") + + +@pytest.fixture(scope="class", autouse=True) +def match_engine(): + + print("SETUP") + os.environ["VERBOSE"] = "1" + + # Monkey Patch the SonicV2Connector Object + db = SonicV2Connector() + from ...dump_tests import mock_redis + redis_obj = mock_redis.RedisMock() + redis_obj.load_file(dedicated_dbs['APPL_DB']) + + # popualate the db with mock data + db_names = list(dedicated_dbs.keys()) + try: + populate_mock(db, db_names, dedicated_dbs) + except Exception as e: + assert False, "Mock initialization failed: " + str(e) + + # Initialize connection pool + conn_pool = ConnectionPool() + conn_pool.fill(DEFAULT_NAMESPACE, db, db_names) + conn_pool.fill(DEFAULT_NAMESPACE, redis_obj, None, dash_object=True) + + # Initialize match_engine + match_engine = MatchEngine(conn_pool) + yield match_engine + print("TEARDOWN") + os.environ["VERBOSE"] = "0" + + +@pytest.mark.usefixtures("match_engine") +class TestDashRouteModule: + def test_missing_asic_state(self, match_engine): + """ + Scenario: When the appl info is properly applied and propagated to ASIC_DB + """ + params = {Dash_Route.ARG_NAME: "eni0:12.1.1.0/24", "namespace": ""} + m_dash_route = Dash_Route(match_engine) + returned = m_dash_route.execute(params) + expect = create_template_dict(dbs=["APPL_DB", "ASIC_DB"]) + expect["APPL_DB"]["keys"].append("DASH_ROUTE_TABLE:eni0:12.1.1.0/24") + expect["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff + + def test_working_state(self, match_engine): + """ + Scenario: When the appl info is properly applied but not 
propagated to ASIC_DB + """ + params = {Dash_Route.ARG_NAME: "F4939FEFC47E:20.2.2.0/24", "namespace": ""} + m_dash_route = Dash_Route(match_engine) + returned = m_dash_route.execute(params) + expect = create_template_dict(dbs=["APPL_DB", "ASIC_DB"]) + expect["APPL_DB"]["keys"].append("DASH_ROUTE_TABLE:F4939FEFC47E:20.2.2.0/24") + expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_OUTBOUND_ROUTING_ENTRY" + ":{\"destination\":\"20.2.2.0/24\",\"eni_id\":\"oid:" + "0x73000000000023\",\"switch_id\":\"oid:0x21000000000000\"}") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff + + def test_not_working_state(self, match_engine): + """ + Scenario: Missing Keys + """ + params = {Dash_Route.ARG_NAME: "eni0:12.2.0./24", "namespace": ""} + m_dash_route = Dash_Route(match_engine) + returned = m_dash_route.execute(params) + expect = create_template_dict(dbs=["APPL_DB", "ASIC_DB"]) + expect["APPL_DB"]["tables_not_found"].append("DASH_ROUTE_TABLE") + ddiff = DeepDiff(returned, expect, ignore_order=True) + print(returned) + assert not ddiff, ddiff diff --git a/tests/dump_tests/module_tests/dash_vnet_mapping_test.py b/tests/dump_tests/module_tests/dash_vnet_mapping_test.py new file mode 100644 index 0000000000..65e1b10abd --- /dev/null +++ b/tests/dump_tests/module_tests/dash_vnet_mapping_test.py @@ -0,0 +1,81 @@ +import os +import pytest +from deepdiff import DeepDiff +from dump.helper import create_template_dict, populate_mock +try: + from dump.plugins.dash_vnet_mapping import Dash_Vnet_mapping +except ModuleNotFoundError: + pytest.skip("Skipping Dash tests since it is not supported in this Platform", allow_module_level=True) +from dump.match_infra import MatchEngine, ConnectionPool +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.constants import DEFAULT_NAMESPACE +import redis + +# Location for dedicated db's used for UT +module_tests_path = os.path.dirname(__file__) +dump_tests_path = 
os.path.join(module_tests_path, "../") +tests_path = os.path.join(dump_tests_path, "../") +dump_test_input = os.path.join(tests_path, "dump_input") +dash_input_files_path = os.path.join(dump_test_input, "dash") + +# Define the mock files to read from +dedicated_dbs = {} +dedicated_dbs['APPL_DB'] = os.path.join(dash_input_files_path, "appl_db.json") +dedicated_dbs['ASIC_DB'] = os.path.join(dash_input_files_path, "asic_db.json") + + +@pytest.fixture(scope="class", autouse=True) +def match_engine(): + + print("SETUP") + os.environ["VERBOSE"] = "1" + + # Monkey Patch the SonicV2Connector Object + db = SonicV2Connector() + from ...dump_tests import mock_redis + redis.Redis = mock_redis.RedisMock + + # popualate the db with mock data + db_names = list(dedicated_dbs.keys()) + try: + populate_mock(db, db_names, dedicated_dbs) + except Exception as e: + assert False, "Mock initialization failed: " + str(e) + + # Initialize connection pool + conn_pool = ConnectionPool() + conn_pool.fill(DEFAULT_NAMESPACE, db, db_names) + + # Initialize match_engine + match_engine = MatchEngine(conn_pool) + yield match_engine + print("TEARDOWN") + os.environ["VERBOSE"] = "0" + + +@pytest.mark.usefixtures("match_engine") +class TestDashVnetMappingModule: + def test_working_state(self, match_engine): + """ + Scenario: When the appl info is properly applied and propagated + """ + params = {Dash_Vnet_mapping.ARG_NAME: "Vnet1:12.1.1.1", "namespace": ""} + m_dash_vnet_mapping = Dash_Vnet_mapping(match_engine) + returned = m_dash_vnet_mapping.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["keys"].append("DASH_VNET_MAPPING_TABLE:Vnet1:12.1.1.1") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff + + def test_not_working_state(self, match_engine): + """ + Scenario: Missing Keys + """ + params = {Dash_Vnet_mapping.ARG_NAME: "Vnet2:12.1.1.1", "namespace": ""} + m_dash_vnet_mapping = Dash_Vnet_mapping(match_engine) + returned = 
m_dash_vnet_mapping.execute(params) + expect = create_template_dict(dbs=["APPL_DB"]) + expect["APPL_DB"]["tables_not_found"].append("DASH_VNET_MAPPING_TABLE") + ddiff = DeepDiff(returned, expect, ignore_order=True) + print(returned) + assert not ddiff, ddiff diff --git a/tests/dump_tests/module_tests/dash_vnet_test.py b/tests/dump_tests/module_tests/dash_vnet_test.py new file mode 100644 index 0000000000..ac7adbfc78 --- /dev/null +++ b/tests/dump_tests/module_tests/dash_vnet_test.py @@ -0,0 +1,95 @@ +import os +import pytest +from deepdiff import DeepDiff +from dump.helper import create_template_dict, populate_mock +try: + from dump.plugins.dash_vnet import Dash_Vnet +except ModuleNotFoundError: + pytest.skip("Skipping Dash tests since it is not supported in this Platform", allow_module_level=True) +from dump.match_infra import MatchEngine, ConnectionPool +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.constants import DEFAULT_NAMESPACE + +# Location for dedicated db's used for UT +module_tests_path = os.path.dirname(__file__) +dump_tests_path = os.path.join(module_tests_path, "../") +tests_path = os.path.join(dump_tests_path, "../") +dump_test_input = os.path.join(tests_path, "dump_input") +dash_input_files_path = os.path.join(dump_test_input, "dash") + +# Define the mock files to read from +dedicated_dbs = {} +dedicated_dbs['APPL_DB'] = os.path.join(dash_input_files_path, "appl_db.json") +dedicated_dbs['ASIC_DB'] = os.path.join(dash_input_files_path, "asic_db.json") + + +@pytest.fixture(scope="class", autouse=True) +def match_engine(): + + print("SETUP") + os.environ["VERBOSE"] = "1" + + # Monkey Patch the SonicV2Connector Object + db = SonicV2Connector() + from ...dump_tests import mock_redis + redis_obj = mock_redis.RedisMock() + redis_obj.load_file(dedicated_dbs['APPL_DB']) + + # popualate the db with mock data + db_names = list(dedicated_dbs.keys()) + try: + populate_mock(db, db_names, dedicated_dbs) + except Exception as e: + 
assert False, "Mock initialization failed: " + str(e) + + # Initialize connection pool + conn_pool = ConnectionPool() + conn_pool.fill(DEFAULT_NAMESPACE, db, db_names) + conn_pool.fill(DEFAULT_NAMESPACE, redis_obj, None, dash_object=True) + + # Initialize match_engine + match_engine = MatchEngine(conn_pool) + yield match_engine + print("TEARDOWN") + os.environ["VERBOSE"] = "0" + + +@pytest.mark.usefixtures("match_engine") +class TestDashVnetModule: + def test_working_state(self, match_engine): + """ + Scenario: When the appl info is properly applied and propagated + """ + params = {Dash_Vnet.ARG_NAME: "Vnet1", "namespace": ""} + m_dash_vnet = Dash_Vnet(match_engine) + returned = m_dash_vnet.execute(params) + expect = create_template_dict(dbs=["APPL_DB", "ASIC_DB"]) + expect["APPL_DB"]["keys"].append("DASH_VNET_TABLE:Vnet1") + expect["ASIC_DB"]["keys"].append("ASIC_STATE:SAI_OBJECT_TYPE_VNET:oid:0x7a000000000021") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff + + def test_absent_asic_keys(self, match_engine): + """ + Scenario: Missing ASIC_DB Keys + """ + params = {Dash_Vnet.ARG_NAME: "Vnet2", "namespace": ""} + m_dash_vnet = Dash_Vnet(match_engine) + returned = m_dash_vnet.execute(params) + expect = create_template_dict(dbs=["APPL_DB", "ASIC_DB"]) + expect["APPL_DB"]["keys"].append("DASH_VNET_TABLE:Vnet2") + expect["ASIC_DB"]["tables_not_found"].append("ASIC_STATE:SAI_OBJECT_TYPE_VNET") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff + + def test_not_working_state(self, match_engine): + """ + Scenario: Missing Entries + """ + params = {Dash_Vnet.ARG_NAME: "Vnet3", "namespace": ""} + m_dash_vnet = Dash_Vnet(match_engine) + returned = m_dash_vnet.execute(params) + expect = create_template_dict(dbs=["APPL_DB", "ASIC_DB"]) + expect["APPL_DB"]["tables_not_found"].append("DASH_VNET_TABLE") + ddiff = DeepDiff(returned, expect, ignore_order=True) + assert not ddiff, ddiff diff --git 
a/tests/generic_config_updater/change_applier_test.py b/tests/generic_config_updater/change_applier_test.py index 7aad111f18..6a8926f013 100644 --- a/tests/generic_config_updater/change_applier_test.py +++ b/tests/generic_config_updater/change_applier_test.py @@ -72,28 +72,25 @@ def debug_print(msg): print(msg) - -# Mimics os.system call for sonic-cfggen -d --print-data > filename +# Mimics os.system call for `sonic-cfggen -d --print-data` output def subprocess_Popen_cfggen(cmd, *args, **kwargs): global running_config - # Extract file name from kwargs if 'stdout' is a file object - stdout = kwargs.get('stdout') - if hasattr(stdout, 'name'): - fname = stdout.name + stdout = kwargs.get('stdout', None) + + if stdout is None: + output = json.dumps(running_config, indent=4) + elif isinstance(stdout, int) and stdout == -1: + output = json.dumps(running_config, indent=4) else: - raise ValueError("stdout is not a file") + raise ValueError("stdout must be set to subprocess.PIPE or omitted for capturing output") - # Write the running configuration to the file specified in stdout - with open(fname, "w") as s: - json.dump(running_config, s, indent=4) - class MockPopen: def __init__(self): - self.returncode = 0 # Simulate successful command execution + self.returncode = 0 def communicate(self): - return "", "" # Simulate empty stdout and stderr + return output.encode(), "".encode() return MockPopen() @@ -225,7 +222,7 @@ def vlan_validate(old_cfg, new_cfg, keys): class TestChangeApplier(unittest.TestCase): - @patch("generic_config_updater.change_applier.subprocess.Popen") + @patch("generic_config_updater.gu_common.subprocess.Popen") @patch("generic_config_updater.change_applier.get_config_db") @patch("generic_config_updater.change_applier.set_config") def test_change_apply(self, mock_set, mock_db, mock_subprocess_Popen): diff --git a/tests/generic_config_updater/gcu_feature_patch_application_test.py b/tests/generic_config_updater/gcu_feature_patch_application_test.py index 
db625e8cd1..27d9ebf216 100644 --- a/tests/generic_config_updater/gcu_feature_patch_application_test.py +++ b/tests/generic_config_updater/gcu_feature_patch_application_test.py @@ -6,13 +6,15 @@ from mock import patch import generic_config_updater.change_applier +import generic_config_updater.gu_common import generic_config_updater.patch_sorter as ps import generic_config_updater.generic_updater as gu from .gutest_helpers import Files from generic_config_updater.gu_common import ConfigWrapper, PatchWrapper running_config = {} - + + def set_entry(config_db, tbl, key, data): global running_config if data != None: @@ -26,9 +28,11 @@ def set_entry(config_db, tbl, key, data): if not running_config[tbl]: running_config.pop(tbl) -def get_running_config(): + +def get_running_config(scope="localhost"): return running_config + class TestFeaturePatchApplication(unittest.TestCase): def setUp(self): self.config_wrapper = ConfigWrapper() @@ -87,13 +91,13 @@ def create_patch_applier(self, config): config_wrapper = self.config_wrapper config_wrapper.get_config_db_as_json = MagicMock(side_effect=get_running_config) change_applier = generic_config_updater.change_applier.ChangeApplier() - change_applier._get_running_config = MagicMock(side_effect=get_running_config) patch_wrapper = PatchWrapper(config_wrapper) return gu.PatchApplier(config_wrapper=config_wrapper, patch_wrapper=patch_wrapper, changeapplier=change_applier) + @patch('generic_config_updater.change_applier.get_config_db_as_json', side_effect=get_running_config) @patch("generic_config_updater.change_applier.get_config_db") @patch("generic_config_updater.change_applier.set_config") - def run_single_success_case_applier(self, data, mock_set, mock_db): + def run_single_success_case_applier(self, data, mock_set, mock_db, mock_get_config_db_as_json): current_config = data["current_config"] expected_config = data["expected_config"] patch = jsonpatch.JsonPatch(data["patch"]) @@ -121,7 +125,8 @@ def 
run_single_success_case_applier(self, data, mock_set, mock_db): self.assertEqual(simulated_config, expected_config) @patch("generic_config_updater.change_applier.get_config_db") - def run_single_failure_case_applier(self, data, mock_db): + @patch('generic_config_updater.change_applier.get_config_db_as_json', side_effect=get_running_config) + def run_single_failure_case_applier(self, data, mock_db, mock_get_config_db_as_json): current_config = data["current_config"] patch = jsonpatch.JsonPatch(data["patch"]) expected_error_substrings = data["expected_error_substrings"] diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index 21f50e0b7b..4a16a5ca4f 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -361,13 +361,6 @@ def test_validate_lanes__same_valid_lanes_multi_ports_no_spaces__failure(self): }} self.validate_lanes(config, '67') - def test_validate_lanes_default_value_duplicate_check(self): - config = {"PORT": { - "Ethernet0": {"lanes": "0", "speed": "10000"}, - "Ethernet1": {"lanes": "0", "speed": "10000"}, - }} - self.validate_lanes(config) - def validate_lanes(self, config_db, expected_error=None): # Arrange config_wrapper = gu_common.ConfigWrapper() diff --git a/tests/generic_config_updater/multiasic_change_applier_test.py b/tests/generic_config_updater/multiasic_change_applier_test.py index 0102cfff00..743969737d 100644 --- a/tests/generic_config_updater/multiasic_change_applier_test.py +++ b/tests/generic_config_updater/multiasic_change_applier_test.py @@ -1,12 +1,38 @@ +import jsonpointer import unittest from importlib import reload from unittest.mock import patch, MagicMock from generic_config_updater.generic_updater import extract_scope +from generic_config_updater.generic_updater import GenericConfigUpdaterError import generic_config_updater.change_applier import generic_config_updater.services_validator import 
generic_config_updater.gu_common +def mock_get_running_config_side_effect(scope): + print(f"mocked_value_for_{scope}") + return { + "tables": { + "ACL_TABLE": { + "services_to_validate": ["aclservice"], + "validate_commands": ["acl_loader show table"] + }, + "PORT": { + "services_to_validate": ["portservice"], + "validate_commands": ["show interfaces status"] + } + }, + "services": { + "aclservice": { + "validate_commands": ["acl_loader show table"] + }, + "portservice": { + "validate_commands": ["show interfaces status"] + } + } + } + + class TestMultiAsicChangeApplier(unittest.TestCase): @patch('sonic_py_common.multi_asic.is_multi_asic') @@ -25,6 +51,12 @@ def test_extract_scope_multiasic(self, mock_is_multi_asic): "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": ( True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status" ), + "/asic1/PORTCHANNEL_INTERFACE/PortChannel106|10.0.0.6/31": ( + True, "asic1", "/PORTCHANNEL_INTERFACE/PortChannel106|10.0.0.6/31" + ), + "/asic1/PORTCHANNEL_INTERFACE/PortChannel106|10.0.0.6~131": ( + True, "asic1", "/PORTCHANNEL_INTERFACE/PortChannel106|10.0.0.6~131" + ), "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" ), @@ -71,7 +103,11 @@ def test_extract_scope_multiasic(self, mock_is_multi_asic): scope, remainder = extract_scope(test_path) assert(scope == expectedscope) assert(remainder == expectedremainder) - except Exception: + except AssertionError: + assert(not result) + except GenericConfigUpdaterError: + assert(not result) + except jsonpointer.JsonPointerException: assert(not result) @patch('sonic_py_common.multi_asic.is_multi_asic') @@ -134,10 +170,14 @@ def test_extract_scope_singleasic(self, mock_is_multi_asic): scope, remainder = extract_scope(test_path) assert(scope == expectedscope) assert(remainder == expectedremainder) - except Exception: + except AssertionError: + assert(not result) + except GenericConfigUpdaterError: + assert(not result) + 
except jsonpointer.JsonPointerException: assert(not result) - @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + @patch('generic_config_updater.change_applier.get_config_db_as_json', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) def test_apply_change_default_scope(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector @@ -145,26 +185,7 @@ def test_apply_change_default_scope(self, mock_ConfigDBConnector, mock_get_runni mock_ConfigDBConnector.return_value = mock_db # Setup mock for json.load to return some running configuration - mock_get_running_config.return_value = { - "tables": { - "ACL_TABLE": { - "services_to_validate": ["aclservice"], - "validate_commands": ["acl_loader show table"] - }, - "PORT": { - "services_to_validate": ["portservice"], - "validate_commands": ["show interfaces status"] - } - }, - "services": { - "aclservice": { - "validate_commands": ["acl_loader show table"] - }, - "portservice": { - "validate_commands": ["show interfaces status"] - } - } - } + mock_get_running_config.side_effect = mock_get_running_config_side_effect # Instantiate ChangeApplier with the default scope applier = generic_config_updater.change_applier.ChangeApplier() @@ -178,34 +199,13 @@ def test_apply_change_default_scope(self, mock_ConfigDBConnector, mock_get_runni # Assert ConfigDBConnector called with the correct namespace mock_ConfigDBConnector.assert_called_once_with(use_unix_socket_path=True, namespace="") - @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + @patch('generic_config_updater.change_applier.get_config_db_as_json', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) def test_apply_change_given_scope(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector mock_db = MagicMock() 
mock_ConfigDBConnector.return_value = mock_db - - # Setup mock for json.load to return some running configuration - mock_get_running_config.return_value = { - "tables": { - "ACL_TABLE": { - "services_to_validate": ["aclservice"], - "validate_commands": ["acl_loader show table"] - }, - "PORT": { - "services_to_validate": ["portservice"], - "validate_commands": ["show interfaces status"] - } - }, - "services": { - "aclservice": { - "validate_commands": ["acl_loader show table"] - }, - "portservice": { - "validate_commands": ["show interfaces status"] - } - } - } + mock_get_running_config.side_effect = mock_get_running_config_side_effect # Instantiate ChangeApplier with the default scope applier = generic_config_updater.change_applier.ChangeApplier(scope="asic0") @@ -219,7 +219,7 @@ def test_apply_change_given_scope(self, mock_ConfigDBConnector, mock_get_running # Assert ConfigDBConnector called with the correct scope mock_ConfigDBConnector.assert_called_once_with(use_unix_socket_path=True, namespace="asic0") - @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + @patch('generic_config_updater.change_applier.get_config_db_as_json', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) def test_apply_change_failure(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector @@ -241,7 +241,7 @@ def test_apply_change_failure(self, mock_ConfigDBConnector, mock_get_running_con self.assertTrue('Failed to get running config' in str(context.exception)) - @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + @patch('generic_config_updater.change_applier.get_config_db_as_json', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) def test_apply_patch_with_empty_tables_failure(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector @@ 
-249,14 +249,17 @@ def test_apply_patch_with_empty_tables_failure(self, mock_ConfigDBConnector, moc mock_ConfigDBConnector.return_value = mock_db # Setup mock for json.load to simulate configuration where crucial tables are unexpectedly empty - mock_get_running_config.return_value = { - "tables": { - # Simulate empty tables or missing crucial configuration - }, - "services": { - # Normally, services would be listed here + def mock_get_empty_running_config_side_effect(): + return { + "tables": { + # Simulate empty tables or missing crucial configuration + }, + "services": { + # Normally, services would be listed here + } } - } + + mock_get_running_config.side_effect = mock_get_empty_running_config_side_effect # Instantiate ChangeApplier with a specific scope to simulate applying changes in a multi-asic environment applier = generic_config_updater.change_applier.ChangeApplier(scope="asic0") diff --git a/tests/mock_tables/appl_db.json b/tests/mock_tables/appl_db.json index e967caa758..d755f46428 100644 --- a/tests/mock_tables/appl_db.json +++ b/tests/mock_tables/appl_db.json @@ -305,6 +305,40 @@ "type": "dynamic", "vni": "200" }, + "_STP_VLAN_TABLE:Vlan500": { + "bridge_id": "8064b86a97e24e9c", + "max_age": "20", + "hello_time": "2", + "forward_delay": "15", + "hold_time": "1", + "root_bridge_id": "0064b86a97e24e9c", + "root_path_cost": "600", + "desig_bridge_id": "806480a235f281ec", + "root_port": "Root", + "root_max_age": "20", + "root_hello_time": "2", + "root_forward_delay": "15", + "stp_instance": "0", + "topology_change_count": "1", + "last_topology_change": "0" + }, + "_STP_VLAN_PORT_TABLE:Vlan500:Ethernet4": { + "port_num": "4", + "priority": "128", + "path_cost": "200", + "port_state": "FORWARDING", + "desig_cost": "400", + "desig_root": "0064b86a97e24e9c", + "desig_bridge": "806480a235f281ec", + "desig_port": "4", + "bpdu_sent": "10", + "bpdu_received": "15", + "config_bpdu_sent": "10", + "config_bpdu_received": "2", + "tc_sent": "15", + "tc_received": "5", 
+ "root_guard_timer": "0" + }, "MUX_CABLE_TABLE:Ethernet32": { "state": "active" }, diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 187efed553..3deca74255 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -2798,5 +2798,28 @@ "dhcpv6_servers": [ "fc02:2000::1" ] - } + }, + "STP|GLOBAL": { + "forward_delay": "15", + "hello_time": "2", + "max_age": "20", + "mode": "pvst", + "priority": "32768", + "rootguard_timeout": "30" + }, + "STP_PORT|Ethernet4": { + "bpdu_guard": "true", + "bpdu_guard_do_disable": "false", + "enabled": "true", + "portfast": "true", + "root_guard": "true", + "uplink_fast": "false" + }, + "STP_VLAN|Vlan500": { + "enabled": "true", + "forward_delay": "15", + "hello_time": "2", + "max_age": "20", + "priority": "32768" + } } diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 49ffaeedd8..bad7882cb6 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -1681,5 +1681,8 @@ }, "PORT_CAPACITY_TABLE|PORT_CAPACITY_DATA" : { "capacity": "80000" + }, + "STP_TABLE|GLOBAL": { + "max_stp_inst": "510" } } diff --git a/tests/mocked_libs/__init__.py b/tests/mocked_libs/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/mocked_libs/blkinfo.py b/tests/mocked_libs/blkinfo.py new file mode 100644 index 0000000000..6d5d809837 --- /dev/null +++ b/tests/mocked_libs/blkinfo.py @@ -0,0 +1,90 @@ +mock_json_op = \ + [ + { + "name": "sdx", + "kname": "sdx", + "fstype": "", + "label": "", + "mountpoint": "", + "size": "3965714432", + "maj:min": "8:0", + "rm": "0", + "model": "SMART EUSB", + "vendor": "SMART EUSB", + "serial": "SPG200807J1", + "hctl": "2:0:0:0", + "tran": "usb", + "rota": "1", + "type": "disk", + "ro": "0", + "owner": "", + "group": "", + "mode": "brw-rw----", + "children": [ + { + "name": "sdx1", + "kname": "sdx1", + "fstype": "ext4", + "label": "", + "mountpoint": "/host", + "size": 
"3964665856", + "maj:min": "8:1", + "rm": "0", + "model": " ", + "vendor": " ", + "serial": "", + "hctl": "", + "tran": "", + "rota": "1", + "type": "part", + "ro": "0", + "owner": "", + "group": "", + "mode": "brw-rw----", + "children": [], + "parents": ["sdx"], + "statistics": { + "major": "8", + "minor": "1", + "kname": "sdx1", + "reads_completed": "22104", + "reads_merged": "5299", + "sectors_read": "1091502", + "time_spent_reading_ms": "51711", + "writes_completed": "11283", + "writes_merged": "13401", + "sectors_written": "443784", + "time_spent_ writing": "133398", + "ios_in_progress": "0", + "time_spent_doing_ios_ms": "112040", + "weighted_time_ios_ms": "112040", + }, + } + ], + "parents": [], + "statistics": { + "major": "8", + "minor": "0", + "kname": "sdx", + "reads_completed": "22151", + "reads_merged": "5299", + "sectors_read": "1093606", + "time_spent_reading_ms": "52005", + "writes_completed": "11283", + "writes_merged": "13401", + "sectors_written": "443784", + "time_spent_ writing": "133398", + "ios_in_progress": "0", + "time_spent_doing_ios_ms": "112220", + "weighted_time_ios_ms": "112220", + }, + } + ] + + +class BlkDiskInfo: + def __init__(self): + return + + def get_disks(self, filters): + return mock_json_op diff --git a/tests/mocked_libs/psutil.py b/tests/mocked_libs/psutil.py new file mode 100644 index 0000000000..f43f024d1c --- /dev/null +++ b/tests/mocked_libs/psutil.py @@ -0,0 +1,6 @@ +from collections import namedtuple + + +def disk_partitions(): + sdiskpart = namedtuple('sdiskpart', ['mountpoint', 'device']) + return [sdiskpart(mountpoint="/host", device="/dev/sdx1")] diff --git a/tests/portchannel_test.py b/tests/portchannel_test.py index 9b8bf56863..d1223bd771 100644 --- a/tests/portchannel_test.py +++ b/tests/portchannel_test.py @@ -34,7 +34,7 @@ def test_add_portchannel_with_invalid_name_yang_validation(self): print(result.output) assert result.exit_code != 0 assert "Error: PortChan005 is invalid!, name should have prefix 
'PortChannel' and suffix '<0-9999>'" in result.output - + def test_add_portchannel_with_invalid_name_adhoc_validation(self): config.ADHOC_VALIDATION = True runner = CliRunner() @@ -46,7 +46,15 @@ def test_add_portchannel_with_invalid_name_adhoc_validation(self): print(result.exit_code) print(result.output) assert result.exit_code != 0 - assert "Error: PortChan005 is invalid!, name should have prefix 'PortChannel' and suffix '<0-9999>'" in result.output + assert "Error: PortChan005 is invalid!, name should have prefix 'PortChannel' and suffix '<0-9999>' " \ + "and its length should not exceed 15 characters" in result.output + + result = runner.invoke(config.config.commands["portchannel"].commands["add"], ["PortChanl00000"], obj=obj) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Error: PortChanl00000 is invalid!, name should have prefix 'PortChannel' and suffix '<0-9999>' and " \ + "its length should not exceed 15 characters" in result.output @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=JsonPatchConflict)) @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) @@ -119,7 +127,7 @@ def test_add_portchannel_with_invalid_fast_rate(self, fast_rate): runner = CliRunner() db = Db() obj = {'db':db.cfgdb} - + # add a portchannel with invalid fats rate result = runner.invoke(config.config.commands["portchannel"].commands["add"], ["PortChannel0005", "--fast-rate", fast_rate], obj=obj) print(result.exit_code) diff --git a/tests/portstat_test.py b/tests/portstat_test.py index 9c6f94d96a..e7499b94fd 100644 --- a/tests/portstat_test.py +++ b/tests/portstat_test.py @@ -35,11 +35,11 @@ """ intf_fec_counters = """\ - IFACE STATE FEC_CORR FEC_UNCORR FEC_SYMBOL_ERR ---------- ------- ---------- ------------ ---------------- -Ethernet0 D 130,402 3 4 -Ethernet4 N/A 110,412 1 0 -Ethernet8 N/A 100,317 0 0 + IFACE 
STATE FEC_CORR FEC_UNCORR FEC_SYMBOL_ERR FEC_PRE_BER FEC_POST_BER +--------- ------- ---------- ------------ ---------------- ------------- -------------- +Ethernet0 D 130,402 3 4 N/A N/A +Ethernet4 N/A 110,412 1 0 N/A N/A +Ethernet8 N/A 100,317 0 0 N/A N/A """ intf_fec_counters_fec_hist = """\ diff --git a/tests/show_bmp_test.py b/tests/show_bmp_test.py index c0bc556d10..1e81966583 100644 --- a/tests/show_bmp_test.py +++ b/tests/show_bmp_test.py @@ -27,8 +27,8 @@ def test_show_bmp_neighbor_table(self): {"peer_addr": "10.0.0.61", "peer_asn": "64915", "peer_rd": "300", - "peer_port": "5000", - "local_addr": "10.1.0.32", + "remote_port": "5000", + "local_ip": "10.1.0.32", "local_asn": "65100", "local_port": "6000", "sent_cap": "supports-mpbgp,supports-graceful-restart", @@ -38,8 +38,8 @@ def test_show_bmp_neighbor_table(self): {"peer_addr": "10.0.0.62", "peer_asn": "64915", "peer_rd": "300", - "peer_port": "5000", - "local_addr": "10.1.0.32", + "remote_port": "5000", + "local_ip": "10.1.0.32", "local_asn": "65100", "local_port": "6000", "sent_cap": "supports-mpbgp,supports-graceful-restart", diff --git a/tests/show_test.py b/tests/show_test.py index d81192367a..819f197343 100644 --- a/tests/show_test.py +++ b/tests/show_test.py @@ -1040,6 +1040,12 @@ def test_show_ztp(self, mock_run_command): assert result.exit_code == 0 mock_run_command.assert_called_with(['ztp', 'status', '--verbose'], display_cmd=True) + @patch('show.main.run_command') + def test_show_banner(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['banner']) + assert result.exit_code == 0 + def teardown(self): print('TEAR DOWN') diff --git a/tests/sonic_package_manager/test_manager.py b/tests/sonic_package_manager/test_manager.py index a3a311ebb2..26e838ce6d 100644 --- a/tests/sonic_package_manager/test_manager.py +++ b/tests/sonic_package_manager/test_manager.py @@ -324,7 +324,7 @@ def test_manager_installation_version_range(package_manager): 
package_manager.install(f'test-package>=1.6.0') -def test_manager_upgrade(package_manager, sonic_fs): +def test_manager_upgrade(package_manager, sonic_fs, mock_run_command): package_manager.install('test-package-6=1.5.0') package = package_manager.get_installed_package('test-package-6') @@ -333,6 +333,15 @@ def test_manager_upgrade(package_manager, sonic_fs): assert upgraded_package.entry.version == Version.parse('2.0.0') assert upgraded_package.entry.default_reference == package.entry.default_reference + mock_run_command.assert_has_calls( + [ + call(['systemctl', 'stop', 'test-package-6']), + call(['systemctl', 'disable', 'test-package-6']), + call(['systemctl', 'enable', 'test-package-6']), + call(['systemctl', 'start', 'test-package-6']), + ] + ) + def test_manager_package_reset(package_manager, sonic_fs): package_manager.install('test-package-6=1.5.0') @@ -370,7 +379,7 @@ def __init__(self, dockerd_sock): class Image: def __init__(self, image_id): self.image_id = image_id - + def save(self, named): return ["named: {}".format(named).encode()] diff --git a/tests/ssdutil_test.py b/tests/ssdutil_test.py index bd57b0cbe7..dc27526ea7 100644 --- a/tests/ssdutil_test.py +++ b/tests/ssdutil_test.py @@ -1,8 +1,21 @@ +import os import sys import argparse from unittest.mock import patch, MagicMock import sonic_platform_base # noqa: F401 +tests_path = os.path.dirname(os.path.abspath(__file__)) + +# Add mocked_libs path so that the file under test +# can load mocked modules from there +mocked_libs_path = os.path.join(tests_path, "mocked_libs") # noqa: E402,F401 +sys.path.insert(0, mocked_libs_path) + +from .mocked_libs import psutil # noqa: E402,F401 +from .mocked_libs.blkinfo import BlkDiskInfo # noqa: E402,F401 + +sys.modules['os.stat'] = MagicMock() +sys.modules['os.major'] = MagicMock(return_value=8) sys.modules['sonic_platform'] = MagicMock() sys.modules['sonic_platform_base.sonic_ssd.ssd_generic'] = MagicMock() @@ -32,8 +45,33 @@ def get_vendor_output(self): class 
TestSsdutil: + @patch('os.geteuid', MagicMock(return_value=0)) + @patch('os.stat', MagicMock(st_rdev=2049)) + @patch('os.major', MagicMock(return_value=8)) + def test_get_default_disk(self): + (default_device, disk_type) = ssdutil.get_default_disk() + + assert default_device == "/dev/sdx" + assert disk_type == 'usb' + + @patch('os.geteuid', MagicMock(return_value=0)) + @patch('os.stat', MagicMock(st_rdev=2049)) + @patch('os.major', MagicMock(return_value=8)) + @patch('psutil.disk_partitions', MagicMock(return_value=None)) + def test_get_default_disk_none_partitions(self): + (default_device, disk_type) = ssdutil.get_default_disk() + + assert default_device == "/dev/sda" + assert disk_type is None + + def test_is_number_valueerror(self): + outcome = ssdutil.is_number("nope") + assert outcome is False + @patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', MagicMock(return_value=("test_path", ""))) # noqa: E501 @patch('os.geteuid', MagicMock(return_value=0)) + @patch('os.stat', MagicMock(st_rdev=2049)) + @patch('os.major', MagicMock(return_value=8)) def test_sonic_storage_path(self): with patch('argparse.ArgumentParser.parse_args', MagicMock()) as mock_args: # noqa: E501 diff --git a/tests/stp_test.py b/tests/stp_test.py new file mode 100644 index 0000000000..44a93065cc --- /dev/null +++ b/tests/stp_test.py @@ -0,0 +1,414 @@ +import os +import re +import pytest +from click.testing import CliRunner + +import config.main as config +import show.main as show +from utilities_common.db import Db +from .mock_tables import dbconnector + + +EXPECTED_SHOW_SPANNING_TREE_OUTPUT = """\ +Spanning-tree Mode: PVST + +VLAN 500 - STP instance 0 +-------------------------------------------------------------------- +STP Bridge Parameters: +Bridge Bridge Bridge Bridge Hold LastTopology Topology +Identifier MaxAge Hello FwdDly Time Change Change +hex sec sec sec sec sec cnt +8064b86a97e24e9c 20 2 15 1 0 1 + +RootBridge RootPath DesignatedBridge RootPort Max Hel Fwd 
+Identifier Cost Identifier Age lo Dly +hex hex sec sec sec +0064b86a97e24e9c 600 806480a235f281ec Root 20 2 15 + +STP Port Parameters: +Port Prio Path Port Uplink State Designated Designated Designated +Name rity Cost Fast Fast Cost Root Bridge +Ethernet4 128 200 N N FORWARDING 400 0064b86a97e24e9c 806480a235f281ec +""" + +EXPECTED_SHOW_SPANNING_TREE_VLAN_OUTPUT = """\ + +VLAN 500 - STP instance 0 +-------------------------------------------------------------------- +STP Bridge Parameters: +Bridge Bridge Bridge Bridge Hold LastTopology Topology +Identifier MaxAge Hello FwdDly Time Change Change +hex sec sec sec sec sec cnt +8064b86a97e24e9c 20 2 15 1 0 1 + +RootBridge RootPath DesignatedBridge RootPort Max Hel Fwd +Identifier Cost Identifier Age lo Dly +hex hex sec sec sec +0064b86a97e24e9c 600 806480a235f281ec Root 20 2 15 + +STP Port Parameters: +Port Prio Path Port Uplink State Designated Designated Designated +Name rity Cost Fast Fast Cost Root Bridge +Ethernet4 128 200 N N FORWARDING 400 0064b86a97e24e9c 806480a235f281ec +""" + +EXPECTED_SHOW_SPANNING_TREE_STATISTICS_OUTPUT = """\ +VLAN 500 - STP instance 0 +-------------------------------------------------------------------- +PortNum BPDU Tx BPDU Rx TCN Tx TCN Rx +Ethernet4 10 15 15 5 +""" + +EXPECTED_SHOW_SPANNING_TREE_BPDU_GUARD_OUTPUT = """\ +PortNum Shutdown Port Shut + Configured due to BPDU guard +------------------------------------------- +Ethernet4 No NA +""" + +EXPECTED_SHOW_SPANNING_TREE_ROOT_GUARD_OUTPUT = """\ +Root guard timeout: 30 secs + +Port VLAN Current State +------------------------------------------- +Ethernet4 500 Consistent state +""" + + +class TestStp(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "1" + print("SETUP") + + # Fixture for initializing the CliRunner + @pytest.fixture(scope="module") + def runner(self): + return CliRunner() + + # Fixture for initializing the Db + @pytest.fixture(scope="module") + def db(self): + return Db() + + 
def test_show_spanning_tree(self, runner, db): + result = runner.invoke(show.cli.commands["spanning-tree"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert (re.sub(r'\s+', ' ', result.output.strip())) == (re.sub( + r'\s+', ' ', EXPECTED_SHOW_SPANNING_TREE_OUTPUT.strip())) + + def test_show_spanning_tree_vlan(self, runner, db): + result = runner.invoke(show.cli.commands["spanning-tree"].commands["vlan"], ["500"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert re.sub(r'\s+', ' ', result.output.strip()) == re.sub( + r'\s+', ' ', EXPECTED_SHOW_SPANNING_TREE_VLAN_OUTPUT.strip()) + + def test_show_spanning_tree_statistics(self, runner, db): + result = runner.invoke(show.cli.commands["spanning-tree"].commands["statistics"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert re.sub(r'\s+', ' ', result.output.strip()) == re.sub( + r'\s+', ' ', EXPECTED_SHOW_SPANNING_TREE_STATISTICS_OUTPUT.strip()) + + def test_show_spanning_tree_statistics_vlan(self, runner, db): + result = runner.invoke( + show.cli.commands["spanning-tree"].commands["statistics"].commands["vlan"], ["500"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert re.sub(r'\s+', ' ', result.output.strip()) == re.sub( + r'\s+', ' ', EXPECTED_SHOW_SPANNING_TREE_STATISTICS_OUTPUT.strip()) + + def test_show_spanning_tree_bpdu_guard(self, runner, db): + result = runner.invoke(show.cli.commands["spanning-tree"].commands["bpdu_guard"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert re.sub(r'\s+', ' ', result.output.strip()) == re.sub( + r'\s+', ' ', EXPECTED_SHOW_SPANNING_TREE_BPDU_GUARD_OUTPUT.strip()) + + def test_show_spanning_tree_root_guard(self, runner, db): + result = runner.invoke(show.cli.commands["spanning-tree"].commands["root_guard"], [], obj=db) + 
print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert re.sub(r'\s+', ' ', result.output.strip()) == re.sub( + r'\s+', ' ', EXPECTED_SHOW_SPANNING_TREE_ROOT_GUARD_OUTPUT.strip()) + + @pytest.mark.parametrize("command, args, expected_exit_code, expected_output", [ + # Disable PVST + (config.config.commands["spanning-tree"].commands["disable"], ["pvst"], 0, None), + # Enable PVST + (config.config.commands["spanning-tree"].commands["enable"], ["pvst"], 0, None), + # Add VLAN and member + (config.config.commands["vlan"].commands["add"], ["500"], 0, None), + (config.config.commands["vlan"].commands["member"].commands["add"], ["500", "Ethernet4"], 0, None), + # Attempt to enable PVST when it is already enabled + (config.config.commands["spanning-tree"].commands["enable"], ["pvst"], 2, "PVST is already configured") + ]) + def test_disable_enable_global_pvst(self, runner, db, command, args, expected_exit_code, expected_output): + # Execute the command + result = runner.invoke(command, args, obj=db) + + # Print for debugging + print(result.exit_code) + print(result.output) + + # Check the exit code + assert result.exit_code == expected_exit_code + + # Check the output if an expected output is defined + if expected_output: + assert expected_output in result.output + + @pytest.mark.parametrize("command, args, expected_exit_code, expected_output", [ + # Disable pvst + (config.config.commands["spanning-tree"].commands["disable"], ["pvst"], 0, None), + # Attempt enabling STP interface without global STP enabled + (config.config.commands["spanning-tree"].commands["interface"].commands["enable"], + ["Ethernet4"], 2, "Global STP is not enabled"), + # Enable pvst + (config.config.commands["spanning-tree"].commands["enable"], ["pvst"], 0, None), + # Configure interface priority and cost + (config.config.commands["spanning-tree"].commands["interface"].commands["priority"], + ["Ethernet4", "16"], 0, None), + 
(config.config.commands["spanning-tree"].commands["interface"].commands["cost"], + ["Ethernet4", "500"], 0, None), + # Disable and enable interface spanning tree + (config.config.commands["spanning-tree"].commands["interface"].commands["disable"], ["Ethernet4"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["enable"], ["Ethernet4"], 0, None), + # Configure portfast disable and enable + (config.config.commands["spanning-tree"].commands["interface"].commands["portfast"].commands["disable"], + ["Ethernet4"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["portfast"].commands["enable"], + ["Ethernet4"], 0, None), + # Configure uplink fast disable and enable + (config.config.commands["spanning-tree"].commands["interface"].commands["uplink_fast"].commands["disable"], + ["Ethernet4"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["uplink_fast"].commands["enable"], + ["Ethernet4"], 0, None), + # Configure BPDU guard enable and disable with shutdown + (config.config.commands["spanning-tree"].commands["interface"].commands["bpdu_guard"].commands["enable"], + ["Ethernet4"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["bpdu_guard"].commands["disable"], + ["Ethernet4"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["bpdu_guard"].commands["enable"], + ["Ethernet4", "--shutdown"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["bpdu_guard"].commands["disable"], + ["Ethernet4"], 0, None), + # Configure root guard enable and disable + (config.config.commands["spanning-tree"].commands["interface"].commands["root_guard"].commands["enable"], + ["Ethernet4"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["root_guard"].commands["disable"], + ["Ethernet4"], 0, None), + # Invalid cost and priority values + 
(config.config.commands["spanning-tree"].commands["interface"].commands["cost"], ["Ethernet4", "0"], + 2, "STP interface path cost must be in range 1-200000000"), + (config.config.commands["spanning-tree"].commands["interface"].commands["cost"], ["Ethernet4", "2000000000"], + 2, "STP interface path cost must be in range 1-200000000"), + (config.config.commands["spanning-tree"].commands["interface"].commands["priority"], ["Ethernet4", "1000"], + 2, "STP interface priority must be in range 0-240"), + # Attempt to enable STP on interface with various conflicts + (config.config.commands["spanning-tree"].commands["interface"].commands["enable"], ["Ethernet4"], + 2, "STP is already enabled for"), + (config.config.commands["spanning-tree"].commands["interface"].commands["enable"], ["Ethernet0"], + 2, "has ip address"), + (config.config.commands["spanning-tree"].commands["interface"].commands["enable"], ["Ethernet120"], + 2, "is a portchannel member port"), + (config.config.commands["spanning-tree"].commands["interface"].commands["enable"], ["Ethernet20"], + 2, "has no VLAN configured") + ]) + def test_stp_validate_interface_params(self, runner, db, command, args, expected_exit_code, expected_output): + # Execute the command + result = runner.invoke(command, args, obj=db) + + # Print for debugging + print(result.exit_code) + print(result.output) + + # Check the exit code + assert result.exit_code == expected_exit_code + + # Check the output if an expected output is defined + if expected_output: + assert expected_output in result.output + + @pytest.mark.parametrize("command, args, expected_exit_code, expected_output", [ + (config.config.commands["spanning-tree"].commands["disable"], ["pvst"], 0, None), + (config.config.commands["spanning-tree"].commands["enable"], ["pvst"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["interface"].commands["cost"], + ["500", "Ethernet4", "200"], 0, None), + 
(config.config.commands["spanning-tree"].commands["vlan"].commands["interface"].commands["priority"], + ["500", "Ethernet4", "32"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["interface"].commands["cost"], + ["500", "Ethernet4", "0"], 2, "STP interface path cost must be in range 1-200000000"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["interface"].commands["cost"], + ["500", "Ethernet4", "2000000000"], 2, "STP interface path cost must be in range 1-200000000"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["interface"].commands["priority"], + ["500", "Ethernet4", "1000"], 2, "STP per vlan port priority must be in range 0-240"), + (config.config.commands["vlan"].commands["add"], ["99"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["interface"].commands["priority"], + ["99", "Ethernet4", "16"], 2, "is not member of"), + (config.config.commands["vlan"].commands["del"], ["99"], 0, None), + (config.config.commands["vlan"].commands["member"].commands["del"], ["500", "Ethernet4"], 0, None), + (config.config.commands["vlan"].commands["del"], ["500"], 0, None) + ]) + def test_stp_validate_vlan_interface_params(self, runner, db, command, args, expected_exit_code, expected_output): + # Execute the command + result = runner.invoke(command, args, obj=db) + # Output result information + print(result.exit_code) + print(result.output) + + # Check exit code + assert result.exit_code == expected_exit_code + + # If an expected output is defined, check that as well + if expected_output is not None: + assert expected_output in result.output + + @pytest.mark.parametrize("command, args, expected_exit_code, expected_output", [ + (config.config.commands["spanning-tree"].commands["disable"], ["pvst"], 0, None), + (config.config.commands["spanning-tree"].commands["enable"], ["pvst"], 0, None), + # Add VLAN and member + (config.config.commands["vlan"].commands["add"], 
["500"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["hello"], ["500", "3"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["max_age"], ["500", "21"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["forward_delay"], ["500", "16"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["priority"], ["500", "4096"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["hello"], ["500", "0"], + 2, "STP hello timer must be in range 1-10"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["hello"], ["500", "20"], + 2, "STP hello timer must be in range 1-10"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["forward_delay"], ["500", "2"], + 2, "STP forward delay value must be in range 4-30"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["forward_delay"], ["500", "42"], + 2, "STP forward delay value must be in range 4-30"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["max_age"], ["500", "4"], + 2, "STP max age value must be in range 6-40"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["max_age"], ["500", "45"], + 2, "STP max age value must be in range 6-40"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["forward_delay"], ["500", "4"], + 2, "2*(forward_delay-1) >= max_age >= 2*(hello_time +1 )"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["priority"], ["500", "65536"], + 2, "STP bridge priority must be in range 0-61440"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["priority"], ["500", "8000"], + 2, "STP bridge priority must be multiple of 4096"), + (config.config.commands["vlan"].commands["del"], ["500"], 0, None) + ]) + def test_stp_validate_vlan_timer_and_priority_params(self, runner, db, + command, args, expected_exit_code, 
expected_output): + # Execute the command + result = runner.invoke(command, args, obj=db) + + # Print for debugging + print(result.exit_code) + print(result.output) + + # Check the exit code + assert result.exit_code == expected_exit_code + + # Check the output if there's an expected output + if expected_output: + assert expected_output in result.output + + @pytest.mark.parametrize("command, args, expected_exit_code, expected_output", [ + # Disable PVST globally + (config.config.commands["spanning-tree"].commands["disable"], ["pvst"], 0, None), + # Add VLAN 500 and assign a member port + (config.config.commands["vlan"].commands["add"], ["500"], 0, None), + (config.config.commands["vlan"].commands["member"].commands["add"], ["500", "Ethernet4"], 0, None), + # Enable PVST globally + (config.config.commands["spanning-tree"].commands["enable"], ["pvst"], 0, None), + # Add VLAN 600 + (config.config.commands["vlan"].commands["add"], ["600"], 0, None), + # Disable and then enable spanning-tree on VLAN 600 + (config.config.commands["spanning-tree"].commands["vlan"].commands["disable"], ["600"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["enable"], ["600"], 0, None), + # Attempt to delete VLAN 600 while STP is enabled + (config.config.commands["vlan"].commands["del"], ["600"], 0, None), + # Enable STP on non-existing VLAN 1010 + (config.config.commands["spanning-tree"].commands["vlan"].commands["enable"], ["1010"], 2, "doesn't exist"), + # Disable STP on non-existing VLAN 1010 + (config.config.commands["spanning-tree"].commands["vlan"].commands["disable"], ["1010"], 2, "doesn't exist"), + ]) + def test_add_vlan_enable_pvst(self, runner, db, command, args, expected_exit_code, expected_output): + # Execute the command + result = runner.invoke(command, args, obj=db) + + # Print for debugging + print(result.exit_code) + print(result.output) + + # Check the exit code + assert result.exit_code == expected_exit_code + + # Check the output if an 
expected output is defined + if expected_output: + assert expected_output in result.output + + @pytest.mark.parametrize("command, args, expected_exit_code, expected_output", [ + # Valid cases + (config.config.commands["spanning-tree"].commands["hello"], ["3"], 0, None), + (config.config.commands["spanning-tree"].commands["forward_delay"], ["16"], 0, None), + (config.config.commands["spanning-tree"].commands["max_age"], ["22"], 0, None), + (config.config.commands["spanning-tree"].commands["priority"], ["8192"], 0, None), + (config.config.commands["spanning-tree"].commands["root_guard_timeout"], ["500"], 0, None), + # Invalid hello timer values + (config.config.commands["spanning-tree"].commands["hello"], ["0"], 2, + "STP hello timer must be in range 1-10"), + (config.config.commands["spanning-tree"].commands["hello"], ["20"], 2, + "STP hello timer must be in range 1-10"), + # Invalid forward delay values + (config.config.commands["spanning-tree"].commands["forward_delay"], ["2"], 2, + "STP forward delay value must be in range 4-30"), + (config.config.commands["spanning-tree"].commands["forward_delay"], ["50"], 2, + "STP forward delay value must be in range 4-30"), + # Invalid max age values + (config.config.commands["spanning-tree"].commands["max_age"], ["5"], 2, + "STP max age value must be in range 6-40"), + (config.config.commands["spanning-tree"].commands["max_age"], ["45"], 2, + "STP max age value must be in range 6-40"), + # Consistency check for forward delay and max age + (config.config.commands["spanning-tree"].commands["forward_delay"], ["4"], 2, + "2*(forward_delay-1) >= max_age >= 2*(hello_time +1 )"), + # Invalid root guard timeout values + (config.config.commands["spanning-tree"].commands["root_guard_timeout"], ["4"], 2, + "STP root guard timeout must be in range 5-600"), + (config.config.commands["spanning-tree"].commands["root_guard_timeout"], ["700"], 2, + "STP root guard timeout must be in range 5-600"), + # Invalid priority values + 
(config.config.commands["spanning-tree"].commands["priority"], ["65536"], 2, + "STP bridge priority must be in range 0-61440"), + (config.config.commands["spanning-tree"].commands["priority"], ["8000"], 2, + "STP bridge priority must be multiple of 4096"), + (config.config.commands["vlan"].commands["member"].commands["del"], ["500", "Ethernet4"], 0, None), + (config.config.commands["vlan"].commands["del"], ["500"], 0, None) + ]) + def test_stp_validate_global_timer_and_priority_params(self, runner, db, command, + args, expected_exit_code, expected_output): + # Execute the command + result = runner.invoke(command, args, obj=db) + + # Print for debugging + print(result.exit_code) + print(result.output) + + # Check the exit code + assert result.exit_code == expected_exit_code + + # Check the output if an expected output is defined + if expected_output: + assert expected_output in result.output + + @classmethod + def teardown_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "0" + print("TEARDOWN") + dbconnector.load_namespace_config() + dbconnector.dedicated_dbs.clear() diff --git a/tests/subintf_test.py b/tests/subintf_test.py index 795958c7ae..ec73d6c620 100644 --- a/tests/subintf_test.py +++ b/tests/subintf_test.py @@ -24,7 +24,7 @@ def test_add_del_subintf_short_name(self): runner = CliRunner() db = Db() obj = {'db':db.cfgdb} - + result = runner.invoke(config.config.commands["subinterface"].commands["add"], ["Eth0.102", "1002"], obj=obj) print(result.exit_code, result.output) assert result.exit_code == 0 @@ -53,35 +53,23 @@ def test_add_del_subintf_with_long_name(self): runner = CliRunner() db = Db() obj = {'db':db.cfgdb} - + result = runner.invoke(config.config.commands["subinterface"].commands["add"], ["Ethernet0.102"], obj=obj) print(result.exit_code, result.output) assert result.exit_code == 0 assert ('Ethernet0.102') in db.cfgdb.get_table('VLAN_SUB_INTERFACE') assert db.cfgdb.get_table('VLAN_SUB_INTERFACE')['Ethernet0.102']['admin_status'] == 'up' - result 
= runner.invoke(config.config.commands["subinterface"].commands["add"], ["PortChannel0004.104"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code == 0 - assert ('PortChannel0004.104') in db.cfgdb.get_table('VLAN_SUB_INTERFACE') - assert db.cfgdb.get_table('VLAN_SUB_INTERFACE')['PortChannel0004.104']['admin_status'] == 'up' - result = runner.invoke(config.config.commands["subinterface"].commands["del"], ["Ethernet0.102"], obj=obj) print(result.exit_code, result.output) assert result.exit_code == 0 assert ('Ethernet0.102') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') - result = runner.invoke(config.config.commands["subinterface"].commands["del"], ["PortChannel0004.104"], obj=obj) - print(result.exit_code, result.output) - assert result.exit_code == 0 - assert ('PortChannel0004.104') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE') - - def test_add_existing_subintf_again(self): runner = CliRunner() db = Db() obj = {'db':db.cfgdb} - + result = runner.invoke(config.config.commands["subinterface"].commands["add"], ["Ethernet0.102"], obj=obj) print(result.exit_code, result.output) assert result.exit_code == 0 @@ -104,7 +92,7 @@ def test_delete_non_existing_subintf(self): runner = CliRunner() db = Db() obj = {'db':db.cfgdb} - + result = runner.invoke(config.config.commands["subinterface"].commands["del"], ["Ethernet0.102"], obj=obj) print(result.exit_code, result.output) assert result.exit_code != 0 diff --git a/tests/syslog_multi_asic_test.py b/tests/syslog_multi_asic_test.py index 7933edcd66..c1a136582c 100644 --- a/tests/syslog_multi_asic_test.py +++ b/tests/syslog_multi_asic_test.py @@ -279,3 +279,19 @@ def test_disable_syslog_rate_limit_feature(self, setup_cmd_module): ['database', '-n', 'asic0'] ) assert result.exit_code == 0 + + @mock.patch('config.syslog.clicommon.run_command') + def test_config_log_level(self, mock_run, setup_cmd_module): + _, config = setup_cmd_module + db = Db() + runner = CliRunner() + + mock_run.return_value = 
('something', 0) + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG', '-n', 'asic0'], obj=db + ) + assert result.exit_code == 0 + cfg_db = db.cfgdb_clients['asic0'] + data = cfg_db.get_entry('LOGGER', 'component') + assert data.get('LOGLEVEL') == 'DEBUG' diff --git a/tests/syslog_test.py b/tests/syslog_test.py index c1cbee1127..e77f6d0e6c 100644 --- a/tests/syslog_test.py +++ b/tests/syslog_test.py @@ -484,3 +484,73 @@ def side_effect(*args, **kwargs): config.config.commands["syslog"].commands["rate-limit-feature"].commands["disable"], obj=db ) assert result.exit_code == SUCCESS + + @mock.patch('config.syslog.clicommon.run_command') + def test_config_log_level(self, mock_run): + db = Db() + db.cfgdb.set_entry('LOGGER', 'log1', {'require_manual_refresh': 'true'}) + + runner = CliRunner() + + mock_run.return_value = ('something', 0) + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG'], obj=db + ) + assert result.exit_code == SUCCESS + data = db.cfgdb.get_entry('LOGGER', 'component') + assert data.get('LOGLEVEL') == 'DEBUG' + + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG', '--pid', '123'], obj=db + ) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG', '--container', 'pmon', '--pid', '123'], obj=db + ) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG', '--container', 'pmon', '--program', 'xcvrd'], obj=db + ) + assert result.exit_code == SUCCESS + + @mock.patch('config.syslog.clicommon.run_command') + def test_config_log_level_negative(self, mock_run): + db = Db() + + runner = CliRunner() + + mock_run.return_value = ('something', 0) + result = runner.invoke( + 
config.config.commands["syslog"].commands["level"], + ['-i', 'log1', '-l', 'DEBUG', '--container', 'pmon'], obj=db + ) + assert result.exit_code != SUCCESS + + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'log1', '-l', 'DEBUG', '--program', 'xcvrd'], obj=db + ) + assert result.exit_code != SUCCESS + + mock_run.reset_mock() + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'log1', '-l', 'DEBUG', '--container', 'swss', '--program', 'orchagent'], obj=db + ) + assert result.exit_code == SUCCESS + # Verify it does not send signal to orchagent if require_manual_refresh is not true + assert mock_run.call_count == 0 + + mock_run.return_value = ('something', -1) + db.cfgdb.set_entry('LOGGER', 'log1', {'require_manual_refresh': 'true'}) + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'log1', '-l', 'DEBUG', '--container', 'pmon', '--program', 'xcvrd'], obj=db + ) + assert result.exit_code != SUCCESS diff --git a/tests/vlan_test.py b/tests/vlan_test.py index fc3569b87d..8b0ce1b617 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -574,6 +574,15 @@ def test_config_vlan_del_member_with_invalid_port(self): assert result.exit_code != 0 assert "Error: Invalid VLAN ID 4097 (2-4094)" in result.output + def test_config_vlan_add_member_with_invalid_long_name(self): + runner = CliRunner() + result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], + ["123456789012", "Ethernet4"]) + print(result.exit_code) + print(result.output) + assert result.exit_code != 0 + assert "Error: Invalid VLAN ID 123456789012 (2-4094)" in result.output + def test_config_vlan_add_member_with_nonexist_vlanid(self): runner = CliRunner() result = runner.invoke(config.config.commands["vlan"].commands["member"].commands["add"], ["1001", "Ethernet4"]) diff --git a/tests/vnet_route_check_test.py b/tests/vnet_route_check_test.py index 092a89e2f9..10d97f21a3 
100644 --- a/tests/vnet_route_check_test.py +++ b/tests/vnet_route_check_test.py @@ -341,7 +341,9 @@ def get(self, key): db_conns = {"APPL_DB": APPL_DB, "ASIC_DB": ASIC_DB, "COUNTERS_DB": CNTR_DB} -def conn_side_effect(arg, _): + + +def conn_side_effect(arg, _, __): return db_conns[arg] diff --git a/tests/vrf_test.py b/tests/vrf_test.py index 323f61dee7..a329c36c98 100644 --- a/tests/vrf_test.py +++ b/tests/vrf_test.py @@ -41,7 +41,7 @@ def test_vrf_show(self): Loopback0 Po0002.101 """ - + result = runner.invoke(show.cli.commands['vrf'], [], obj=db) dbconnector.dedicated_dbs = {} assert result.exit_code == 0 @@ -65,7 +65,7 @@ def test_vrf_bind_unbind(self): Loopback0 Po0002.101 """ - + result = runner.invoke(show.cli.commands['vrf'], [], obj=db) dbconnector.dedicated_dbs = {} assert result.exit_code == 0 @@ -81,7 +81,7 @@ def test_vrf_bind_unbind(self): assert result.exit_code == 0 assert 'Ethernet4' not in db.cfgdb.get_table('INTERFACE') assert result.output == expected_output_unbind - + expected_output_unbind = "Interface Loopback0 IP disabled and address(es) removed due to unbinding VRF.\n" result = runner.invoke(config.config.commands["interface"].commands["vrf"].commands["unbind"], ["Loopback0"], obj=vrf_obj) @@ -108,7 +108,7 @@ def test_vrf_bind_unbind(self): assert result.exit_code == 0 assert 'PortChannel002' not in db.cfgdb.get_table('PORTCHANNEL_INTERFACE') assert result.output == expected_output_unbind - + vrf_obj = {'config_db':db.cfgdb, 'namespace':DEFAULT_NAMESPACE} state_db = SonicV2Connector(use_unix_socket_path=True, namespace='') state_db.connect(state_db.STATE_DB, False) @@ -203,7 +203,7 @@ def test_vrf_bind_unbind(self): Loopback0 Po0002.101 """ - + result = runner.invoke(show.cli.commands['vrf'], [], obj=db) dbconnector.dedicated_dbs = {} assert result.exit_code == 0 @@ -213,16 +213,16 @@ def test_vrf_add_del(self): runner = CliRunner() db = Db() vrf_obj = {'config_db':db.cfgdb, 'namespace':db.db.namespace} - + result = 
runner.invoke(config.config.commands["vrf"].commands["add"], ["Vrf100"], obj=vrf_obj) assert ('Vrf100') in db.cfgdb.get_table('VRF') assert result.exit_code == 0 - + result = runner.invoke(config.config.commands["vrf"].commands["add"], ["Vrf1"], obj=vrf_obj) assert "VRF Vrf1 already exists!" in result.output assert ('Vrf1') in db.cfgdb.get_table('VRF') assert result.exit_code != 0 - + expected_output_del = "VRF Vrf1 deleted and all associated IP addresses removed.\n" result = runner.invoke(config.config.commands["vrf"].commands["del"], ["Vrf1"], obj=vrf_obj) assert result.exit_code == 0 @@ -230,7 +230,7 @@ def test_vrf_add_del(self): assert ('Vrf1') not in db.cfgdb.get_table('VRF') result = runner.invoke(config.config.commands["vrf"].commands["del"], ["Vrf200"], obj=vrf_obj) - assert result.exit_code != 0 + assert result.exit_code != 0 assert ('Vrf200') not in db.cfgdb.get_table('VRF') assert "VRF Vrf200 does not exist!" in result.output @@ -245,25 +245,33 @@ def test_invalid_vrf_name(self): assert result.exit_code != 0 assert ('vrf-blue') not in db.cfgdb.get_table('VRF') assert expected_output in result.output - + result = runner.invoke(config.config.commands["vrf"].commands["add"], ["VRF2"], obj=obj) assert result.exit_code != 0 assert ('VRF2') not in db.cfgdb.get_table('VRF') assert expected_output in result.output - + result = runner.invoke(config.config.commands["vrf"].commands["add"], ["VrF10"], obj=obj) assert result.exit_code != 0 assert ('VrF10') not in db.cfgdb.get_table('VRF') assert expected_output in result.output - + result = runner.invoke(config.config.commands["vrf"].commands["del"], ["vrf-blue"], obj=obj) assert result.exit_code != 0 assert expected_output in result.output - + result = runner.invoke(config.config.commands["vrf"].commands["del"], ["VRF2"], obj=obj) assert result.exit_code != 0 assert expected_output in result.output - + result = runner.invoke(config.config.commands["vrf"].commands["del"], ["VrF10"], obj=obj) assert result.exit_code 
!= 0 assert expected_output in result.output + + expected_output = """\ +Error: 'vrf_name' length should not exceed 15 characters +""" + result = runner.invoke(config.config.commands["vrf"].commands["add"], ["VrfNameTooLong!!!"], obj=obj) + assert result.exit_code != 0 + assert ('VrfNameTooLong!!!') not in db.cfgdb.get_table('VRF') + assert expected_output in result.output diff --git a/tests/vrrp_test.py b/tests/vrrp_test.py new file mode 100644 index 0000000000..bd33738fa5 --- /dev/null +++ b/tests/vrrp_test.py @@ -0,0 +1,1499 @@ +import os +from unittest import mock + +from click.testing import CliRunner + +import config.main as config +from utilities_common.db import Db +import utilities_common.bgp_util as bgp_util + + +class TestConfigVRRP(object): + _old_run_bgp_command = None + + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "1" + cls._old_run_bgp_command = bgp_util.run_bgp_command + bgp_util.run_bgp_command = mock.MagicMock( + return_value=cls.mock_run_bgp_command()) + print("SETUP") + + ''' Tests for VRRPv4 and VRRPv6 ''' + + def mock_run_bgp_command(): + return "" + + def test_add_del_vrrp_instance_without_vip(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10.10.10.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10.10.10.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet63 9.9.9.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet63", "9.9.9.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet63', '9.9.9.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp remove Ethernet63 7 + result = 
runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet63", "7"], obj=obj) + print(result.exit_code, result.output) + assert "Ethernet63 dose not configured the vrrp instance 7!" in result.output + assert result.exit_code != 0 + + # config int vrrp add Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["add"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + + # config int vrrp add Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["add"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert "Ethernet64 has already configured the vrrp instance 8!" in result.output + assert result.exit_code != 0 + + # config int vrrp add Ethernet63 7 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["add"], + ["Ethernet63", "7"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet63', '7') in db.cfgdb.get_table('VRRP') + + # config int vrrp remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP') + + # config int vrrp remove Ethernet63 7 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet63", "7"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet63', '7') not in db.cfgdb.get_table('VRRP') + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["add"], + ["Ethernt64", "8"], obj=obj) + print(result.exit_code, 
result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["add"], + ["Ethernet2", "7"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernt64", "8"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet2", "7"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config int ip remove Ethernet64 10.10.10.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet63 9.9.9.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet63", "9.9.9.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet63', '9.9.9.1/24') not in db.cfgdb.get_table('INTERFACE') + + def test_add_del_vrrp6_instance_without_vip(self): + db = Db() + 
runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 100::64/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "100::64/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '100::64/64') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet63 99::64/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet63", "99::64/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet63', '99::64/64') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp6 add Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["add"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + + # config int vrrp6 add Ethernet63 7 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["add"], + ["Ethernet63", "7"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet63', '7') in db.cfgdb.get_table('VRRP6') + + # config int vrrp6 add Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["add"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert "Ethernet64 has already configured the Vrrpv6 instance 8!" 
in result.output + assert result.exit_code != 0 + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["add"], + ["Ethernt64", "8"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["add"], + ["Ethernet2", "7"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config int vrrp6 remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP6') + + # config int ip remove Ethernet64 100::64/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "100::64/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '100::64/64') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet63 99::64/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet63", "99::64/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet63', '99::64/64') not in db.cfgdb.get_table('INTERFACE') + + def test_add_del_vrrp_instance(self): + runner = CliRunner() + db = Db() + obj = {'config_db': db.cfgdb} + + # config int ip add 
Ethernet64 10.10.10.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10.10.10.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet63 9.9.9.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet63", "9.9.9.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet63', '9.9.9.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet62 8.8.8.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet62", "8.8.8.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet62', '8.8.8.1/24') in db.cfgdb.get_table('INTERFACE') + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernt64", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet2", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config int vrrp ip add Ethernet64 8 10.10.10.8/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet64", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert 
db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.8/24'] + + # config int vrrp ip add Ethernet64 8 10.10.10.16/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet64", "8", "10.10.10.16/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.8/24', '10.10.10.16/24'] + + # config int vrrp ip add Ethernet62 7 8.8.8.16/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet62", "7", "8.8.8.16/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet62', '7') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet62', '7']['vip'] == ['8.8.8.16/24'] + + # config int vrrp ip add Ethernet62 7 8.8.8.16/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet62", "7", "8.8.8.16/24"], obj=obj) + print(result.exit_code, result.output) + assert "8.8.8.16/24 has already configured" in result.output + assert result.exit_code != 0 + + # config int vrrp ip add Ethernet62 7 0.0.0.0 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet62", "7", "0.0.0.0"], obj=obj) + print(result.exit_code, result.output) + assert "IPv4 address 0.0.0.0/32 is Zero" in result.output + assert result.exit_code != 0 + + # config int vrrp ip add Ethernet62 7 777.256.1.1/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet62", "7", "777.256.1.1/24"], obj=obj) + print(result.exit_code, result.output) + assert "IP address 777.256.1.1/24 is not valid" in result.output + assert result.exit_code != 0 + + # config int 
vrrp ip add Ethernet62 7 224.0.0.41/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet62", "7", "224.0.0.41/24"], obj=obj) + print(result.exit_code, result.output) + assert "IP address 224.0.0.41/24 is multicast" in result.output + assert result.exit_code != 0 + + # config int vrrp ip add Ethernet62 7 6.6.6.6 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet62", "7", "6.6.6.6"], obj=obj) + print(result.exit_code, result.output) + assert "IP address 6.6.6.6 is missing a mask." in result.output + assert result.exit_code != 0 + + # config int vrrp ip remove Ethernet64 8 10.10.10.8/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["remove"], + ["Ethernet64", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.16/24'] + + # config int vrrp ip remove Ethernet64 8 10.10.10.8/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["remove"], + ["Ethernet64", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert "10.10.10.8/24 is not configured on the vrrp instance" in result.output + assert result.exit_code != 0 + + # config int vrrp ip remove Ethernet64 8 10.10.10.888/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["remove"], + ["Ethernet64", "8", "10.10.10.888/24"], obj=obj) + print(result.exit_code, result.output) + assert "IP address is not valid:" in result.output + assert result.exit_code != 0 + + # config int vrrp ip remove Ethernet64 8 10.10.10.16/24 + result = 
runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["remove"], + ["Ethernet64", "8", "10.10.10.16/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == [''] + + # config int vrrp remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP') + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["remove"], + ["Ethernt64", "8", "10.10.10.16/24"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["remove"], + ["Ethernet2", "8", "10.10.10.16/24"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config int vrrp remove Ethernet63 9 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["remove"], + ["Ethernet63", "9", "10.10.10.16/24"], obj=obj) + print(result.exit_code, result.output) + assert "10.10.10.16/24 is not configured on the vrrp instance" in result.output + assert result.exit_code != 0 + + # config int ip remove Ethernet64 10.10.10.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + 
assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet63 9.9.9.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet63", "9.9.9.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet63', '9.9.9.1/24') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet62 8.8.8.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet62", "8.8.8.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet62', '8.8.8.1/24') not in db.cfgdb.get_table('INTERFACE') + + def test_add_del_vrrp6_instance(self): + runner = CliRunner() + db = Db() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 100::1/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "100::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '100::1/64') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet63 99::1/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet63", "99::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet63', '99::1/64') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet62 88::1/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet62", "88::1/64"], obj=obj) + 
print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet62', '88::1/64') in db.cfgdb.get_table('INTERFACE') + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernt64", "8", "100::8/64"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet2", "8", "100::8/64"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config int vrrp6 ipv6 add Ethernet64 8 100::8/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet64", "8", "100::8/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['100::8/64'] + + # config int vrrp6 ipv6 add Ethernet64 8 100::16/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet64", "8", "100::16/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['100::8/64', '100::16/64'] + + # config int vrrp6 ipv6 add Ethernet62 7 88::16/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet62", "7", "88::16/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet62', '7') in 
db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet62', '7']['vip'] == ['88::16/64'] + + # config int vrrp6 ipv6 add Ethernet62 7 88::16/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet62", "7", "88::16/64"], obj=obj) + print(result.exit_code, result.output) + assert "88::16/64 has already configured" in result.output + assert result.exit_code != 0 + + # config int vrrp6 ipv6 add Ethernet62 7 :: + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet62", "7", "::"], obj=obj) + print(result.exit_code, result.output) + assert "IPv6 address ::/128 is unspecified" in result.output + assert result.exit_code != 0 + + # config int vrrp6 ipv6 add Ethernet62 7 785h::12/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet62", "7", "785h::12/64"], obj=obj) + print(result.exit_code, result.output) + assert "IP address 785h::12/64 is not valid" in result.output + assert result.exit_code != 0 + + # config int vrrp6 ipv6 add Ethernet62 7 88::2 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet62", "7", "88::2"], obj=obj) + print(result.exit_code, result.output) + assert "IPv6 address 88::2 is missing a mask." 
in result.output + assert result.exit_code != 0 + + # config int vrrp6 ipv6 remove Ethernet64 8 100::8/64 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["remove"], + ["Ethernet64", "8", "100::8/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['100::16/64'] + + # config int vrrp6 ipv6 remove Ethernet64 8 100::8/64 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["remove"], + ["Ethernet64", "8", "100::8/64"], obj=obj) + print(result.exit_code, result.output) + assert "100::8/64 is not configured on the Vrrpv6 instance 8!" in result.output + assert result.exit_code != 0 + + # config int vrrp6 ipv6 remove Ethernet64 8 100::16/64 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["remove"], + ["Ethernet64", "8", "100::16/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == [''] + + # config int vrrp6 remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP6') + + # check interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["remove"], + ["Ethernt64", "8", "100::16/64"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke( + 
config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["remove"], + ["Ethernet2", "8", "100::16/64"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config int vrrp remove Ethernet63 9 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["remove"], + ["Ethernet63", "9", "100::16/64"], obj=obj) + print(result.exit_code, result.output) + assert "100::16/64 is not configured on the Vrrpv6 instance 9" in result.output + assert result.exit_code != 0 + + # config int vrrp6 ipv6 remove Ethernet64 8 88cg::2/64 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["remove"], + ["Ethernet64", "8", "88cg::2/64"], obj=obj) + print(result.exit_code, result.output) + assert "IPv6 address is not valid:" in result.output + assert result.exit_code != 0 + + # config int ip remove Ethernet64 100::1/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "100::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '100::1/64') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet63 99::1/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet63", "99::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet63', '99::1/64') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet62 88::1/64 + with mock.patch('utilities_common.cli.run_command') as 
mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet62", "88::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet62', '88::1/64') not in db.cfgdb.get_table('INTERFACE') + + def test_add_del_vrrp_instance_track_intf(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10.10.10.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10.10.10.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet5 10.10.10.5/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet5", "10.10.10.5/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet5', '10.10.10.5/24') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet6 10.10.10.6/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet6", "10.10.10.6/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet6', '10.10.10.6/24') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet7 10.10.10.7/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet7", "10.10.10.7/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet7', '10.10.10.7/24') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp ip add Ethernet64 8 10.10.10.8/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet64", "8", "10.10.10.8/24"], 
obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.8/24'] + + # check interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernt64", "8", "Ethernet5", "20"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet2", "8", "Ethernet5", "20"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernt5", "20"], obj=obj) + print(result.exit_code, result.output) + assert "'track_interface' is not valid." 
in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet2", "20"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config interface vrrp track_interface add Ethernet64 8 Ethernet5 20 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet5", "20"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet5') in db.cfgdb.get_table('VRRP_TRACK') + assert db.cfgdb.get_table('VRRP_TRACK')['Ethernet64', '8', 'Ethernet5']['priority_increment'] == '20' + + # config interface vrrp track_interface add Ethernet64 8 Ethernet6 30 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet6", "30"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet6') in db.cfgdb.get_table('VRRP_TRACK') + assert db.cfgdb.get_table('VRRP_TRACK')['Ethernet64', '8', 'Ethernet6']['priority_increment'] == '30' + + # config interface vrrp track_interface add Ethernet64 8 Ethernet6 25 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet6", "25"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet6') in db.cfgdb.get_table('VRRP_TRACK') + assert db.cfgdb.get_table('VRRP_TRACK')['Ethernet64', '8', 'Ethernet6']['priority_increment'] == '25' + + # config interface vrrp track_interface add Ethernet64 8 Ethernet7 80 + 
result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet7", "80"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + + # config interface vrrp track_interface add Ethernet7 7 Ethernet5 40 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet7", "7", "Ethernet5", "40"], obj=obj) + print(result.exit_code, result.output) + assert "vrrp instance 7 not found on interface Ethernet7" in result.output + assert result.exit_code != 0 + + # config interface vrrp track_interface remove Ethernet64 8 Ethernet6 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernet6"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet6') not in db.cfgdb.get_table('VRRP_TRACK') + + # config interface vrrp track_interface remove Ethernet64 8 Ethernet5 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet5') not in db.cfgdb.get_table('VRRP_TRACK') + + # check interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["remove"], + ["Ethernt64", "8", "Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["remove"], + ["Ethernet2", "8", "Ethernet5"], 
obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernt5"], obj=obj) + print(result.exit_code, result.output) + assert "'track_interface' is not valid." in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernet2"], obj=obj) + print(result.exit_code, result.output) + assert "Ethernet2 is not configured on the vrrp instance 8" in result.output + assert result.exit_code != 0 + + # config interface vrrp track_interface remove Ethernet7 7 Ethernet5 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["remove"], + ["Ethernet7", "7", "Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert "vrrp instance 7 not found on interface Ethernet7" in result.output + assert result.exit_code != 0 + + # config int vrrp remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP') + + # config int ip remove Ethernet7 10.10.10.7/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet7", "10.10.10.7/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet7', '10.10.10.7/24') not in 
db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet6 10.10.10.6/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet6", "10.10.10.6/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet6', '10.10.10.6/24') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet5 10.10.10.5/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet5", "10.10.10.5/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet5', '10.10.10.5/24') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet64 10.10.10.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') + + def test_add_del_vrrp6_instance_track_intf(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 100::64/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "100::64/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '100::64/64') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet5 100::5/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + 
["Ethernet5", "100::5/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet5', '100::5/64') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet6 100::6/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet6", "100::6/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet6', '100::6/64') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet7 100::7/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet7", "100::7/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet7', '100::7/64') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp6 ipv6 add Ethernet64 8 100::1/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet64", "8", "100::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['100::1/64'] + + # check interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernt64", "8", "Ethernet", "20"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernet2", "8", "Ethernet5", "20"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = 
runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernt5", "20"], obj=obj) + print(result.exit_code, result.output) + assert "'track_interface' is not valid." in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet2", "20"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config interface vrrp6 track_interface add Ethernet7 8 Ethernet5 20 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernet7", "8", "Ethernet5", "20"], obj=obj) + print(result.exit_code, result.output) + assert "vrrp6 instance 8 not found on interface Ethernet7" in result.output + assert result.exit_code != 0 + + # config interface vrrp6 track_interface add Ethernet64 8 Ethernet5 20 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet5", "20"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet5') in db.cfgdb.get_table('VRRP6_TRACK') + assert db.cfgdb.get_table('VRRP6_TRACK')['Ethernet64', '8', 'Ethernet5']['priority_increment'] == '20' + + # config interface vrrp6 track_interface add Ethernet64 8 Ethernet6 30 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet6", "30"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet6') in db.cfgdb.get_table('VRRP6_TRACK') + assert 
db.cfgdb.get_table('VRRP6_TRACK')['Ethernet64', '8', 'Ethernet6']['priority_increment'] == '30' + + # config interface vrrp6 track_interface add Ethernet64 8 Ethernet7 80 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet7", "80"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + + # config interface vrrp6 track_interface remove Ethernet64 8 Ethernet6 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernet6"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet6') not in db.cfgdb.get_table('VRRP6_TRACK') + + # config interface vrrp6 track_interface remove Ethernet64 8 Ethernet5 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet5') not in db.cfgdb.get_table('VRRP6_TRACK') + + # config interface vrrp6 track_interface remove Ethernet7 8 Ethernet5 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["remove"], + ["Ethernet7", "8", "Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert "vrrp6 instance 8 not found on interface Ethernet7" in result.output + assert result.exit_code != 0 + + # check interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["remove"], + ["Ethernt64", "8", "Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is 
Router interface + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["remove"], + ["Ethernet2", "8", "Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernt5"], obj=obj) + print(result.exit_code, result.output) + assert "'track_interface' is not valid." in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernet2"], obj=obj) + print(result.exit_code, result.output) + assert "Ethernet2 is not configured on the vrrp6 instance 8" in result.output + assert result.exit_code != 0 + + # config int vrrp6 remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP6') + + # config int ip remove Ethernet7 100::7/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet7", "100::7/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet7', '100::7/64') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet6 100::6/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = 
runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet6", "100::6/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet6', '100::6/64') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet5 100::5/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet5", "100::5/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet5', '100::5/64') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet64 100::64/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "100::64/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '100::64/64') not in db.cfgdb.get_table('INTERFACE') + + def test_enable_disable_vrrp_instance_preempt(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10.10.10.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10.10.10.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp ip add Ethernet64 8 10.10.10.8/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet64", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in 
db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.8/24'] + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["pre_empt"], + ["Ethernt64", "8", "disabled"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["pre_empt"], + ["Ethernet2", "8", "disabled"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check the vrrp instance is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["pre_empt"], + ["Ethernet64", "9", "disabled"], obj=obj) + print(result.exit_code, result.output) + assert "vrrp instance 9 not found on interface Ethernet64" in result.output + assert result.exit_code != 0 + + # config interface vrrp vrrp pre_empt Ethernet64 8 disabled + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["pre_empt"], + ["Ethernet64", "8", "disabled"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['preempt'] == 'disabled' + + # config interface vrrp vrrp pre_empt Ethernet64 8 enabled + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["pre_empt"], + ["Ethernet64", "8", "enabled"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['preempt'] == 'enabled' + + # config int vrrp remove Ethernet64 8 + result = 
runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP') + + # config int ip remove Ethernet64 10.10.10.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') + + def test_enable_disable_vrrp6_instance_preempt(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10::8/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10::8/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10::8/64') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp6 ipv6 add Ethernet64 8 10::1/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet64", "8", "10::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['10::1/64'] + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["pre_empt"], + ["Ethernt64", "8", "disabled"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = 
runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["pre_empt"], + ["Ethernet2", "8", "disabled"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check the vrrp6 instance is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["pre_empt"], + ["Ethernet64", "9", "disabled"], obj=obj) + print(result.exit_code, result.output) + assert "Vrrpv6 instance 9 not found on interface Ethernet64" in result.output + assert result.exit_code != 0 + + # config interface vrrp6 pre_empt Ethernet64 8 disabled + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["pre_empt"], + ["Ethernet64", "8", "disabled"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['preempt'] == 'disabled' + + # config interface vrrp vrrp pre_empt Ethernet64 8 enabled + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["pre_empt"], + ["Ethernet64", "8", "enabled"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['preempt'] == 'enabled' + + # config int vrrp remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP6') + + # config int ip remove Ethernet64 10::8/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", 
"10::8/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10::8/64') not in db.cfgdb.get_table('INTERFACE') + + def test_config_vrrp_instance_adv_interval(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10.10.10.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10.10.10.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp ip add Ethernet64 8 10.10.10.8/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet64", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.8/24'] + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["adv_interval"], + ["Ethernt64", "8", "2"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["adv_interval"], + ["Ethernet2", "8", "2"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check the vrrp instance is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["adv_interval"], + ["Ethernet64", "9", "2"], obj=obj) + print(result.exit_code, result.output) + assert "vrrp instance 9 not found on interface 
Ethernet64" in result.output + assert result.exit_code != 0 + + # config interface vrrp vrrp adv_interval Ethernet64 8 2 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["adv_interval"], + ["Ethernet64", "8", "2"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['adv_interval'] == '2' + + # config interface vrrp vrrp adv_interval Ethernet64 8 500 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["adv_interval"], + ["Ethernet64", "8", "500"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + + # config int vrrp remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP') + + # config int ip remove Ethernet64 10.10.10.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') + + def test_config_vrrp6_instance_adv_interval(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10::8/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10::8/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10::8/64') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp6 ipv6 add 
Ethernet64 8 10::1/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet64", "8", "10::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['10::1/64'] + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["adv_interval"], + ["Ethernt64", "8", "2"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["adv_interval"], + ["Ethernet2", "8", "2"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check the vrrp instance is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["adv_interval"], + ["Ethernet64", "9", "2"], obj=obj) + print(result.exit_code, result.output) + assert "Vrrpv6 instance 9 not found on interface Ethernet64" in result.output + assert result.exit_code != 0 + + # config interface vrrp6 adv_interval Ethernet64 8 2 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["adv_interval"], + ["Ethernet64", "8", "2"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['adv_interval'] == '2' + + # config interface vrrp6 adv_interval Ethernet64 8 500 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["adv_interval"], + ["Ethernet64", "8", "500"], obj=obj) + print(result.exit_code, 
result.output) + assert result.exit_code != 0 + + # config int vrrp6 remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP6') + + # config int ip remove Ethernet64 10::8/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10::8/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10::8/64') not in db.cfgdb.get_table('INTERFACE') + + def test_config_vrrp_instance_priority(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10.10.10.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10.10.10.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp ip add Ethernet64 8 10.10.10.8/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet64", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.8/24'] + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["priority"], + ["Ethernt64", "8", "150"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check 
interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["priority"], + ["Ethernet2", "8", "150"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check the vrrp instance is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["priority"], + ["Ethernet64", "9", "150"], obj=obj) + print(result.exit_code, result.output) + assert "vrrp instance 9 not found on interface Ethernet64" in result.output + assert result.exit_code != 0 + + # config interface vrrp priority Ethernet64 8 150 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["priority"], + ["Ethernet64", "8", "150"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['priority'] == '150' + + # config interface vrrp priority Ethernet64 8 256 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["priority"], + ["Ethernet64", "8", "256"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + + # config int vrrp remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP') + + # config int ip remove Ethernet64 10.10.10.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert 
('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') + + def test_config_vrrp6_instance_priority(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10::8/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10::8/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10::8/64') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp6 ipv6 add Ethernet64 8 10::1/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet64", "8", "10::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['10::1/64'] + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["priority"], + ["Ethernt64", "8", "150"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["priority"], + ["Ethernet2", "8", "150"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check the vrrp instance is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["priority"], + ["Ethernet64", "9", "150"], obj=obj) + print(result.exit_code, result.output) + assert "Vrrpv6 instance 9 not found on interface Ethernet64" in result.output + assert result.exit_code != 0 + + # config interface vrrp6 priority Ethernet64 8 150 + result = 
runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["priority"], + ["Ethernet64", "8", "150"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['priority'] == '150' + + # config interface vrrp priority Ethernet64 8 256 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["priority"], + ["Ethernet64", "8", "256"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + + # config int vrrp6 remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP6') + + # config int ip remove Ethernet64 10::8/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10::8/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10::8/64') not in db.cfgdb.get_table('INTERFACE') + + def test_config_vrrp_instance_version(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10.10.10.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10.10.10.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp ip add Ethernet64 8 10.10.10.8/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet64", "8", 
"10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.8/24'] + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["version"], + ["Ethernt64", "8", "3"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["version"], + ["Ethernet2", "8", "3"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check the vrrp instance is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["version"], + ["Ethernet64", "9", "3"], obj=obj) + print(result.exit_code, result.output) + assert "vrrp instance 9 not found on interface Ethernet64" in result.output + assert result.exit_code != 0 + + # config interface vrrp version Ethernet64 8 3 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["version"], + ["Ethernet64", "8", "3"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['version'] == '3' + + # config interface vrrp version Ethernet64 8 1 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["version"], + ["Ethernet64", "8", "1"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + + # config int vrrp remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet64", "8"], 
obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP') + + # config int ip remove Ethernet64 10.10.10.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') diff --git a/tests/vxlan_test.py b/tests/vxlan_test.py index 9775fe96d0..d1decbc6eb 100644 --- a/tests/vxlan_test.py +++ b/tests/vxlan_test.py @@ -249,7 +249,7 @@ def test_show_vxlan_remotevni_specific_cnt(self): print(result.output) assert result.exit_code == 0 assert result.output == show_vxlan_remotevni_specific_cnt_output - + @patch("validated_config_db_connector.device_info.is_yang_config_validation_enabled", mock.Mock(return_value=True)) @patch("config.validated_config_db_connector.ValidatedConfigDBConnector.validated_set_entry", mock.Mock(side_effect=ValueError)) @patch("config.main.ConfigDBConnector.get_entry", mock.Mock(return_value="Vlan Data")) @@ -371,6 +371,19 @@ def test_config_vxlan_add(self): assert result.exit_code == 0 assert result.output == show_vxlan_vlanvnimap_output + def test_config_vxlan_add_invalid_name(self): + runner = CliRunner() + db = Db() + + result = runner.invoke(config.config.commands["vxlan"].commands["add"], ["vtep111111111111", "1.1.1.1"], obj=db) + print(result.exit_code) + print(result.output) + expected_output = """\ +Error: 'vxlan_name' length should not exceed 15 characters +""" + assert expected_output in result.output + assert result.exit_code != 0 + def test_config_vxlan_del(self): dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db') db = Db() diff --git a/utilities_common/netstat.py 
b/utilities_common/netstat.py index 21b1a0faeb..e32e28c745 100755 --- a/utilities_common/netstat.py +++ b/utilities_common/netstat.py @@ -108,6 +108,16 @@ def format_prate(rate): return "{:.2f}".format(float(rate))+'/s' +def format_fec_ber(rate): + """ + Show the ber rate. + """ + if rate == STATUS_NA: + return STATUS_NA + else: + return "{:.2e}".format(float(rate)) + + def format_util(brate, port_rate): """ Calculate the util. diff --git a/utilities_common/portstat.py b/utilities_common/portstat.py index 6942fa5f2a..d28584682a 100644 --- a/utilities_common/portstat.py +++ b/utilities_common/portstat.py @@ -11,7 +11,8 @@ from utilities_common import constants import utilities_common.multi_asic as multi_asic_util from utilities_common.netstat import ns_diff, table_as_json, format_brate, format_prate, \ - format_util, format_number_with_comma, format_util_directly + format_util, format_number_with_comma, format_util_directly, \ + format_fec_ber """ The order and count of statistics mentioned below needs to be in sync with the values in portstat script @@ -32,11 +33,11 @@ header_std = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', 'TX_OK', 'TX_BPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] header_errors_only = ['IFACE', 'STATE', 'RX_ERR', 'RX_DRP', 'RX_OVR', 'TX_ERR', 'TX_DRP', 'TX_OVR'] -header_fec_only = ['IFACE', 'STATE', 'FEC_CORR', 'FEC_UNCORR', 'FEC_SYMBOL_ERR'] +header_fec_only = ['IFACE', 'STATE', 'FEC_CORR', 'FEC_UNCORR', 'FEC_SYMBOL_ERR', 'FEC_PRE_BER', 'FEC_POST_BER'] header_rates_only = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_OK', 'TX_BPS', 'TX_PPS', 'TX_UTIL'] -rates_key_list = ['RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_BPS', 'TX_PPS', 'TX_UTIL'] -ratestat_fields = ("rx_bps", "rx_pps", "rx_util", "tx_bps", "tx_pps", "tx_util") +rates_key_list = ['RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_BPS', 'TX_PPS', 'TX_UTIL', 'FEC_PRE_BER', 'FEC_POST_BER'] +ratestat_fields = ("rx_bps", "rx_pps", "rx_util", "tx_bps", "tx_pps", 
"tx_util", "fec_pre_ber", "fec_post_ber") RateStats = namedtuple("RateStats", ratestat_fields) """ @@ -194,10 +195,13 @@ def collect_stat_from_lc(self): tx_err = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_err") tx_drop = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_drop") tx_ovr = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_ovr") + fec_pre_ber = self.db.get(self.db.CHASSIS_STATE_DB, key, "fec_pre_ber") + fec_post_ber = self.db.get(self.db.CHASSIS_STATE_DB, key, "fec_post_ber") port_alias = key.split("|")[-1] cnstat_dict[port_alias] = NStats._make([rx_ok, rx_err, rx_drop, rx_ovr, tx_ok, tx_err, tx_drop, tx_ovr] + [STATUS_NA] * (len(NStats._fields) - 8))._asdict() - ratestat_dict[port_alias] = RateStats._make([rx_bps, rx_pps, rx_util, tx_bps, tx_pps, tx_util]) + ratestat_dict[port_alias] = RateStats._make([rx_bps, rx_pps, rx_util, tx_bps, + tx_pps, tx_util, fec_pre_ber, fec_post_ber]) self.cnstat_dict.update(cnstat_dict) self.ratestat_dict.update(ratestat_dict) @@ -229,7 +233,7 @@ def get_counters(port): if counter_name not in fvs: fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: - fields[pos] = str(int(fields[pos]) + int(fvs[counter_name])) + fields[pos] = str(int(fields[pos]) + int(float(fvs[counter_name]))) cntr = NStats._make(fields)._asdict() return cntr @@ -238,7 +242,7 @@ def get_rates(table_id): """ Get the rates from specific table. 
""" - fields = ["0", "0", "0", "0", "0", "0"] + fields = ["0", "0", "0", "0", "0", "0", "0", "0"] for pos, name in enumerate(rates_key_list): full_table_id = RATES_TABLE_PREFIX + table_id counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, name) @@ -363,7 +367,9 @@ def cnstat_print(self, cnstat_dict, ratestat_dict, intf_list, use_json, print_al table.append((key, self.get_port_state(key), format_number_with_comma(data['fec_corr']), format_number_with_comma(data['fec_uncorr']), - format_number_with_comma(data['fec_symbol_err']))) + format_number_with_comma(data['fec_symbol_err']), + format_fec_ber(rates.fec_pre_ber), + format_fec_ber(rates.fec_post_ber))) elif rates_only: header = header_rates_only table.append((key, self.get_port_state(key),