diff --git a/acl_loader/main.py b/acl_loader/main.py index b139b3641b..b8fededb6e 100644 --- a/acl_loader/main.py +++ b/acl_loader/main.py @@ -2,6 +2,7 @@ import click import json +import syslog import tabulate from natsort import natsorted @@ -13,14 +14,17 @@ def info(msg): click.echo(click.style("Info: ", fg='cyan') + click.style(str(msg), fg='green')) + syslog.syslog(syslog.LOG_INFO, msg) def warning(msg): click.echo(click.style("Warning: ", fg='cyan') + click.style(str(msg), fg='yellow')) + syslog.syslog(syslog.LOG_WARNING, msg) def error(msg): click.echo(click.style("Error: ", fg='cyan') + click.style(str(msg), fg='red')) + syslog.syslog(syslog.LOG_ERR, msg) def deep_update(dst, src): @@ -80,7 +84,7 @@ def __init__(self): self.sessions_db_info = {} self.configdb = ConfigDBConnector() self.configdb.connect() - self.appdb = SonicV2Connector() + self.appdb = SonicV2Connector(host="127.0.0.1") self.appdb.connect(self.appdb.APPL_DB) self.read_tables_info() @@ -115,8 +119,10 @@ def read_sessions_info(self): self.sessions_db_info = self.configdb.get_table(self.MIRROR_SESSION) for key in self.sessions_db_info.keys(): app_db_info = self.appdb.get_all(self.appdb.APPL_DB, "{}:{}".format(self.MIRROR_SESSION, key)) - - status = app_db_info.get("status", "inactive") + if app_db_info: + status = app_db_info.get("status", "inactive") + else: + status = "error" self.sessions_db_info[key]["status"] = status def get_sessions_db_info(self): @@ -352,8 +358,11 @@ def convert_rules(self): for acl_entry_name in acl_set.acl_entries.acl_entry: acl_entry = acl_set.acl_entries.acl_entry[acl_entry_name] - rule = self.convert_rule_to_db_schema(table_name, acl_entry) - deep_update(self.rules_info, rule) + try: + rule = self.convert_rule_to_db_schema(table_name, acl_entry) + deep_update(self.rules_info, rule) + except AclLoaderException as ex: + error("Error processing rule %s: %s. Skipped." % (acl_entry_name, ex)) if not self.is_table_mirror(table_name): deep_update(self.rules_info, self.deny_rule(table_name)) @@ -413,25 +422,34 @@ def show_table(self, table_name): :param table_name: Optional. ACL table name. Filter tables by specified name. :return: """ - header = ("Name", "Type", "Ports", "Description") + header = ("Name", "Type", "Binding", "Description") data = [] for key, val in self.get_tables_db_info().iteritems(): if table_name and key != table_name: continue - if not val["ports"]: - data.append([key, val["type"], "", val["policy_desc"]]) + if val["type"] == AclLoader.ACL_TABLE_TYPE_CTRLPLANE: + services = natsorted(val["services"]) + data.append([key, val["type"], services[0], val["policy_desc"]]) + + if len(services) > 1: + for service in services[1:]: + data.append(["", "", service, ""]) else: - ports = natsorted(val["ports"]) - data.append([key, val["type"], ports[0], val["policy_desc"]]) + if not val["ports"]: + data.append([key, val["type"], "", val["policy_desc"]]) + else: + ports = natsorted(val["ports"]) + data.append([key, val["type"], ports[0], val["policy_desc"]]) - if len(ports) > 1: - for port in ports[1:]: - data.append(["", "", port, ""]) + if len(ports) > 1: + for port in ports[1:]: + data.append(["", "", port, ""]) print(tabulate.tabulate(data, headers=header, tablefmt="simple", missingval="")) + def show_session(self, session_name): """ Show mirror session configuration. 
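The read_sessions_info() hunk above falls back to an "error" status when a configured mirror session has no APPL_DB entry, instead of crashing on a None lookup. A minimal standalone sketch of that lookup, assuming swsssdk is available and that the APPL_DB key prefix is MIRROR_SESSION (the session name is hypothetical):

    from swsssdk import SonicV2Connector

    def mirror_session_status(session_name):
        db = SonicV2Connector(host="127.0.0.1")
        db.connect(db.APPL_DB)
        # get_all() returns nothing when the key is absent, e.g. when
        # orchagent never installed the session in APPL_DB
        info = db.get_all(db.APPL_DB, "MIRROR_SESSION:{}".format(session_name))
        return info.get("status", "inactive") if info else "error"

    print mirror_session_status("everflow0")  # hypothetical session name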
diff --git a/clear/main.py b/clear/main.py index d74947967d..b66e966c52 100755 --- a/clear/main.py +++ b/clear/main.py @@ -200,5 +200,35 @@ def arp(ipaddress): cli.add_command(arp) ip.add_command(arp) +# +# 'fdb' command #### +# +@cli.group() +def fdb(): + """Clear FDB table""" + pass + +@fdb.command('all') +def clear_all_fdb(): + """Clear All FDB entries""" + command = 'fdbclear' + run_command(command) + +# 'sonic-clear fdb port' and 'sonic-clear fdb vlan' will be added later +''' +@fdb.command('port') +@click.argument('portid', required=True) +def clear_port_fdb(portid): + """Clear FDB entries learned from one port""" + command = 'fdbclear' + ' -p ' + portid + run_command(command) + +@fdb.command('vlan') +@click.argument('vlanid', required=True) +def clear_vlan_fdb(vlanid): + """Clear FDB entries learned in one VLAN""" + command = 'fdbclear' + ' -v ' + vlanid + run_command(command) +''' if __name__ == '__main__': cli() diff --git a/config/aaa.py b/config/aaa.py index dbab270f10..0980703638 100644 --- a/config/aaa.py +++ b/config/aaa.py @@ -124,10 +124,10 @@ def timeout(ctx, second): @click.command() -@click.argument('type', metavar='', type=click.Choice(["chap", "pap", "mschap"]), required=False) +@click.argument('type', metavar='', type=click.Choice(["chap", "pap", "mschap", "login"]), required=False) @click.pass_context def authtype(ctx, type): - """Specify TACACS+ server global auth_type [chap | pap | mschap]""" + """Specify TACACS+ server global auth_type [chap | pap | mschap | login]""" if ctx.obj == 'default': del_table_key('TACPLUS', 'global', 'auth_type') elif type: @@ -158,7 +158,7 @@ def passkey(ctx, secret): @click.argument('address', metavar='') @click.option('-t', '--timeout', help='Transmission timeout interval, default 5', type=int) @click.option('-k', '--key', help='Shared secret') -@click.option('-a', '--auth_type', help='Authentication type, default pap', type=click.Choice(["chap", "pap", "mschap"])) +@click.option('-a', '--auth_type', help='Authentication type, default pap', type=click.Choice(["chap", "pap", "mschap", "login"])) @click.option('-o', '--port', help='TCP port range is 1 to 65535, default 49', type=click.IntRange(1, 65535), default=49) @click.option('-p', '--pri', help="Priority, default 1", type=click.IntRange(1, 64), default=1) def add(address, timeout, key, auth_type, port, pri): diff --git a/config/main.py b/config/main.py index 5dec73c1d2..7e01283528 100755 --- a/config/main.py +++ b/config/main.py @@ -49,38 +49,48 @@ def _get_all_neighbor_ipaddresses(): config_db.connect() return config_db.get_table('BGP_NEIGHBOR').keys() -def _get_neighbor_ipaddress_by_hostname(hostname): - """Returns string containing IP address of neighbor with hostname or None if not a neighbor +def _get_neighbor_ipaddress_list_by_hostname(hostname): + """Returns list of strings, each containing an IP address of neighbor with + hostname . 
Returns empty list if not a neighbor """ + addrs = [] config_db = ConfigDBConnector() config_db.connect() bgp_sessions = config_db.get_table('BGP_NEIGHBOR') for addr, session in bgp_sessions.iteritems(): if session.has_key('name') and session['name'] == hostname: - return addr - return None + addrs.append(addr) + return addrs -def _switch_bgp_session_status_by_addr(ipaddress, status, verbose): +def _change_bgp_session_status_by_addr(ipaddress, status, verbose): """Start up or shut down BGP session by IP address """ verb = 'Starting' if status == 'up' else 'Shutting' click.echo("{} {} BGP session with neighbor {}...".format(verb, status, ipaddress)) config_db = ConfigDBConnector() config_db.connect() + config_db.mod_entry('bgp_neighbor', ipaddress, {'admin_status': status}) -def _switch_bgp_session_status(ipaddr_or_hostname, status, verbose): +def _change_bgp_session_status(ipaddr_or_hostname, status, verbose): """Start up or shut down BGP session by IP address or hostname """ - if _is_neighbor_ipaddress(ipaddr_or_hostname): - ipaddress = ipaddr_or_hostname + ip_addrs = [] + + # If we were passed an IP address, convert it to lowercase because IPv6 addresses were + # stored in ConfigDB with all lowercase alphabet characters during minigraph parsing + if _is_neighbor_ipaddress(ipaddr_or_hostname.lower()): + ip_addrs.append(ipaddr_or_hostname.lower()) else: # If is not the IP address of a neighbor, check to see if it's a hostname - ipaddress = _get_neighbor_ipaddress_by_hostname(ipaddr_or_hostname) - if ipaddress == None: + ip_addrs = _get_neighbor_ipaddress_list_by_hostname(ipaddr_or_hostname) + + if not ip_addrs: print "Error: could not locate neighbor '{}'".format(ipaddr_or_hostname) raise click.Abort - _switch_bgp_session_status_by_addr(ipaddress, status, verbose) + + for ip_addr in ip_addrs: + _change_bgp_session_status_by_addr(ip_addr, status, verbose) def _change_hostname(hostname): current_hostname = os.uname()[1] @@ -90,6 +100,41 @@ def _change_hostname(hostname): run_command('sed -i "/\s{}$/d" /etc/hosts'.format(current_hostname), display_cmd=True) run_command('echo "127.0.0.1 {}" >> /etc/hosts'.format(hostname), display_cmd=True) +def _clear_qos(): + QOS_TABLE_NAMES = [ + 'TC_TO_PRIORITY_GROUP_MAP', + 'MAP_PFC_PRIORITY_TO_QUEUE', + 'TC_TO_QUEUE_MAP', + 'DSCP_TO_TC_MAP', + 'SCHEDULER', + 'PFC_PRIORITY_TO_PRIORITY_GROUP_MAP', + 'PORT_QOS_MAP', + 'WRED_PROFILE', + 'QUEUE', + 'CABLE_LENGTH', + 'BUFFER_POOL', + 'BUFFER_PROFILE', + 'BUFFER_PG', + 'BUFFER_QUEUE'] + config_db = ConfigDBConnector() + config_db.connect() + for qos_table in QOS_TABLE_NAMES: + config_db.delete_table(qos_table) + +def _get_hwsku(): + config_db = ConfigDBConnector() + config_db.connect() + metadata = config_db.get_table('DEVICE_METADATA') + return metadata['localhost']['hwsku'] + +def _get_platform(): + with open('/host/machine.conf') as machine_conf: + for line in machine_conf: + tokens = line.split('=') + if tokens[0].strip() == 'onie_platform' or tokens[0].strip() == 'aboot_platform': + return tokens[1].strip() + return '' + # Callback for confirmation prompt. 
Aborts if user enters "n" def _abort_if_false(ctx, param, value): if not value: @@ -149,7 +194,7 @@ def reload(filename, yes): client.flushdb() command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, filename) run_command(command, display_cmd=True) - client.set(config_db.INIT_INDICATOR, True) + client.set(config_db.INIT_INDICATOR, 1) _restart_services() @cli.command() @@ -186,17 +231,51 @@ def load_minigraph(): client = config_db.redis_clients[config_db.CONFIG_DB] client.flushdb() if os.path.isfile('/etc/sonic/init_cfg.json'): - command = "{} -m -j /etc/sonic/init_cfg.json --write-to-db".format(SONIC_CFGGEN_PATH) + command = "{} -H -m -j /etc/sonic/init_cfg.json --write-to-db".format(SONIC_CFGGEN_PATH) else: - command = "{} -m --write-to-db".format(SONIC_CFGGEN_PATH) + command = "{} -H -m --write-to-db".format(SONIC_CFGGEN_PATH) run_command(command, display_cmd=True) - client.set(config_db.INIT_INDICATOR, True) + client.set(config_db.INIT_INDICATOR, 1) + run_command('pfcwd start_default', display_cmd=True) if os.path.isfile('/etc/sonic/acl.json'): run_command("acl-loader update full /etc/sonic/acl.json", display_cmd=True) + run_command("config qos reload", display_cmd=True) #FIXME: After config DB daemon is implemented, we'll no longer need to restart every service. _restart_services() print "Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`." +# +# 'qos' group +# +@cli.group() +@click.pass_context +def qos(ctx): + pass + +@qos.command('clear') +def clear(): + _clear_qos() + +@qos.command('reload') +def reload(): + _clear_qos() + platform = _get_platform() + hwsku = _get_hwsku() + buffer_template_file = os.path.join('/usr/share/sonic/device/', platform, hwsku, 'buffers.json.j2') + if os.path.isfile(buffer_template_file): + command = "{} -m -t {} >/tmp/buffers.json".format(SONIC_CFGGEN_PATH, buffer_template_file) + run_command(command, display_cmd=True) + command = "{} -j /tmp/buffers.json --write-to-db".format(SONIC_CFGGEN_PATH) + run_command(command, display_cmd=True) + qos_file = os.path.join('/usr/share/sonic/device/', platform, hwsku, 'qos.json') + if os.path.isfile(qos_file): + command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, qos_file) + run_command(command, display_cmd=True) + else: + click.secho('QoS definition not found at {}'.format(qos_file), fg='yellow') + else: + click.secho('Buffer definition template not found at {}'.format(buffer_template_file), fg='yellow') + # # 'vlan' group # @@ -310,7 +389,7 @@ def all(verbose): """Shut down all BGP sessions""" bgp_neighbor_ip_list = _get_all_neighbor_ipaddresses() for ipaddress in bgp_neighbor_ip_list: - _switch_bgp_session_status_by_addr(ipaddress, 'down', verbose) + _change_bgp_session_status_by_addr(ipaddress, 'down', verbose) # 'neighbor' subcommand @shutdown.command() @@ -318,7 +397,7 @@ def all(verbose): @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def neighbor(ipaddr_or_hostname, verbose): """Shut down BGP session by neighbor IP address or hostname""" - _switch_bgp_session_status(ipaddr_or_hostname, 'down', verbose) + _change_bgp_session_status(ipaddr_or_hostname, 'down', verbose) @bgp.group() def startup(): @@ -332,7 +411,7 @@ def all(verbose): """Start up all BGP sessions""" bgp_neighbor_ip_list = _get_all_neighbor_ipaddresses() for ipaddress in bgp_neighbor_ip_list: - _switch_bgp_session_status(ipaddress, 'up', verbose) + _change_bgp_session_status(ipaddress, 'up', verbose) # 'neighbor' subcommand 
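# Note: the hostname path in _change_bgp_session_status() above fans out to
# every address recorded for that neighbor (typically one IPv4 and one IPv6
# session). A compact sketch of the lookup-and-update loop, with hypothetical
# in-memory data standing in for the ConfigDB tables:
#
#     bgp_sessions = {
#         '10.0.0.1': {'name': 'SWITCH01', 'admin_status': 'up'},
#         'fc00::2':  {'name': 'SWITCH01', 'admin_status': 'up'},
#     }
#
#     def addrs_for_hostname(hostname):
#         return [a for a, s in bgp_sessions.iteritems() if s.get('name') == hostname]
#
#     for addr in addrs_for_hostname('SWITCH01'):
#         bgp_sessions[addr]['admin_status'] = 'down'  # mod_entry() in the real code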
@startup.command() @@ -340,7 +419,7 @@ def all(verbose): @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def neighbor(ipaddr_or_hostname, verbose): """Start up BGP session by neighbor IP address or hostname""" - _switch_bgp_session_status(ipaddr_or_hostname, 'up', verbose) + _change_bgp_session_status(ipaddr_or_hostname, 'up', verbose) # # 'interface' group diff --git a/crm/__init__.py b/crm/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/crm/main.py b/crm/main.py new file mode 100644 index 0000000000..696d7fdedf --- /dev/null +++ b/crm/main.py @@ -0,0 +1,500 @@ +#!/usr/bin/env python + +import os +import click +import swsssdk +from tabulate import tabulate +from subprocess import Popen, PIPE + +class Crm: + def __init__(self): + self.cli_mode = None + self.addr_family = None + self.res_type = None + + def config(self, attr, val): + """ + CRM handler for 'config' CLI commands. + """ + configdb = swsssdk.ConfigDBConnector() + configdb.connect() + + configdb.mod_entry("CRM", 'Config', {attr: val}) + + def show_summary(self): + """ + CRM Handler to display general information. + """ + configdb = swsssdk.ConfigDBConnector() + configdb.connect() + + crm_info = configdb.get_entry('CRM', 'Config') + + if crm_info: + print '\nPolling Interval: ' + crm_info['polling_interval'] + ' second(s)\n' + else: + print '\nError! Could not get CRM configuration.\n' + + def show_thresholds(self, resource): + """ + CRM Handler to display thresholds information. + """ + configdb = swsssdk.ConfigDBConnector() + configdb.connect() + + crm_info = configdb.get_entry('CRM', 'Config') + + header = ("Resource Name", "Threshold Type", "Low Threshold", "High Threshold") + data = [] + + if crm_info: + if resource == 'all': + for res in ["ipv4_route", "ipv6_route", "ipv4_nexthop", "ipv6_nexthop", "ipv4_neighbor", "ipv6_neighbor", + "nexthop_group_member", "nexthop_group", "acl_table", "acl_group", "acl_entry", + "acl_counter", "fdb_entry"]: + data.append([res, crm_info[res + "_threshold_type"], crm_info[res + "_low_threshold"], crm_info[res + "_high_threshold"]]) + else: + data.append([resource, crm_info[resource + "_threshold_type"], crm_info[resource + "_low_threshold"], crm_info[resource + "_high_threshold"]]) + else: + print '\nError! Could not get CRM configuration.' + + print '\n' + print tabulate(data, headers=header, tablefmt="simple", missingval="") + print '\n' + + def show_resources(self, resource): + """ + CRM Handler to display resources information. + """ + countersdb = swsssdk.SonicV2Connector(host='127.0.0.1') + countersdb.connect(countersdb.COUNTERS_DB) + + crm_stats = countersdb.get_all(countersdb.COUNTERS_DB, 'CRM:STATS') + + header = ("Resource Name", "Used Count", "Available Count") + data = [] + + if crm_stats: + if resource == 'all': + for res in ["ipv4_route", "ipv6_route", "ipv4_nexthop", "ipv6_nexthop", "ipv4_neighbor", "ipv6_neighbor", + "nexthop_group_member", "nexthop_group", "fdb_entry"]: + data.append([res, crm_stats['crm_stats_' + res + "_used"], crm_stats['crm_stats_' + res + "_available"]]) + else: + data.append([resource, crm_stats['crm_stats_' + resource + "_used"], crm_stats['crm_stats_' + resource + "_available"]]) + else: + print '\nCRM counters are not ready. They will be populated after the polling interval.' + + print '\n' + print tabulate(data, headers=header, tablefmt="simple", missingval="") + print '\n' + + def show_acl_resources(self): + """ + CRM Handler to display ACL resources information.
+ """ + countersdb = swsssdk.SonicV2Connector(host='127.0.0.1') + countersdb.connect(countersdb.COUNTERS_DB) + + header = ("Stage", "Bind Point", "Resource Name", "Used Count", "Available Count") + data = [] + + for stage in ["INGRESS", "EGRESS"]: + for bind_point in ["PORT", "LAG", "VLAN", "RIF", "SWITCH"]: + crm_stats = countersdb.get_all(countersdb.COUNTERS_DB, 'CRM:ACL_STATS:{0}:{1}'.format(stage, bind_point)) + + if crm_stats: + for res in ["acl_group", "acl_table"]: + data.append([ + stage, bind_point, res, + crm_stats['crm_stats_' + res + "_used"], + crm_stats['crm_stats_' + res + "_available"] + ]) + + print '\n' + print tabulate(data, headers=header, tablefmt="simple", missingval="") + print '\n' + + def show_acl_table_resources(self): + """ + CRM Handler to display ACL table information. + """ + countersdb = swsssdk.SonicV2Connector(host='127.0.0.1') + countersdb.connect(countersdb.COUNTERS_DB) + + header = ("Table ID", "Resource Name", "Used Count", "Available Count") + + # Retrieve all ACL table keys from CRM:ACL_TABLE_STATS + proc = Popen("docker exec -i database redis-cli --raw -n 2 KEYS *CRM:ACL_TABLE_STATS*", stdout=PIPE, stderr=PIPE, shell=True) + out, err = proc.communicate() + + for key in out.splitlines() or [None]: + data = [] + + if key: + id = key.replace('CRM:ACL_TABLE_STATS:', '') + + crm_stats = countersdb.get_all(countersdb.COUNTERS_DB, key) + + for res in ['acl_entry', 'acl_counter']: + if ('crm_stats_' + res + '_used' in crm_stats) and ('crm_stats_' + res + '_available' in crm_stats): + data.append([id, res, crm_stats['crm_stats_' + res + '_used'], crm_stats['crm_stats_' + res + '_available']]) + + print '\n' + print tabulate(data, headers=header, tablefmt="simple", missingval="") + print '\n' + + +@click.group() +@click.pass_context +def cli(ctx): + """ + Utility entry point. 
+ """ + context = { + "crm": Crm() + } + + ctx.obj = context + +@cli.group() +@click.pass_context +def config(ctx): + """CRM related configuration""" + pass + +@config.group() +@click.pass_context +def polling(ctx): + """CRM polling configuration""" + pass + +@polling.command() +@click.pass_context +@click.argument('interval', type=click.INT) +def interval(ctx, interval): + """CRM polling interval configuration""" + ctx.obj["crm"].config('polling_interval', interval) + +@config.group() +@click.pass_context +def thresholds(ctx): + """CRM thresholds configuration""" + pass + +@thresholds.group() +@click.pass_context +def ipv4(ctx): + """CRM resource IPv4 address-family""" + ctx.obj["crm"].addr_family = 'ipv4' + +@thresholds.group() +@click.pass_context +def ipv6(ctx): + """CRM resource IPv6 address-family""" + ctx.obj["crm"].addr_family = 'ipv6' + +@ipv4.group() +@click.pass_context +def route(ctx): + """CRM configuration for route resource""" + ctx.obj["crm"].res_type = 'route' + +@ipv4.group() +@click.pass_context +def neighbor(ctx): + """CRM configuration for neigbor resource""" + ctx.obj["crm"].res_type = 'neighbor' + +@ipv4.group() +@click.pass_context +def nexthop(ctx): + """CRM configuration for nexthop resource""" + ctx.obj["crm"].res_type = 'nexthop' + +@route.command() +@click.argument('value', type=click.Choice(['percentage', 'used', 'free'])) +@click.pass_context +def type(ctx, value): + """CRM threshod type configuration""" + attr = '' + + if ctx.obj["crm"].addr_family != None: + attr += ctx.obj["crm"].addr_family + '_' + + attr += ctx.obj["crm"].res_type + '_' + 'threshold_type' + + ctx.obj["crm"].config(attr, value) + +@route.command() +@click.argument('value', type=click.INT) +@click.pass_context +def low(ctx, value): + """CRM low threshod configuration""" + attr = '' + + if ctx.obj["crm"].addr_family != None: + attr += ctx.obj["crm"].addr_family + '_' + + attr += ctx.obj["crm"].res_type + '_' + 'low_threshold' + + ctx.obj["crm"].config(attr, value) + +@route.command() +@click.argument('value', type=click.INT) +@click.pass_context +def high(ctx, value): + """CRM high threshod configuration""" + attr = '' + + if ctx.obj["crm"].addr_family != None: + attr += ctx.obj["crm"].addr_family + '_' + + attr += ctx.obj["crm"].res_type + '_' + 'high_threshold' + + ctx.obj["crm"].config(attr, value) + +neighbor.add_command(type) +neighbor.add_command(low) +neighbor.add_command(high) +nexthop.add_command(type) +nexthop.add_command(low) +nexthop.add_command(high) +ipv6.add_command(route) +ipv6.add_command(neighbor) +ipv6.add_command(nexthop) + +@thresholds.group() +@click.pass_context +def nexthop(ctx): + """CRM configuration for nexthop resource""" + pass +@nexthop.group() +@click.pass_context +def group(ctx): + """CRM configuration for nexthop group resource""" + pass +@group.group() +@click.pass_context +def member(ctx): + """CRM configuration for nexthop group member resource""" + ctx.obj["crm"].res_type = 'nexthop_group_member' +@group.group() +@click.pass_context +def object(ctx): + """CRM configuration for nexthop group resource""" + ctx.obj["crm"].res_type = 'nexthop_group' + +member.add_command(type) +member.add_command(low) +member.add_command(high) +object.add_command(type) +object.add_command(low) +object.add_command(high) + +@thresholds.group() +@click.pass_context +def fdb(ctx): + """CRM configuration for FDB resource""" + ctx.obj["crm"].res_type = 'fdb_entry' + +fdb.add_command(type) +fdb.add_command(low) +fdb.add_command(high) + +@thresholds.group() +@click.pass_context +def 
acl(ctx): + """CRM configuration for ACL resource""" + pass + +@acl.group() +@click.pass_context +def table(ctx): + """CRM configuration for ACL table resource""" + ctx.obj["crm"].res_type = 'acl_table' + +table.add_command(type) +table.add_command(low) +table.add_command(high) + +@acl.group() +@click.pass_context +def group(ctx): + """CRM configuration for ACL group resource""" + ctx.obj["crm"].res_type = 'acl_group' + +group.add_command(type) +group.add_command(low) +group.add_command(high) + +@group.group() +@click.pass_context +def entry(ctx): + """CRM configuration for ACL entry resource""" + ctx.obj["crm"].res_type = 'acl_entry' + +entry.add_command(type) +entry.add_command(low) +entry.add_command(high) + +@group.group() +@click.pass_context +def counter(ctx): + """CRM configuration for ACL counter resource""" + ctx.obj["crm"].res_type = 'acl_counter' + +counter.add_command(type) +counter.add_command(low) +counter.add_command(high) + +@cli.group() +@click.pass_context +def show(ctx): + """Show CRM related information""" + pass + +@show.command() +@click.pass_context +def summary(ctx): + """Show CRM general information""" + ctx.obj["crm"].show_summary() + +@show.group() +@click.pass_context +def resources(ctx): + """Show CRM resources information""" + ctx.obj["crm"].cli_mode = 'resources' + +@show.group() +@click.pass_context +def thresholds(ctx): + """Show CRM thresholds information""" + ctx.obj["crm"].cli_mode = 'thresholds' + +@resources.command() +@click.pass_context +def all(ctx): + """Show CRM information for all resources""" + if ctx.obj["crm"].cli_mode == 'thresholds': + ctx.obj["crm"].show_thresholds('all') + elif ctx.obj["crm"].cli_mode == 'resources': + ctx.obj["crm"].show_resources('all') + ctx.obj["crm"].show_acl_resources() + ctx.obj["crm"].show_acl_table_resources() + +@resources.group() +@click.pass_context +def ipv4(ctx): + """CRM resource IPv4 address family""" + ctx.obj["crm"].addr_family = 'ipv4' + +@resources.group() +@click.pass_context +def ipv6(ctx): + """CRM resource IPv6 address family""" + ctx.obj["crm"].addr_family = 'ipv6' + +@ipv4.command() +@click.pass_context +def route(ctx): + """Show CRM information for route resource""" + if ctx.obj["crm"].cli_mode == 'thresholds': + ctx.obj["crm"].show_thresholds('{0}_route'.format(ctx.obj["crm"].addr_family)) + elif ctx.obj["crm"].cli_mode == 'resources': + ctx.obj["crm"].show_resources('{0}_route'.format(ctx.obj["crm"].addr_family)) + +@ipv4.command() +@click.pass_context +def neighbor(ctx): + """Show CRM information for neighbor resource""" + if ctx.obj["crm"].cli_mode == 'thresholds': + ctx.obj["crm"].show_thresholds('{0}_neighbor'.format(ctx.obj["crm"].addr_family)) + elif ctx.obj["crm"].cli_mode == 'resources': + ctx.obj["crm"].show_resources('{0}_neighbor'.format(ctx.obj["crm"].addr_family)) + +@ipv4.command() +@click.pass_context +def nexthop(ctx): + """Show CRM information for nexthop resource""" + if ctx.obj["crm"].cli_mode == 'thresholds': + ctx.obj["crm"].show_thresholds('{0}_nexthop'.format(ctx.obj["crm"].addr_family)) + elif ctx.obj["crm"].cli_mode == 'resources': + ctx.obj["crm"].show_resources('{0}_nexthop'.format(ctx.obj["crm"].addr_family)) + +ipv6.add_command(route) +ipv6.add_command(neighbor) +ipv6.add_command(nexthop) + +@resources.group() +@click.pass_context +def nexthop(ctx): + """Show CRM information for nexthop resource""" + pass + +@nexthop.group() +@click.pass_context +def group(ctx): + """Show CRM information for nexthop group resource""" + pass + +@group.command() +@click.pass_context 
+def member(ctx): + """Show CRM information for nexthop group member resource""" + if ctx.obj["crm"].cli_mode == 'thresholds': + ctx.obj["crm"].show_thresholds('nexthop_group_member') + elif ctx.obj["crm"].cli_mode == 'resources': + ctx.obj["crm"].show_resources('nexthop_group_member') + +@group.command() +@click.pass_context +def object(ctx): + """Show CRM information for nexthop group resource""" + if ctx.obj["crm"].cli_mode == 'thresholds': + ctx.obj["crm"].show_thresholds('nexthop_group') + elif ctx.obj["crm"].cli_mode == 'resources': + ctx.obj["crm"].show_resources('nexthop_group') + +@resources.group() +@click.pass_context +def acl(ctx): + """Show CRM information for acl resource""" + pass + +@acl.command() +@click.pass_context +def table(ctx): + """Show CRM information for acl table resource""" + if ctx.obj["crm"].cli_mode == 'thresholds': + ctx.obj["crm"].show_thresholds('acl_table') + elif ctx.obj["crm"].cli_mode == 'resources': + ctx.obj["crm"].show_acl_table_resources() + +@acl.command() +@click.pass_context +def group(ctx): + """Show CRM information for acl group resource""" + if ctx.obj["crm"].cli_mode == 'thresholds': + ctx.obj["crm"].show_thresholds('acl_group') + elif ctx.obj["crm"].cli_mode == 'resources': + ctx.obj["crm"].show_acl_resources() + +@resources.command() +@click.pass_context +def fdb(ctx): + """Show CRM information for fdb resource""" + if ctx.obj["crm"].cli_mode == 'thresholds': + ctx.obj["crm"].show_thresholds('fdb_entry') + elif ctx.obj["crm"].cli_mode == 'resources': + ctx.obj["crm"].show_resources('fdb_entry') + +thresholds.add_command(acl) +thresholds.add_command(all) +thresholds.add_command(fdb) +thresholds.add_command(ipv4) +thresholds.add_command(ipv6) +thresholds.add_command(nexthop) + + +if __name__ == '__main__': + cli() diff --git a/data/etc/bash_completion.d/crm b/data/etc/bash_completion.d/crm new file mode 100644 index 0000000000..70d3b25384 --- /dev/null +++ b/data/etc/bash_completion.d/crm @@ -0,0 +1,8 @@ +_crm_completion() { + COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ + COMP_CWORD=$COMP_CWORD \ + _CRM_COMPLETE=complete $1 ) ) + return 0 +} + +complete -F _crm_completion -o default crm; diff --git a/pfcwd/main.py b/pfcwd/main.py index 22f636d25f..d5d39caaae 100644 --- a/pfcwd/main.py +++ b/pfcwd/main.py @@ -5,6 +5,13 @@ from tabulate import tabulate from natsort import natsorted +# Default configuration +DEFAULT_DETECTION_TIME = 200 +DEFAULT_RESTORATION_TIME = 200 +DEFAULT_POLL_INTERVAL = 200 +DEFAULT_PORT_NUM = 32 +DEFAULT_ACTION = 'drop' + STATS_DESCRIPTION = [ ('STORM DETECTED/RESTORED', 'PFC_WD_QUEUE_STATS_DEADLOCK_DETECTED', 'PFC_WD_QUEUE_STATS_DEADLOCK_RESTORED'), ('TX OK/DROP', 'PFC_WD_QUEUE_STATS_TX_PACKETS', 'PFC_WD_QUEUE_STATS_TX_DROPPED_PACKETS'), @@ -35,6 +42,17 @@ def get_all_ports(db): port_names = db.get_all(db.COUNTERS_DB, 'COUNTERS_PORT_NAME_MAP') return natsorted(port_names.keys()) +def get_server_facing_ports(db): + candidates = db.get_table('DEVICE_NEIGHBOR') + server_facing_ports = [] + for port in candidates.keys(): + neighbor = db.get_entry('DEVICE_NEIGHBOR_METADATA', candidates[port]['name']) + if neighbor and neighbor['type'].lower() == 'server': + server_facing_ports.append(port) + if not server_facing_ports: + server_facing_ports = [p[1] for p in db.get_table('VLAN_MEMBER').keys()] + return server_facing_ports + # Show commands @cli.group() def show(): @@ -92,7 +110,9 @@ def config(ports): line = config_entry.get(config[1], config[2]) config_list.append(line) table.append([port] + config_list) - + 
poll_interval = configdb.get_entry('PFC_WD_TABLE', 'GLOBAL').get('POLL_INTERVAL') + if poll_interval is not None: + click.echo("Changed polling interval to " + poll_interval + "ms") click.echo(tabulate(table, CONFIG_HEADER, stralign='right', numalign='right', tablefmt='simple')) # Start WD @@ -127,6 +147,19 @@ def start(action, restoration_time, ports, detection_time): configdb.mod_entry("PFC_WD_TABLE", port, None) configdb.mod_entry("PFC_WD_TABLE", port, pfcwd_info) +# Set WD poll interval +@cli.command() +@click.argument('poll_interval', type=click.IntRange(100, 3000)) +def interval(poll_interval): + """ Set PFC watchdog counter polling interval """ + configdb = swsssdk.ConfigDBConnector() + configdb.connect() + pfcwd_info = {} + if poll_interval is not None: + pfcwd_info['POLL_INTERVAL'] = poll_interval + + configdb.mod_entry("PFC_WD_TABLE", "GLOBAL", pfcwd_info) + # Stop WD @cli.command() @click.argument('ports', nargs = -1) @@ -147,5 +180,39 @@ def stop(ports): continue configdb.mod_entry("PFC_WD_TABLE", port, None) +# Set WD default configuration on server facing ports when enable flag is on +@cli.command() +def start_default(): + """ Start PFC WD by default configurations """ + configdb = swsssdk.ConfigDBConnector() + configdb.connect() + enable = configdb.get_entry('DEVICE_METADATA', 'localhost').get('default_pfcwd_status') + + server_facing_ports = get_server_facing_ports(configdb) + + if not enable or enable.lower() != "enable": + return + + device_type = configdb.get_entry('DEVICE_METADATA', 'localhost').get('type') + if device_type.lower() != "torrouter": + return + + port_num = len(configdb.get_table('PORT').keys()) + + # Parameter values positively correlate to the number of ports. + multiply = max(1, (port_num-1)/DEFAULT_PORT_NUM+1) + pfcwd_info = { + 'detection_time': DEFAULT_DETECTION_TIME * multiply, + 'restoration_time': DEFAULT_RESTORATION_TIME * multiply, + 'action': DEFAULT_ACTION + } + + for port in server_facing_ports: + configdb.set_entry("PFC_WD_TABLE", port, pfcwd_info) + + pfcwd_info = {} + pfcwd_info['POLL_INTERVAL'] = DEFAULT_POLL_INTERVAL * multiply + configdb.mod_entry("PFC_WD_TABLE", "GLOBAL", pfcwd_info) + if __name__ == '__main__': cli() diff --git a/psuutil/main.py b/psuutil/main.py index 002619e0b4..4e347ab9ef 100644 --- a/psuutil/main.py +++ b/psuutil/main.py @@ -28,8 +28,8 @@ PLATFORM_ROOT_PATH_DOCKER = '/usr/share/sonic/platform' SONIC_CFGGEN_PATH = '/usr/local/bin/sonic-cfggen' MINIGRAPH_PATH = '/etc/sonic/minigraph.xml' -HWSKU_KEY = "DEVICE_METADATA['localhost']['hwsku']" -PLATFORM_KEY = 'platform' +HWSKU_KEY = 'DEVICE_METADATA.localhost.hwsku' +PLATFORM_KEY = 'DEVICE_METADATA.localhost.platform' # Global platform-specific psuutil class instance platform_psuutil = None @@ -70,7 +70,7 @@ def log_error(msg, also_print_to_console=False): # Returns platform and HW SKU def get_platform_and_hwsku(): try: - proc = subprocess.Popen([SONIC_CFGGEN_PATH, '-v', PLATFORM_KEY], + proc = subprocess.Popen([SONIC_CFGGEN_PATH, '-H', '-v', PLATFORM_KEY], stdout=subprocess.PIPE, shell=False, stderr=subprocess.STDOUT) diff --git a/scripts/decode-syseeprom b/scripts/decode-syseeprom index f060ce47b9..6b70a4daab 100755 --- a/scripts/decode-syseeprom +++ b/scripts/decode-syseeprom @@ -22,7 +22,7 @@ except ImportError, e: SONIC_CFGGEN = '/usr/local/bin/sonic-cfggen' -PLATFORM_KEY = 'platform' +PLATFORM_KEY = 'DEVICE_METADATA.localhost.platform' PLATFORM_ROOT = '/usr/share/sonic/device' @@ -32,7 +32,7 @@ CACHE_FILE = 'syseeprom_cache' # Returns platform and HW SKU def 
get_platform(): try: - proc = subprocess.Popen([SONIC_CFGGEN, '-v', PLATFORM_KEY], + proc = subprocess.Popen([SONIC_CFGGEN, '-H', '-v', PLATFORM_KEY], stdout=subprocess.PIPE, shell=False, stderr=subprocess.STDOUT) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index ba397e5ea0..b793058530 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -1,7 +1,7 @@ #!/bin/bash # Check root privileges -if [ "$EUID" -ne 0 ] +if [[ "$EUID" -ne 0 ]] then echo "Please run as root" exit @@ -9,16 +9,25 @@ fi # Unload the previously loaded kernel if any loaded -if [ "$(cat /sys/kernel/kexec_loaded)" -eq 1 ] +if [[ "$(cat /sys/kernel/kexec_loaded)" -eq 1 ]] then /sbin/kexec -u fi # Kernel and initrd image NEXT_SONIC_IMAGE=$(sonic_installer list | grep "Next: " | cut -d ' ' -f 2) -KERNEL_OPTIONS=$(cat /host/grub/grub.cfg | sed "/$NEXT_SONIC_IMAGE'/,/}/"'!'"g" | grep linux) -KERNEL_IMAGE="/host$(echo $KERNEL_OPTIONS | cut -d ' ' -f 2)" -BOOT_OPTIONS="$(echo $KERNEL_OPTIONS | sed -e 's/\s*linux\s*/BOOT_IMAGE=/') fast-reboot" +if grep -q aboot_platform= /host/machine.conf; then + IMAGE_PATH="/host/image-${NEXT_SONIC_IMAGE#SONiC-OS-}" + KERNEL_IMAGE="$(ls $IMAGE_PATH/boot/vmlinuz-*)" + BOOT_OPTIONS="$(cat "$IMAGE_PATH/kernel-cmdline" | tr '\n' ' ') fast-reboot" +elif grep -q onie_platform= /host/machine.conf; then + KERNEL_OPTIONS=$(cat /host/grub/grub.cfg | sed "/$NEXT_SONIC_IMAGE'/,/}/"'!'"g" | grep linux) + KERNEL_IMAGE="/host$(echo $KERNEL_OPTIONS | cut -d ' ' -f 2)" + BOOT_OPTIONS="$(echo $KERNEL_OPTIONS | sed -e 's/\s*linux\s*/BOOT_IMAGE=/') fast-reboot" +else + echo "Unknown bootloader. fast-reboot is not supported." + exit 1 +fi INITRD=$(echo $KERNEL_IMAGE | sed 's/vmlinuz/initrd.img/g') sonic_asic_type=$(sonic-cfggen -y /etc/sonic/sonic_version.yml -v asic_type) @@ -26,32 +35,51 @@ sonic_asic_type=$(sonic-cfggen -y /etc/sonic/sonic_version.yml -v asic_type) # Load kernel into the memory /sbin/kexec -l "$KERNEL_IMAGE" --initrd="$INITRD" --append="$BOOT_OPTIONS" -# Dump the ARP and FDB tables to files -/usr/bin/fast-reboot-dump.py -docker cp /tmp/fdb.json swss:/ -docker cp /tmp/arp.json swss:/ +# Dump the ARP and FDB tables to files, as well as the IPv4 and IPv6 default routes, +# into /host/fast-reboot +mkdir -p /host/fast-reboot +/usr/bin/fast-reboot-dump.py -t /host/fast-reboot -# Kill bgpd to enable graceful restart of BGP -docker exec -ti bgp killall -9 watchquagga -docker exec -ti bgp killall -9 zebra -docker exec -ti bgp killall -9 bgpd +# Kill bgpd to start the bgp graceful restart procedure +docker exec -i bgp killall -9 zebra +docker exec -i bgp killall -9 bgpd # Kill lldp, otherwise it sends information about reboot -docker kill lldp +docker kill lldp > /dev/null # Kill teamd, otherwise it takes down all LAGs -docker kill teamd +docker kill teamd > /dev/null + +# syncd graceful stop is supported only on Broadcom platforms for now +if [[ "$sonic_asic_type" = 'broadcom' ]]; +then + # Gracefully stop syncd + docker exec -i syncd /usr/bin/syncd_request_shutdown --cold > /dev/null + + # Check that syncd was stopped + while docker top syncd | grep -q /usr/bin/syncd + do + sleep 0.1 + done +fi -# Kill other containers to make reboot faster -docker ps -qa | xargs docker kill +# Kill other containers to make the reboot faster +docker ps -q | xargs docker kill > /dev/null # Stop the docker container engine.
Otherwise we will have a broken docker storage systemctl stop docker.service # Stop opennsl modules for Broadcom platform -if [ "$sonic_asic_type" = 'broadcom' ]; +if [[ "$sonic_asic_type" = 'broadcom' ]]; +then + service_name=$(systemctl list-units --plain --no-pager --no-legend --type=service | grep opennsl | cut -f 1 -d' ') + systemctl stop "$service_name" +fi + +# Stop kernel modules for Nephos platform +if [[ "$sonic_asic_type" = 'nephos' ]]; then - systemctl stop opennsl-modules-3.16.0-4-amd64.service + systemctl stop nps-modules-`uname -r`.service fi # Wait until all buffers synced with disk diff --git a/scripts/fast-reboot-dump.py b/scripts/fast-reboot-dump.py index 910851d9b2..7488b8831f 100644 --- a/scripts/fast-reboot-dump.py +++ b/scripts/fast-reboot-dump.py @@ -4,8 +4,11 @@ import json import socket import struct +import sys +import os from fcntl import ioctl import binascii +import argparse ARP_CHUNK = binascii.unhexlify('08060001080006040001') # defines a part of the packet for ARP Request @@ -93,20 +96,30 @@ def get_map_bridge_port_id_2_iface_name(db): return bridge_port_id_2_iface_name +def get_vlan_oid_by_vlan_id(db, vlan_id): + keys = db.keys(db.ASIC_DB, 'ASIC_STATE:SAI_OBJECT_TYPE_VLAN:oid:*') + keys = [] if keys is None else keys + for key in keys: + value = db.get_all(db.ASIC_DB, key) + if 'SAI_VLAN_ATTR_VLAN_ID' in value and int(value['SAI_VLAN_ATTR_VLAN_ID']) == vlan_id: + return key.replace('ASIC_STATE:SAI_OBJECT_TYPE_VLAN:', '') + + raise Exception('Not found bvi oid for vlan_id: %d' % vlan_id) + def get_fdb(db, vlan_name, vlan_id, bridge_id_2_iface): fdb_types = { 'SAI_FDB_ENTRY_TYPE_DYNAMIC': 'dynamic', 'SAI_FDB_ENTRY_TYPE_STATIC' : 'static' } + bvid = get_vlan_oid_by_vlan_id(db, vlan_id) available_macs = set() map_mac_ip = {} fdb_entries = [] - keys = db.keys(db.ASIC_DB, 'ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY:{*\"vlan\":\"%d\"}' % vlan_id) + keys = db.keys(db.ASIC_DB, 'ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY:{*\"bvid\":\"%s\"*}' % bvid) keys = [] if keys is None else keys for key in keys: key_obj = json.loads(key.replace('ASIC_STATE:SAI_OBJECT_TYPE_FDB_ENTRY:', '')) - vlan = str(key_obj['vlan']) mac = str(key_obj['mac']) if not is_mac_unicast(mac): continue @@ -213,10 +226,51 @@ def garp_send(arp_entries, map_mac_ip_per_vlan): return +def get_default_entries(db, route): + key = 'ROUTE_TABLE:%s' % route + keys = db.keys(db.APPL_DB, key) + if keys is None: + return None + + entry = db.get_all(db.APPL_DB, key) + obj = { + key: entry, + 'OP': 'SET' + } + + return obj + +def generate_default_route_entries(filename): + db = swsssdk.SonicV2Connector() + db.connect(db.APPL_DB, False) # Make one attempt only + + default_routes_output = [] + + ipv4_default = get_default_entries(db, '0.0.0.0/0') + if ipv4_default is not None: + default_routes_output.append(ipv4_default) + + ipv6_default = get_default_entries(db, '::/0') + if ipv6_default is not None: + default_routes_output.append(ipv6_default) + + db.close(db.APPL_DB) + + with open(filename, 'w') as fp: + json.dump(default_routes_output, fp, indent=2, separators=(',', ': ')) + def main(): - all_available_macs, map_mac_ip_per_vlan = generate_fdb_entries('/tmp/fdb.json') - arp_entries = generate_arp_entries('/tmp/arp.json', all_available_macs) + parser = argparse.ArgumentParser() + parser.add_argument('-t', '--target', type=str, default='/tmp', help='target directory for files') + args = parser.parse_args() + root_dir = args.target + if not os.path.isdir(root_dir): + print "Target directory '%s' not found" % root_dir + 
sys.exit(1) + all_available_macs, map_mac_ip_per_vlan = generate_fdb_entries(root_dir + '/fdb.json') + arp_entries = generate_arp_entries(root_dir + '/arp.json', all_available_macs) + generate_default_route_entries(root_dir + '/default_routes.json') garp_send(arp_entries, map_mac_ip_per_vlan) return diff --git a/scripts/fdbclear b/scripts/fdbclear new file mode 100644 index 0000000000..a8100af2cb --- /dev/null +++ b/scripts/fdbclear @@ -0,0 +1,58 @@ +#!/usr/bin/python +""" + Script to clear MAC/FDB entries learnt in Hardware + + usage: fdbclear [-p PORT] [-v VLAN] + optional arguments: + -p, --port FDB learned on specific port: Ethernet0 + -v, --vlan FDB learned on specific Vlan: 1000 + + Example of the output: + +""" + +import argparse +import json +import sys + +from natsort import natsorted +from swsssdk import SonicV2Connector, port_util +from tabulate import tabulate + +class FdbClear(object): + + + def __init__(self): + super(FdbClear,self).__init__() + self.db = SonicV2Connector(host="127.0.0.1") + self.db.connect(self.db.APPL_DB) + return + + def send_notification(self, op, data): + opdata = [op,data] + msg = json.dumps(opdata,separators=(',',':')) + self.db.publish('APPL_DB','FLUSHFDBREQUEST', msg) + return + +def main(): + + parser = argparse.ArgumentParser(description='Clear FDB entries', formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument('-p', '--port', type=str, help='Clear FDB learned on specific port: Ethernet0', default=None) + parser.add_argument('-v', '--vlan', type=str, help='Clear FDB learned on specific Vlan: 1001', default=None) + args = parser.parse_args() + + try: + fdb = FdbClear() + if args.vlan is not None: + print("command not supported yet.") + elif args.port is not None: + print("command not supported yet.") + else: + fdb.send_notification("ALL", "ALL") + print("FDB entries are cleared.") + except Exception as e: + print e.message + sys.exit(1) + +if __name__ == "__main__": + main() diff --git a/scripts/fdbshow b/scripts/fdbshow index 81134fff67..13d3630868 100755 --- a/scripts/fdbshow +++ b/scripts/fdbshow @@ -9,17 +9,17 @@ Example of the output: admin@str~$ fdbshow - No. Vlan MacAddress Port - ----- ------ ----------------- ---------- - 1 1000 7C:FE:90:80:9F:05 Ethernet20 - 2 1000 7C:FE:90:80:9F:10 Ethernet40 - 3 1000 7C:FE:90:80:9F:01 Ethernet4 - 4 1000 7C:FE:90:80:9F:02 Ethernet8 + No. Vlan MacAddress Port Type + ----- ------ ----------------- ---------- ------- + 1 1000 7C:FE:90:80:9F:05 Ethernet20 Dynamic + 2 1000 7C:FE:90:80:9F:10 Ethernet40 Dynamic + 3 1000 7C:FE:90:80:9F:01 Ethernet4 Dynamic + 4 1000 7C:FE:90:80:9F:02 Ethernet8 Dynamic Total number of entries 4 admin@str:~$ fdbshow -p Ethernet4 - No. Vlan MacAddress Port - ----- ------ ----------------- --------- - 1 1000 7C:FE:90:80:9F:01 Ethernet4 + No. 
Vlan MacAddress Port Type + ----- ------ ----------------- --------- ------- + 1 1000 7C:FE:90:80:9F:01 Ethernet4 Dynamic Total number of entries 1 admin@str:~$ fdbshow -v 1001 1001 is not in list @@ -35,7 +35,7 @@ from tabulate import tabulate class FdbShow(object): - HEADER = ['No.', 'Vlan', 'MacAddress', 'Port'] + HEADER = ['No.', 'Vlan', 'MacAddress', 'Port', 'Type'] FDB_COUNT = 0 def __init__(self): @@ -71,12 +71,17 @@ class FdbShow(object): ent = self.db.get_all('ASIC_DB', s, blocking=True) br_port_id = ent[b"SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID"][oid_pfx:] + ent_type = ent[b"SAI_FDB_ENTRY_ATTR_TYPE"] + fdb_type = ['Dynamic','Static'][ent_type == "SAI_FDB_ENTRY_TYPE_STATIC"] if br_port_id not in self.if_br_oid_map: continue port_id = self.if_br_oid_map[br_port_id] if_name = self.if_oid_map[port_id] - - self.bridge_mac_list.append((int(fdb["vlan"]),) + (fdb["mac"],) + (if_name,)) + if 'vlan' in fdb: + vlan_id = fdb["vlan"] + elif 'bvid' in fdb: + vlan_id = port_util.get_vlan_id_from_bvid(self.db, fdb["bvid"]) + self.bridge_mac_list.append((int(vlan_id),) + (fdb["mac"],) + (if_name,) + (fdb_type,)) self.bridge_mac_list.sort(key = lambda x: x[0]) return @@ -115,7 +120,7 @@ class FdbShow(object): for fdb in self.bridge_mac_list: self.FDB_COUNT += 1 - output.append([self.FDB_COUNT, fdb[0], fdb[1], fdb[2]]) + output.append([self.FDB_COUNT, fdb[0], fdb[1], fdb[2], fdb[3]]) print tabulate(output, self.HEADER) print "Total number of entries {0} ".format(self.FDB_COUNT) diff --git a/scripts/generate_dump b/scripts/generate_dump index 74083e1765..fade14f7cd 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -286,7 +286,7 @@ main() { save_cmd "docker exec -it syncd saidump" "saidump" - local platform="$(/usr/local/bin/sonic-cfggen -v platform)" + local platform="$(/usr/local/bin/sonic-cfggen -H -v DEVICE_METADATA.localhost.platform)" if [[ $platform == *"mlnx"* ]]; then local sai_dump_filename="/tmp/sai_sdk_dump_$(date +"%m_%d_%Y_%I_%M_%p")" docker exec -it syncd saisdkdump -f $sai_dump_filename diff --git a/scripts/pfcstat b/scripts/pfcstat new file mode 100755 index 0000000000..0eb135d798 --- /dev/null +++ b/scripts/pfcstat @@ -0,0 +1,260 @@ +#!/usr/bin/env python + +##################################################################### +# +# pfcstat is a tool for summarizing Priority-based Flow Control (PFC) statistics. 
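+# A note on the layout below: the two counter_bucket_*_dict tables map each
+# SAI PFC-priority counter name to its column, so a port's row is built by
+# filling an eight-slot list and casting it to the PStats namedtuple, e.g.
+# (illustrative values):
+#
+#     pos = counter_bucket_rx_dict['SAI_PORT_STAT_PFC_3_RX_PKTS']  # -> 3
+#     fields[pos] = str(int(counter_data))                         # PFC3 column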
+# +##################################################################### + +import swsssdk +import sys +import argparse +import cPickle as pickle +import datetime +import getopt +import json +import os.path +import time + +from collections import namedtuple, OrderedDict +from natsort import natsorted +from tabulate import tabulate + + +PStats = namedtuple("PStats", "pfc0, pfc1, pfc2, pfc3, pfc4, pfc5, pfc6, pfc7") +header_Rx = ['Port Rx', 'PFC0', 'PFC1', 'PFC2', 'PFC3', 'PFC4', 'PFC5', 'PFC6', 'PFC7'] + +header_Tx = ['Port Tx', 'PFC0', 'PFC1', 'PFC2', 'PFC3', 'PFC4', 'PFC5', 'PFC6', 'PFC7'] + +counter_bucket_rx_dict = { + 'SAI_PORT_STAT_PFC_0_RX_PKTS': 0, + 'SAI_PORT_STAT_PFC_1_RX_PKTS': 1, + 'SAI_PORT_STAT_PFC_2_RX_PKTS': 2, + 'SAI_PORT_STAT_PFC_3_RX_PKTS': 3, + 'SAI_PORT_STAT_PFC_4_RX_PKTS': 4, + 'SAI_PORT_STAT_PFC_5_RX_PKTS': 5, + 'SAI_PORT_STAT_PFC_6_RX_PKTS': 6, + 'SAI_PORT_STAT_PFC_7_RX_PKTS': 7 +} + +counter_bucket_tx_dict = { + 'SAI_PORT_STAT_PFC_0_TX_PKTS': 0, + 'SAI_PORT_STAT_PFC_1_TX_PKTS': 1, + 'SAI_PORT_STAT_PFC_2_TX_PKTS': 2, + 'SAI_PORT_STAT_PFC_3_TX_PKTS': 3, + 'SAI_PORT_STAT_PFC_4_TX_PKTS': 4, + 'SAI_PORT_STAT_PFC_5_TX_PKTS': 5, + 'SAI_PORT_STAT_PFC_6_TX_PKTS': 6, + 'SAI_PORT_STAT_PFC_7_TX_PKTS': 7 +} + +STATUS_NA = 'N/A' + +COUNTER_TABLE_PREFIX = "COUNTERS:" +COUNTERS_PORT_NAME_MAP = "COUNTERS_PORT_NAME_MAP" + +class Pfcstat(object): + def __init__(self): + self.db = swsssdk.SonicV2Connector(host='127.0.0.1') + self.db.connect(self.db.COUNTERS_DB) + + def get_cnstat(self, rx): + """ + Get the counters info from database. + """ + def get_counters(table_id): + """ + Get the counters from specific table. + """ + fields = ["0","0","0","0","0","0","0","0"] + if rx: + bucket_dict = counter_bucket_rx_dict + else: + bucket_dict = counter_bucket_tx_dict + for counter_name, pos in bucket_dict.iteritems(): + full_table_id = COUNTER_TABLE_PREFIX + table_id + counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name) + if counter_data is None: + fields[pos] = STATUS_NA + else: + fields[pos] = str(int(counter_data)) + cntr = PStats._make(fields) + return cntr + + # Get the info from database + counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + # Build a dictionary of the stats + cnstat_dict = OrderedDict() + cnstat_dict['time'] = datetime.datetime.now() + if counter_port_name_map is None: + return cnstat_dict + for port in natsorted(counter_port_name_map): + cnstat_dict[port] = get_counters(counter_port_name_map[port]) + return cnstat_dict + + def cnstat_print(self, cnstat_dict, rx): + """ + Print the cnstat. + """ + table = [] + + for key, data in cnstat_dict.iteritems(): + if key == 'time': + continue + table.append((key, + data.pfc0, data.pfc1, + data.pfc2, data.pfc3, + data.pfc4, data.pfc5, + data.pfc6, data.pfc7)) + + if rx: + print tabulate(table, header_Rx, tablefmt='simple', stralign='right') + else: + print tabulate(table, header_Tx, tablefmt='simple', stralign='right') + + def cnstat_diff_print(self, cnstat_new_dict, cnstat_old_dict, rx): + """ + Print the difference between two cnstat results. + """ + def ns_diff(newstr, oldstr): + """ + Calculate the diff. 
+ """ + if newstr == STATUS_NA or oldstr == STATUS_NA: + return STATUS_NA + else: + new, old = int(newstr), int(oldstr) + return '{:,}'.format(new - old) + + table = [] + + for key, cntr in cnstat_new_dict.iteritems(): + if key == 'time': + continue + old_cntr = None + if key in cnstat_old_dict: + old_cntr = cnstat_old_dict.get(key) + + if old_cntr is not None: + table.append((key, + ns_diff(cntr.pfc0, old_cntr.pfc0), + ns_diff(cntr.pfc1, old_cntr.pfc1), + ns_diff(cntr.pfc2, old_cntr.pfc2), + ns_diff(cntr.pfc3, old_cntr.pfc3), + ns_diff(cntr.pfc4, old_cntr.pfc4), + ns_diff(cntr.pfc5, old_cntr.pfc5), + ns_diff(cntr.pfc6, old_cntr.pfc6), + ns_diff(cntr.pfc7, old_cntr.pfc7))) + else: + table.append((key, + cntr.pfc0, cntr.pfc1, + cntr.pfc2, cntr.pfc3, + cntr.pfc4, cntr.pfc5, + cntr.pfc6, cntr.pfc7)) + + if rx: + print tabulate(table, header_Rx, tablefmt='simple', stralign='right') + else: + print tabulate(table, header_Tx, tablefmt='simple', stralign='right') + +def main(): + parser = argparse.ArgumentParser(description='Display the pfc counters', + version='1.0.0', + formatter_class=argparse.RawTextHelpFormatter, + epilog=""" +Examples: + pfcstat + pfcstat -c + pfcstat -d +""") + + parser.add_argument('-c', '--clear', action='store_true', help='Clear previous stats and save new ones') + parser.add_argument('-d', '--delete', action='store_true', help='Delete saved stats') + args = parser.parse_args() + + save_fresh_stats = args.clear + delete_all_stats = args.delete + + uid = str(os.getuid()) + cnstat_file = uid + + cnstat_dir = "/tmp/pfcstat-" + uid + cnstat_fqn_file_rx = cnstat_dir + "/" + cnstat_file + "rx" + cnstat_fqn_file_tx = cnstat_dir + "/" + cnstat_file + "tx" + + pfcstat = Pfcstat() + + if delete_all_stats: + for file in os.listdir(cnstat_dir): + os.remove(cnstat_dir + "/" + file) + + try: + os.rmdir(cnstat_dir) + sys.exit(0) + except IOError as e: + print e.errno, e + sys.exit(e) + + """ + Get the counters of pfc rx counter + """ + cnstat_dict_rx = pfcstat.get_cnstat(True) + + """ + Get the counters of pfc tx counter + """ + cnstat_dict_tx = pfcstat.get_cnstat(False) + + # At this point, either we'll create a file or open an existing one. 
+ if not os.path.exists(cnstat_dir): + try: + os.makedirs(cnstat_dir) + except IOError as e: + print e.errno, e + sys.exit(1) + + if save_fresh_stats: + try: + pickle.dump(cnstat_dict_rx, open(cnstat_fqn_file_rx, 'w')) + pickle.dump(cnstat_dict_tx, open(cnstat_fqn_file_tx, 'w')) + except IOError as e: + print e.errno, e + sys.exit(e.errno) + else: + print "Clear saved counters" + sys.exit(0) + + + """ + Print the counters of pfc rx counter + """ + cnstat_cached_dict = OrderedDict() + if os.path.isfile(cnstat_fqn_file_rx): + try: + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_rx, 'r')) + print "Last cached time was " + str(cnstat_cached_dict.get('time')) + pfcstat.cnstat_diff_print(cnstat_dict_rx, cnstat_cached_dict, True) + except IOError as e: + print e.errno, e + else: + pfcstat.cnstat_print(cnstat_dict_rx, True) + + print + """ + Print the counters of pfc tx counter + """ + cnstat_cached_dict = OrderedDict() + if os.path.isfile(cnstat_fqn_file_tx): + try: + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_tx, 'r')) + print "Last cached time was " + str(cnstat_cached_dict.get('time')) + pfcstat.cnstat_diff_print(cnstat_dict_tx, cnstat_cached_dict, False) + except IOError as e: + print e.errno, e + else: + pfcstat.cnstat_print(cnstat_dict_tx, False) + + sys.exit(0) + +if __name__ == "__main__": + main() diff --git a/scripts/queuestat b/scripts/queuestat new file mode 100755 index 0000000000..5196b6bd87 --- /dev/null +++ b/scripts/queuestat @@ -0,0 +1,309 @@ +#!/usr/bin/env python + +##################################################################### +# +# queuestat is a tool for summarizing queue statistics of all ports. +# +##################################################################### + +import argparse +import cPickle as pickle +import datetime +import getopt +import json +import os.path +import swsssdk +import sys + +from collections import namedtuple, OrderedDict +from natsort import natsorted +from tabulate import tabulate + + +QueueStats = namedtuple("QueueStats", "queueindex, queuetype, totalpacket, totalbytes, droppacket, dropbytes") +header = ['Port', 'TxQ', 'Counter/pkts', 'Counter/bytes', 'Drop/pkts', 'Drop/bytes'] + +counter_bucket_dict = { + 'SAI_QUEUE_STAT_PACKETS': 2, + 'SAI_QUEUE_STAT_BYTES': 3, + 'SAI_QUEUE_STAT_DROPPED_PACKETS': 4, + 'SAI_QUEUE_STAT_DROPPED_BYTES': 5, +} + +STATUS_NA = 'N/A' +STATUS_INVALID = 'INVALID' + +QUEUE_TYPE_MC = 'MC' +QUEUE_TYPE_UC = 'UC' +SAI_QUEUE_TYPE_MULTICAST = "SAI_QUEUE_TYPE_MULTICAST" +SAI_QUEUE_TYPE_UNICAST = "SAI_QUEUE_TYPE_UNICAST" + + +COUNTER_TABLE_PREFIX = "COUNTERS:" +COUNTERS_PORT_NAME_MAP = "COUNTERS_PORT_NAME_MAP" +COUNTERS_QUEUE_NAME_MAP = "COUNTERS_QUEUE_NAME_MAP" +COUNTERS_QUEUE_TYPE_MAP = "COUNTERS_QUEUE_TYPE_MAP" +COUNTERS_QUEUE_INDEX_MAP = "COUNTERS_QUEUE_INDEX_MAP" +COUNTERS_QUEUE_PORT_MAP = "COUNTERS_QUEUE_PORT_MAP" + +cnstat_dir = 'N/A' +cnstat_fqn_file = 'N/A' + +class Queuestat(object): + def __init__(self): + self.db = swsssdk.SonicV2Connector(host='127.0.0.1') + self.db.connect(self.db.COUNTERS_DB) + + def get_queue_port(table_id): + port_table_id = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_PORT_MAP, table_id) + if port_table_id is None: + print "Port is not available!", table_id + sys.exit(1) + + return port_table_id + + # Get all ports + self.counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + if self.counter_port_name_map is None: + print "COUNTERS_PORT_NAME_MAP is empty!" 
+ sys.exit(1) + + self.port_queues_map = {} + self.port_name_map = {} + + for port in self.counter_port_name_map: + self.port_queues_map[port] = {} + self.port_name_map[self.counter_port_name_map[port]] = port + + # Get Queues for each port + counter_queue_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_QUEUE_NAME_MAP) + if counter_queue_name_map is None: + print "COUNTERS_QUEUE_NAME_MAP is empty!" + sys.exit(1) + + for queue in counter_queue_name_map: + port = self.port_name_map[get_queue_port(counter_queue_name_map[queue])] + self.port_queues_map[port][queue] = counter_queue_name_map[queue] + + def get_cnstat(self, queue_map): + """ + Get the counters info from database. + """ + def get_counters(table_id): + """ + Get the counters from specific table. + """ + def get_queue_index(table_id): + queue_index = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_INDEX_MAP, table_id) + if queue_index is None: + print "Queue index is not available!", table_id + sys.exit(1) + + return queue_index + + def get_queue_type(table_id): + queue_type = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_TYPE_MAP, table_id) + if queue_type is None: + print "Queue Type is not available!", table_id + sys.exit(1) + elif queue_type == SAI_QUEUE_TYPE_MULTICAST: + return QUEUE_TYPE_MC + elif queue_type == SAI_QUEUE_TYPE_UNICAST: + return QUEUE_TYPE_UC + else: + print "Queue Type is invalid:", table_id, queue_type + sys.exit(1) + + fields = ["0","0","0","0","0","0"] + fields[0] = get_queue_index(table_id) + fields[1] = get_queue_type(table_id) + + for counter_name, pos in counter_bucket_dict.iteritems(): + full_table_id = COUNTER_TABLE_PREFIX + table_id + counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name) + if counter_data is None: + fields[pos] = STATUS_NA + elif fields[pos] != STATUS_NA: + fields[pos] = str(int(counter_data)) + cntr = QueueStats._make(fields) + return cntr + + # Build a dictionary of the stats + cnstat_dict = OrderedDict() + cnstat_dict['time'] = datetime.datetime.now() + if queue_map is None: + return cnstat_dict + for queue in natsorted(queue_map): + cnstat_dict[queue] = get_counters(queue_map[queue]) + return cnstat_dict + + def cnstat_print(self, port, cnstat_dict): + """ + Print the cnstat. + """ + table = [] + queue_count = len(cnstat_dict) + + for key, data in cnstat_dict.iteritems(): + if key == 'time': + continue + index = int(data.queueindex) % (queue_count / 2) + table.append((port, data.queuetype + str(index), + data.totalpacket, data.totalbytes, + data.droppacket, data.dropbytes)) + + print tabulate(table, header, tablefmt='simple', stralign='right') + print + + def cnstat_diff_print(self, port, cnstat_new_dict, cnstat_old_dict): + """ + Print the difference between two cnstat results. + """ + def ns_diff(newstr, oldstr): + """ + Calculate the diff. 
+ """ + if newstr == STATUS_NA or oldstr == STATUS_NA: + return STATUS_NA + else: + new, old = int(newstr), int(oldstr) + return '{:,}'.format(new - old) + + table = [] + queue_count = len(cnstat_new_dict) + + for key, cntr in cnstat_new_dict.iteritems(): + if key == 'time': + continue + old_cntr = None + if key in cnstat_old_dict: + old_cntr = cnstat_old_dict.get(key) + + index = int(cntr.queueindex) % (queue_count / 2) + + if old_cntr is not None: + table.append((port, cntr.queuetype + str(index), + ns_diff(cntr.totalpacket, old_cntr.totalpacket), + ns_diff(cntr.totalbytes, old_cntr.totalbytes), + ns_diff(cntr.droppacket, old_cntr.droppacket), + ns_diff(cntr.dropbytes, old_cntr.dropbytes))) + else: + table.append((port, cntr.queuetype + str(index), + cntr.totalpacket, cntr.totalbytes, + cntr.droppacket, cntr.dropbytes)) + + print tabulate(table, header, tablefmt='simple', stralign='right') + print + + def get_print_all_stat(self): + # Get stat for each port + for port in natsorted(self.counter_port_name_map): + cnstat_dict = self.get_cnstat(self.port_queues_map[port]) + + cnstat_cached_dict = OrderedDict() + cnstat_fqn_file_name = cnstat_fqn_file + port + if os.path.isfile(cnstat_fqn_file_name): + try: + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_name, 'r')) + print port + " Last cached time was " + str(cnstat_cached_dict.get('time')) + self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict) + except IOError as e: + print e.errno, e + else: + self.cnstat_print(port, cnstat_dict) + + def get_print_port_stat(self, port): + if not port in self.port_queues_map: + print "Port doesn't exist!", port + sys.exit(1) + + # Get stat for the port queried + cnstat_dict = self.get_cnstat(self.port_queues_map[port]) + cnstat_cached_dict = OrderedDict() + cnstat_fqn_file_name = cnstat_fqn_file + port + if os.path.isfile(cnstat_fqn_file_name): + try: + cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_name, 'r')) + print "Last cached time was " + str(cnstat_cached_dict.get('time')) + self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict) + except IOError as e: + print e.errno, e + else: + self.cnstat_print(port, cnstat_dict) + + def save_fresh_stats(self): + if not os.path.exists(cnstat_dir): + try: + os.makedirs(cnstat_dir) + except IOError as e: + print e.errno, e + sys.exit(1) + + # Get stat for each port and save + for port in natsorted(self.counter_port_name_map): + cnstat_dict = self.get_cnstat(self.port_queues_map[port]) + try: + pickle.dump(cnstat_dict, open(cnstat_fqn_file + port, 'w')) + except IOError as e: + print e.errno, e + sys.exit(e.errno) + else: + print "Clear and update saved counters for " + port + +def main(): + global cnstat_dir + global cnstat_fqn_file + + parser = argparse.ArgumentParser(description='Display the queue state and counters', + version='1.0.0', + formatter_class=argparse.RawTextHelpFormatter, + epilog=""" +Examples: + queuestat + queuestat -p Ethernet0 + queuestat -c + queuestat -d +""") + + parser.add_argument('-p', '--port', type=str, help='Show the queue conters for just one port', default=None) + parser.add_argument('-c', '--clear', action='store_true', help='Clear previous stats and save new ones') + parser.add_argument('-d', '--delete', action='store_true', help='Delete saved stats') + args = parser.parse_args() + + save_fresh_stats = args.clear + delete_all_stats = args.delete + + port_to_show_stats = args.port + + uid = str(os.getuid()) + cnstat_file = uid + + cnstat_dir = "/tmp/queuestat-" + uid + cnstat_fqn_file = cnstat_dir + "/" 
diff --git a/scripts/reboot b/scripts/reboot
index 019e5ad762..1a6346ab55 100755
--- a/scripts/reboot
+++ b/scripts/reboot
@@ -2,16 +2,13 @@
 
 function stop_sonic_services()
 {
-    echo "Stopping sonic services..."
-    systemctl stop swss
-    systemctl stop teamd
-    systemctl stop bgp
-    systemctl stop lldp
-    systemctl stop snmp
+    echo "Stopping syncd..."
+    docker exec -i syncd /usr/bin/syncd_request_shutdown --cold > /dev/null
+    sleep 3
 }
 
 # Obtain our platform as we will mount directories with these names in each docker
-PLATFORM=`sonic-cfggen -v platform`
+PLATFORM=`sonic-cfggen -H -v DEVICE_METADATA.localhost.platform`
 
 DEVPATH="/usr/share/sonic/device"
 REBOOT="platform_reboot"
diff --git a/setup.py b/setup.py
index 9503d5789e..586bbfa41b 100644
--- a/setup.py
+++ b/setup.py
@@ -21,6 +21,7 @@ def get_test_suite():
         'acl_loader',
         'clear',
         'config',
+        'crm',
         'debug',
         'pfcwd',
         'sfputil',
@@ -42,6 +43,7 @@ def get_test_suite():
         'scripts/ecnconfig',
         'scripts/fast-reboot',
         'scripts/fast-reboot-dump.py',
+        'scripts/fdbclear',
         'scripts/fdbshow',
         'scripts/generate_dump',
         'scripts/intfutil',
@@ -60,6 +62,7 @@ def get_test_suite():
     'console_scripts': [
         'acl-loader = acl_loader.main:cli',
         'config = config.main:cli',
+        'crm = crm.main:cli',
         'debug = debug.main:cli',
         'pfcwd = pfcwd.main:cli',
         'sfputil = sfputil.main:cli',
diff --git a/sfputil/main.py b/sfputil/main.py
index eb91463a3e..5aa2dd250a 100644
--- a/sfputil/main.py
+++ b/sfputil/main.py
@@ -28,8 +28,8 @@
 PLATFORM_ROOT_PATH = '/usr/share/sonic/device'
 SONIC_CFGGEN_PATH = '/usr/local/bin/sonic-cfggen'
 MINIGRAPH_PATH = '/etc/sonic/minigraph.xml'
-HWSKU_KEY = "DEVICE_METADATA['localhost']['hwsku']"
-PLATFORM_KEY = 'platform'
+HWSKU_KEY = 'DEVICE_METADATA.localhost.hwsku'
+PLATFORM_KEY = 'DEVICE_METADATA.localhost.platform'
 
 # Global platform-specific sfputil class instance
 platform_sfputil = None
@@ -288,7 +288,7 @@ def port_eeprom_data_raw_string_pretty(logical_port_name):
 # Returns platform and HW SKU
 def get_platform_and_hwsku():
     try:
-        proc = subprocess.Popen([SONIC_CFGGEN_PATH, '-v', PLATFORM_KEY],
+        proc = subprocess.Popen([SONIC_CFGGEN_PATH, '-H', '-v', PLATFORM_KEY],
                                 stdout=subprocess.PIPE,
                                 shell=False,
                                 stderr=subprocess.STDOUT)
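Note: scripts/reboot and sfputil above both move from the old top-level `platform`
key to ConfigDB-style `DEVICE_METADATA.localhost.*` key paths, adding `-H` so that
sonic-cfggen resolves them (per the diff, `-H` supplies the device/hardware metadata;
`-v` prints the value at the given key path). A minimal sketch of that lookup, under
those assumptions (the wrapper name is hypothetical; the binary path and flags are
the ones used in the diff):

    # cfggen_lookup_demo.py -- hypothetical wrapper around the calls above
    import subprocess

    SONIC_CFGGEN_PATH = '/usr/local/bin/sonic-cfggen'

    def get_metadata_field(key):
        # Mirrors sfputil's invocation: sonic-cfggen -H -v <key-path>
        proc = subprocess.Popen([SONIC_CFGGEN_PATH, '-H', '-v', key],
                                stdout=subprocess.PIPE,
                                shell=False,
                                stderr=subprocess.STDOUT)
        stdout = proc.communicate()[0]
        return stdout.rstrip('\n')

    print 'Platform: ' + get_metadata_field('DEVICE_METADATA.localhost.platform')
    print 'HwSKU:    ' + get_metadata_field('DEVICE_METADATA.localhost.hwsku')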
subcommand ("show runningconfiguration bgp") @runningconfiguration.command() def bgp(): @@ -888,20 +895,29 @@ def config(redis_unix_socket_path): data = config_db.get_table('VLAN') keys = data.keys() - def mode(key, data): - info = [] - for m in data.get('members', []): - entry = config_db.get_entry('VLAN_MEMBER', (key, m)) - mode = entry.get('tagging_mode') - if mode == None: - info.append('?') - else: - info.append(mode) - return '\n'.join(info) + def tablelize(keys, data): + table = [] - header = ['Name', 'VID', 'Member', 'Mode'] - click.echo(tabulate([ [k, data[k]['vlanid'], '\n'.join(data[k].get('members', [])), mode(k, data[k])] for k in keys ], header)) + for k in keys: + for m in data[k].get('members', []): + r = [] + r.append(k) + r.append(data[k]['vlanid']) + r.append(m) + entry = config_db.get_entry('VLAN_MEMBER', (k, m)) + mode = entry.get('tagging_mode') + if mode == None: + r.append('?') + else: + r.append(mode) + + table.append(r) + + return table + + header = ['Name', 'VID', 'Member', 'Mode'] + click.echo(tabulate(tablelize(keys, data), header)) @cli.command('services') def services(): @@ -934,7 +950,8 @@ def aaa(): 'fallback': 'True (default)' } } - aaa['authentication'].update(data['authentication']) + if 'authentication' in data: + aaa['authentication'].update(data['authentication']) for row in aaa: entry = aaa[row] for key in entry: @@ -957,7 +974,8 @@ def tacacs(): 'passkey': ' (default)' } } - tacplus['global'].update(data['global']) + if 'global' in data: + tacplus['global'].update(data['global']) for key in tacplus['global']: output += ('TACPLUS global %s %s\n' % (str(key), str(tacplus['global'][key]))) @@ -972,10 +990,17 @@ def tacacs(): # -# 'session' command ### +# 'mirror' group ### # -@cli.command() +@cli.group(cls=AliasedGroup, default_if_no_args=False) +def mirror(): + """Show mirroring (Everflow) information""" + pass + + +# 'session' subcommand ("show mirror session") +@mirror.command() @click.argument('session_name', required=False) def session(session_name): """Show existing everflow sessions""" @@ -995,39 +1020,34 @@ def acl(): pass -# -# 'acl table' command ### -# - +# 'rule' subcommand ("show acl rule") @acl.command() @click.argument('table_name', required=False) -def table(table_name): - """Show existing ACL tables""" +@click.argument('rule_id', required=False) +def rule(table_name, rule_id): + """Show existing ACL rules""" if table_name is None: table_name = "" - run_command("acl-loader show table {}".format(table_name)) + if rule_id is None: + rule_id = "" + run_command("acl-loader show rule {} {}".format(table_name, rule_id)) -# -# 'acl rule' command ### -# +# 'table' subcommand ("show acl table") @acl.command() @click.argument('table_name', required=False) -@click.argument('rule_id', required=False) -def rule(table_name, rule_id): - """Show existing ACL rules""" +def table(table_name): + """Show existing ACL tables""" if table_name is None: table_name = "" - if rule_id is None: - rule_id = "" + run_command("acl-loader show table {}".format(table_name)) - run_command("acl-loader show rule {} {}".format(table_name, rule_id)) # -# 'session' command (show ecn) +# 'ecn' command ("show ecn") # @cli.command('ecn') def ecn():