From 3732ac5845780a7a19d6ff55c25f91853bf3a816 Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Mon, 18 Apr 2022 21:31:44 +0800 Subject: [PATCH] Add CLI for route flow counter feature (#2031) HLD: https://github.com/Azure/SONiC/pull/908 Command reference : https://github.com/Azure/sonic-utilities/pull/2069 - What I did Add CLIs for route flow counter feature - How I did it Add show command show flowcnt-route config and command group show flowcnt-route stats Add config command group config flowcnt-route pattern Add clear command group sonic-clear flowcnt-route - How to verify it 1. Full unit test cover 2. Manual test 3. sonic-mgmt test cases --- clear/main.py | 49 ++ config/flow_counters.py | 158 +++++ config/main.py | 12 +- counterpoll/main.py | 39 ++ flow_counter_util/__init__.py | 0 flow_counter_util/route.py | 79 +++ scripts/flow_counters_stat | 318 +++++++++- setup.py | 1 + show/flow_counters.py | 71 +++ show/main.py | 1 + tests/counterpoll_input/config_db.json | 5 +- tests/counterpoll_test.py | 42 ++ tests/flow_counter_stats_test.py | 737 ++++++++++++++++++++++- tests/mock_tables/asic0/counters_db.json | 16 + tests/mock_tables/asic1/counters_db.json | 10 + tests/mock_tables/config_db.json | 4 + tests/mock_tables/counters_db.json | 28 + tests/mock_tables/state_db.json | 3 + utilities_common/cli.py | 39 +- 19 files changed, 1594 insertions(+), 18 deletions(-) create mode 100644 config/flow_counters.py create mode 100644 flow_counter_util/__init__.py create mode 100644 flow_counter_util/route.py diff --git a/clear/main.py b/clear/main.py index 1ad42ad786..6980bb8be1 100755 --- a/clear/main.py +++ b/clear/main.py @@ -4,7 +4,9 @@ import sys import click import utilities_common.cli as clicommon +import utilities_common.multi_asic as multi_asic_util +from flow_counter_util.route import exit_if_route_flow_counter_not_support from utilities_common import util_base from show.plugins.pbh import read_pbh_counters from config.plugins.pbh import serialize_pbh_counters @@ -484,6 +486,53 @@ def flowcnt_trap(): run_command(command) +# ("sonic-clear flowcnt-route") +@cli.group(invoke_without_command=True) +@click.option('--namespace', '-n', 'namespace', default=None, type=click.Choice(multi_asic_util.multi_asic_ns_choices()), show_default=True, help='Namespace name or all') +@click.pass_context +def flowcnt_route(ctx, namespace): + """Clear all route flow counters""" + exit_if_route_flow_counter_not_support() + if ctx.invoked_subcommand is None: + command = "flow_counters_stat -c -t route" + # None namespace means default namespace + if namespace is not None: + command += " -n {}".format(namespace) + clicommon.run_command(command) + + +# ("sonic-clear flowcnt-route pattern") +@flowcnt_route.command() +@click.option('--vrf', help='VRF/VNET name or default VRF') +@click.option('--namespace', '-n', 'namespace', default=None, type=click.Choice(multi_asic_util.multi_asic_ns_choices()), show_default=True, help='Namespace name or all') +@click.argument('prefix-pattern', required=True) +def pattern(prefix_pattern, vrf, namespace): + """Clear route flow counters by pattern""" + command = "flow_counters_stat -c -t route --prefix_pattern {}".format(prefix_pattern) + if vrf: + command += ' --vrf {}'.format(vrf) + # None namespace means default namespace + if namespace is not None: + command += " -n {}".format(namespace) + clicommon.run_command(command) + + +# ("sonic-clear flowcnt-route route") +@flowcnt_route.command() +@click.option('--vrf', help='VRF/VNET 
name or default VRF') +@click.option('--namespace', '-n', 'namespace', default=None, type=click.Choice(multi_asic_util.multi_asic_ns_choices()), show_default=True, help='Namespace name or all') +@click.argument('prefix', required=True) +def route(prefix, vrf, namespace): + """Clear route flow counters by prefix""" + command = "flow_counters_stat -c -t route --prefix {}".format(prefix) + if vrf: + command += ' --vrf {}'.format(vrf) + # None namespace means default namespace + if namespace is not None: + command += " -n {}".format(namespace) + clicommon.run_command(command) + + # Load plugins and register them helper = util_base.UtilHelper() helper.load_and_register_plugins(plugins, cli) diff --git a/config/flow_counters.py b/config/flow_counters.py new file mode 100644 index 0000000000..51ef547434 --- /dev/null +++ b/config/flow_counters.py @@ -0,0 +1,158 @@ +import click +import ipaddress + +from flow_counter_util.route import FLOW_COUNTER_ROUTE_PATTERN_TABLE, FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD, DEFAULT_VRF, PATTERN_SEPARATOR +from flow_counter_util.route import build_route_pattern, extract_route_pattern, exit_if_route_flow_counter_not_support +from utilities_common.cli import AbbreviationGroup, pass_db +from utilities_common import cli # To make mock work in unit test + +# +# 'flowcnt-route' group ('config flowcnt-route ...') +# + + +@click.group(cls=AbbreviationGroup, invoke_without_command=False) +def flowcnt_route(): + """Route flow counter related configuration tasks""" + pass + + +@flowcnt_route.group() +def pattern(): + """Set pattern for route flow counter""" + pass + + +@pattern.command(name='add') +@click.option('-y', '--yes', is_flag=True) +@click.option('--vrf', help='VRF/VNET name or default VRF') +@click.option('--max', 'max_allowed_match', type=click.IntRange(1, 50), default=30, show_default=True, help='Max allowed match count') +@click.argument('prefix-pattern', required=True) +@pass_db +def pattern_add(db, yes, vrf, max_allowed_match, prefix_pattern): + """Add pattern for route flow counter""" + _update_route_flow_counter_config(db, vrf, max_allowed_match, prefix_pattern, True, yes) + + +@pattern.command(name='remove') +@click.option('--vrf', help='VRF/VNET name or default VRF') +@click.argument('prefix-pattern', required=True) +@pass_db +def pattern_remove(db, vrf, prefix_pattern): + """Remove pattern for route flow counter""" + _update_route_flow_counter_config(db, vrf, None, prefix_pattern, False) + + +def _update_route_flow_counter_config(db, vrf, max_allowed_match, prefix_pattern, add, yes=False): + """ + Update route flow counter config + :param db: db object + :param vrf: vrf string, empty vrf will be treated as default vrf + :param max_allowed_match: max allowed match count, $FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD will be used if not specified + :param prefix_pattern: route prefix pattern, automatically add prefix length if not specified + :param add: True to add/set the configuration, otherwise remove + :param yes: Don't ask question if True + :return: + """ + exit_if_route_flow_counter_not_support() + + if add: + try: + net = ipaddress.ip_network(prefix_pattern, strict=False) + except ValueError as e: + click.echo('Invalid prefix pattern: {}'.format(prefix_pattern)) + exit(1) + + if '/' not in prefix_pattern: + prefix_pattern += '/' + str(net.prefixlen) + + key = build_route_pattern(vrf, prefix_pattern) + for _, cfgdb in db.cfgdb_clients.items(): + if _try_find_existing_pattern_by_ip_type(cfgdb, net, key, yes): + entry_data = 
cfgdb.get_entry(FLOW_COUNTER_ROUTE_PATTERN_TABLE, key) + old_max_allowed_match = entry_data.get(FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD) + if old_max_allowed_match is not None and int(old_max_allowed_match) == max_allowed_match: + click.echo('The route pattern already exists, nothing to be changed') + exit(1) + cfgdb.mod_entry(FLOW_COUNTER_ROUTE_PATTERN_TABLE, + key, + {FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD: str(max_allowed_match)}) + else: + found = False + key = build_route_pattern(vrf, prefix_pattern) + for _, cfgdb in db.cfgdb_clients.items(): + pattern_table = cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + + for existing_key in pattern_table: + exist_vrf, existing_prefix = extract_route_pattern(existing_key) + if (exist_vrf == vrf or (vrf is None and exist_vrf == DEFAULT_VRF)) and existing_prefix == prefix_pattern: + found = True + cfgdb.set_entry(FLOW_COUNTER_ROUTE_PATTERN_TABLE, key, None) + if not found: + click.echo("Failed to remove route pattern: {} does not exist".format(key)) + exit(1) + + +def _try_find_existing_pattern_by_ip_type(cfgdb, input_net, input_key, yes): + """Try to find the same IP type pattern from CONFIG DB. + 1. If found a pattern with the same IP type, but the patter does not equal, ask user if need to replace the old with new one + a. If user types "yes", remove the old one, return False + b. If user types "no", exit + 2. If found a pattern with the same IP type and the pattern equal, return True + 3. If not found a pattern with the same IP type, return False + + Args: + cfgdb (object): CONFIG DB object + input_net (object): Input ip_network object + input_key (str): Input key + yes (bool): Whether ask user question + + Returns: + bool: True if found the same pattern in CONFIG DB + """ + input_type = type(input_net) # IPv4 or IPv6 + found_invalid = [] + found = None + pattern_table = cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + for existing_key in pattern_table: + if isinstance(existing_key, tuple): + existing_prefix = existing_key[1] + existing_key = PATTERN_SEPARATOR.join(existing_key) + else: + _, existing_prefix = extract_route_pattern(existing_key) + + # In case user configures an invalid pattern via CONFIG DB. + if not existing_prefix: # Invalid pattern such as: "vrf1|" + click.echo('Detect invalid route pattern in existing configuration {}'.format(existing_key)) + found_invalid.append(existing_key) + continue + + try: + existing_net = ipaddress.ip_network(existing_prefix, strict=False) + except ValueError as e: # Invalid pattern such as: "vrf1|invalid" + click.echo('Detect invalid route pattern in existing configuration {}'.format(existing_key)) + found_invalid.append(existing_key) + continue + + if type(existing_net) == input_type: + found = existing_key + break + + if found == input_key: + return True + + if not found and found_invalid: + # If not found but there is an invalid one, ask user to replace the invalid one + found = found_invalid[0] + + if found: + if not yes: + answer = cli.query_yes_no('Only support 1 IPv4 route pattern and 1 IPv6 route pattern, remove existing pattern {}?'.format(found)) + else: + answer = True + if answer: + click.echo('Replacing existing route pattern {} with {}'.format(existing_key, input_key)) + cfgdb.set_entry(FLOW_COUNTER_ROUTE_PATTERN_TABLE, existing_key, None) + else: + exit(0) + return False diff --git a/config/main.py b/config/main.py index c3415fbd11..197fb33662 100644 --- a/config/main.py +++ b/config/main.py @@ -35,6 +35,7 @@ from . import chassis_modules from . import console from . 
import feature +from . import flow_counters from . import kdump from . import kube from . import muxcable @@ -789,7 +790,7 @@ def _per_namespace_swss_ready(service_name): return False def _swss_ready(): - list_of_swss = [] + list_of_swss = [] num_asics = multi_asic.get_num_asics() if num_asics == 1: list_of_swss.append("swss.service") @@ -802,7 +803,7 @@ def _swss_ready(): if _per_namespace_swss_ready(service_name) == False: return False - return True + return True def _is_system_starting(): out = clicommon.run_command("sudo systemctl is-system-running", return_cmd=True) @@ -1076,6 +1077,7 @@ def config(ctx): config.add_command(chassis_modules.chassis) config.add_command(console.console) config.add_command(feature.feature) +config.add_command(flow_counters.flowcnt_route) config.add_command(kdump.kdump) config.add_command(kube.kubernetes) config.add_command(muxcable.muxcable) @@ -1482,10 +1484,10 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, disable_arp_cach config_gen_opts = "" - + if os.path.isfile(INIT_CFG_FILE): config_gen_opts += " -j {} ".format(INIT_CFG_FILE) - + if file_format == 'config_db': config_gen_opts += ' -j {} '.format(file) else: @@ -6239,7 +6241,7 @@ def del_subinterface(ctx, subinterface_name): sub_intfs = [k for k,v in subintf_config_db.items() if type(k) != tuple] if subinterface_name not in sub_intfs: ctx.fail("{} does not exists".format(subinterface_name)) - + ips = {} ips = [ k[1] for k in config_db.get_table('VLAN_SUB_INTERFACE') if type(k) == tuple and k[0] == subinterface_name ] for ip in ips: diff --git a/counterpoll/main.py b/counterpoll/main.py index e23f4b9c59..f3befe1311 100644 --- a/counterpoll/main.py +++ b/counterpoll/main.py @@ -1,5 +1,6 @@ import click import json +from flow_counter_util.route import exit_if_route_flow_counter_not_support from swsscommon.swsscommon import ConfigDBConnector from tabulate import tabulate @@ -347,6 +348,40 @@ def disable(ctx): fc_info['FLEX_COUNTER_STATUS'] = 'disable' ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_TRAP", fc_info) +# Route flow counter commands +@cli.group() +@click.pass_context +def flowcnt_route(ctx): + """ Route flow counter commands """ + exit_if_route_flow_counter_not_support() + ctx.obj = ConfigDBConnector() + ctx.obj.connect() + +@flowcnt_route.command() +@click.argument('poll_interval', type=click.IntRange(1000, 30000)) +@click.pass_context +def interval(ctx, poll_interval): + """ Set route flow counter query interval """ + fc_info = {} + fc_info['POLL_INTERVAL'] = poll_interval + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_ROUTE", fc_info) + +@flowcnt_route.command() +@click.pass_context +def enable(ctx): + """ Enable route flow counter query """ + fc_info = {} + fc_info['FLEX_COUNTER_STATUS'] = 'enable' + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_ROUTE", fc_info) + +@flowcnt_route.command() +@click.pass_context +def disable(ctx): + """ Disable route flow counter query """ + fc_info = {} + fc_info['FLEX_COUNTER_STATUS'] = 'disable' + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_ROUTE", fc_info) + @cli.command() def show(): """ Show the counter configuration """ @@ -363,6 +398,7 @@ def show(): acl_info = configdb.get_entry('FLEX_COUNTER_TABLE', ACL) tunnel_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'TUNNEL') trap_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_TRAP') + route_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_ROUTE') header = ("Type", "Interval (in ms)", "Status") data = [] @@ -388,6 +424,9 @@ def show(): 
data.append(["TUNNEL_STAT", rif_info.get("POLL_INTERVAL", DEFLT_10_SEC), rif_info.get("FLEX_COUNTER_STATUS", DISABLE)]) if trap_info: data.append(["FLOW_CNT_TRAP_STAT", trap_info.get("POLL_INTERVAL", DEFLT_10_SEC), trap_info.get("FLEX_COUNTER_STATUS", DISABLE)]) + if route_info: + data.append(["FLOW_CNT_ROUTE_STAT", route_info.get("POLL_INTERVAL", DEFLT_10_SEC), + route_info.get("FLEX_COUNTER_STATUS", DISABLE)]) click.echo(tabulate(data, headers=header, tablefmt="simple", missingval="")) diff --git a/flow_counter_util/__init__.py b/flow_counter_util/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/flow_counter_util/route.py b/flow_counter_util/route.py new file mode 100644 index 0000000000..a905b646e7 --- /dev/null +++ b/flow_counter_util/route.py @@ -0,0 +1,79 @@ +import os +import sys +from swsscommon.swsscommon import SonicV2Connector + +try: + if os.environ["UTILITIES_UNIT_TESTING"] == "1" or os.environ["UTILITIES_UNIT_TESTING"] == "2": + modules_path = os.path.join(os.path.dirname(__file__), "..") + test_path = os.path.join(modules_path, "tests") + sys.path.insert(0, modules_path) + sys.path.insert(0, test_path) + import mock_tables.dbconnector # lgtm[py/unused-import] +except KeyError: + pass + + +COUNTERS_ROUTE_TO_PATTERN_MAP = 'COUNTERS_ROUTE_TO_PATTERN_MAP' +FLOW_COUNTER_CAPABILITY_TABLE = 'FLOW_COUNTER_CAPABILITY_TABLE' +FLOW_COUNTER_CAPABILITY_KEY = 'route' +FLOW_COUNTER_CAPABILITY_SUPPORT_FIELD = 'support' +FLOW_COUNTER_ROUTE_PATTERN_TABLE = 'FLOW_COUNTER_ROUTE_PATTERN' +FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD = 'max_match_count' +FLOW_COUNTER_ROUTE_CONFIG_HEADER = ['Route pattern', 'VRF', 'Max'] +DEFAULT_MAX_MATCH = 30 +DEFAULT_VRF = 'default' +PATTERN_SEPARATOR = '|' + + +def extract_route_pattern(route_pattern): + """Extract vrf and prefix from route pattern, route pattrn shall be formated like: "Vrf_1:1.1.1.1/24" + or "1.1.1.1/24" + + Args: + route_pattern (str): route pattern string + sep (str, optional): Defaults to PATTERN_SEPARATOR. 
+ + Returns: + [tuple]: vrf and prefix + """ + if isinstance(route_pattern, tuple): + return route_pattern + items = route_pattern.split(PATTERN_SEPARATOR) + if len(items) == 1: + return DEFAULT_VRF, items[0] + elif len(items) == 2: + return items[0], items[1] + else: + return None, None + + +def build_route_pattern(vrf, prefix): + if vrf and vrf != 'default': + return '{}{}{}'.format(vrf, PATTERN_SEPARATOR, prefix) + else: + return prefix + + +def get_route_flow_counter_capability(): + state_db = SonicV2Connector(host="127.0.0.1") + state_db.connect(state_db.STATE_DB) + + return state_db.get_all(state_db.STATE_DB, '{}|{}'.format(FLOW_COUNTER_CAPABILITY_TABLE, FLOW_COUNTER_CAPABILITY_KEY)) + + +def exit_if_route_flow_counter_not_support(): + capabilities = get_route_flow_counter_capability() + if not capabilities: + print('Waiting for swss to initialize route flow counter capability, please try again later') + exit(1) + + support = capabilities.get(FLOW_COUNTER_CAPABILITY_SUPPORT_FIELD) + if support is None: + print('Waiting for swss to initialize route flow counter capability, please try again later') + exit(1) + + if support != 'true': + print('Route flow counter is not supported on this platform') + exit(1) + + return diff --git a/scripts/flow_counters_stat b/scripts/flow_counters_stat index 8901d92f66..61c754e333 100755 --- a/scripts/flow_counters_stat +++ b/scripts/flow_counters_stat @@ -24,13 +24,19 @@ except KeyError: pass import utilities_common.multi_asic as multi_asic_util +from flow_counter_util.route import build_route_pattern, extract_route_pattern, exit_if_route_flow_counter_not_support, DEFAULT_VRF, COUNTERS_ROUTE_TO_PATTERN_MAP +from utilities_common import constants from utilities_common.netstat import format_number_with_comma, table_as_json, ns_diff, format_prate # Flow counter meta data, new type of flow counters can extend this dictinary to reuse existing logic flow_counter_meta = { 'trap': { 'headers': ['Trap Name', 'Packets', 'Bytes', 'PPS'], - 'name_map': 'COUNTERS_TRAP_NAME_MAP', + 'name_map': 'COUNTERS_TRAP_NAME_MAP' + }, + 'route': { + 'headers': ['Route pattern', 'VRF', 'Matched routes', 'Packets', 'Bytes'], + 'name_map': 'COUNTERS_ROUTE_NAME_MAP' } } flow_counters_fields = ['SAI_COUNTER_STAT_PACKETS', 'SAI_COUNTER_STAT_BYTES'] @@ -41,7 +47,6 @@ diff_column_positions = set([0, 1]) FLOW_COUNTER_TABLE_PREFIX = "COUNTERS:" RATES_TABLE_PREFIX = 'RATES:' PPS_FIELD = 'RX_PPS' -STATUS_NA = 'N/A' class FlowCounterStats(object): @@ -150,7 +155,7 @@ class FlowCounterStats(object): full_table_id = RATES_TABLE_PREFIX + counter_oid counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, PPS_FIELD) - values.append(STATUS_NA if counter_data is None else counter_data) + values.append('0' if counter_data is None else counter_data) values.append(counter_oid) data[ns][name] = values return data @@ -168,7 +173,7 @@ class FlowCounterStats(object): full_table_id = FLOW_COUNTER_TABLE_PREFIX + counter_oid for field in flow_counters_fields: counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, field) - values.append(STATUS_NA if counter_data is None else counter_data) + values.append('0' if counter_data is None else counter_data) return values def _save(self, data): @@ -255,6 +260,295 @@ class FlowCounterStats(object): return need_update_cache +class RouteFlowCounterStats(FlowCounterStats): + SHOW_BY_PREFIX_HEADERS = ['Route', 'VRF', 'Route pattern', 'Packets', 'Bytes'] + + def __init__(self, args): + super(RouteFlowCounterStats,self).__init__(args) + + def 
_print_data(self, headers, table): + """Print statistic data based on output format + + Args: + headers (list): Table headers + table (list): Table data + """ + if self.args.json: + # The first column of the table might have duplicate value, have to + # add an extra index field to make table_as_json work + print(table_as_json(([i] + line for i, line in enumerate(table)), ['Index'] + headers)) + else: + print(tabulate(table, headers, tablefmt='simple', stralign='right')) + + def _prepare_show_data(self): + """Prepare table headers and table data for output. If "--prefix" is specified, fetch data that matches + the given prefix; if "--prefix_pattern" is specified, fetch data that matches the given pattern; + otherwise, fetch all data. + Returns: + headers (list): Table headers + table (list): Table data + """ + if self.args.prefix: + return self._prepare_show_data_by_prefix() + else: + return self._prepare_show_data_by_pattern() + + def _prepare_show_data_by_prefix(self): + """Prepare table header and table data by given prefix + Returns: + headers (list): Table headers + table (list): Table data + """ + table = [] + + headers = self._adjust_headers(self.SHOW_BY_PREFIX_HEADERS) + for ns, pattern_entry in self.data.items(): + if self.args.namespace is not None and self.args.namespace != ns: + continue + for route_pattern, prefix_entry in pattern_entry.items(): + if self.args.prefix in prefix_entry: + vrf, prefix_pattern = extract_route_pattern(route_pattern) + if vrf != self.args.vrf: + continue + + values = prefix_entry[self.args.prefix] + if self.multi_asic.is_multi_asic: + row = [ns] + else: + row = [] + row.extend([self.args.prefix, + self.args.vrf, + prefix_pattern, + format_number_with_comma(values[0]), + format_number_with_comma(values[1])]) + table.append(row) + + return headers, table + + def _prepare_show_data_by_pattern(self): + """Prepare table header and table data by given pattern. If pattern is not specified, show all data. 
+ Returns: + headers (list): Table headers + table (list): Table data + """ + table = [] + + headers = self._adjust_headers(self.headers) + for ns, pattern_entries in natsorted(self.data.items()): + if self.args.namespace is not None and self.args.namespace != ns: + continue + if self.args.prefix_pattern: + route_pattern = build_route_pattern(self.args.vrf, self.args.prefix_pattern) + if route_pattern in pattern_entries: + self._fill_table_for_prefix_pattern(table, ns, pattern_entries[route_pattern], self.args.prefix_pattern, self.args.vrf) + break + else: + for route_pattern, prefix_entries in natsorted(pattern_entries.items()): + vrf, prefix_pattern = extract_route_pattern(route_pattern) + self._fill_table_for_prefix_pattern(table, ns, prefix_entries, prefix_pattern, vrf) + + return headers, table + + def _fill_table_for_prefix_pattern(self, table, ns, prefix_entries, prefix_pattern, vrf): + """Fill table data for prefix pattern + Args: + table (list): Table data to fill + ns (str): Namespace + prefix_entries (dict): Prefix to value map + prefix_pattern (str): Prefix pattern + vrf (str): VRF + """ + is_first_row = True + for prefix, values in natsorted(prefix_entries.items()): + if self.multi_asic.is_multi_asic: + row = [ns if is_first_row or self.args.json else ''] + else: + row = [] + row.extend([prefix_pattern if is_first_row or self.args.json else '', + vrf if is_first_row or self.args.json else '', + prefix, + format_number_with_comma(values[0]), + format_number_with_comma(values[1])]) + table.append(row) + if is_first_row: + is_first_row = False + + def clear(self): + """Clear statistic based on arguments. + 1. If "--prefix" is specified, clear the data matching the prefix + 2. If "--prefix_pattern" is specified, clear the data matching the pattern + 3. Otherwise, clear all data + If "--namepsace" is not specified, clear the data belongs to the default namespace. + If "--vrf" is not specified, use default VRF. 
+ """ + if self.args.prefix: + self.clear_by_prefix() + elif self.args.prefix_pattern: + self.clear_by_pattern() + else: + super(RouteFlowCounterStats, self).clear() + + @multi_asic_util.run_on_multi_asic + def clear_by_prefix(self): + """Clear the data matching the prefix + """ + ns = constants.DEFAULT_NAMESPACE if self.args.namespace is None else self.args.namespace + if ns != self.multi_asic.current_namespace: + return + + name_map = self.db.get_all(self.db.COUNTERS_DB, self.name_map) + prefix_vrf = build_route_pattern(self.args.vrf, self.args.prefix) + if not name_map or prefix_vrf not in name_map: + print('Cannot find {} in COUNTERS_DB {} table'.format(self.args.prefix, self.name_map)) + return + + route_to_pattern_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_ROUTE_TO_PATTERN_MAP) + if not route_to_pattern_map or prefix_vrf not in route_to_pattern_map: + print('Cannot find {} in {} table'.format(self.args.prefix, COUNTERS_ROUTE_TO_PATTERN_MAP)) + return + + self.data = self._load() + if not self.data: + self.data = {} + + if ns not in self.data: + self.data[ns] = {} + + route_pattern = route_to_pattern_map[prefix_vrf] + if route_pattern not in self.data[ns]: + self.data[ns][route_pattern] = {} + + counter_oid = name_map[prefix_vrf] + values = self._get_stats_value(counter_oid) + values.append(counter_oid) + self.data[ns][route_pattern][self.args.prefix] = values + self._save(self.data) + print('Flow Counters of the specified route were successfully cleared') + + @multi_asic_util.run_on_multi_asic + def clear_by_pattern(self): + """Clear the data matching the specified pattern + """ + ns = constants.DEFAULT_NAMESPACE if self.args.namespace is None else self.args.namespace + if ns != self.multi_asic.current_namespace: + return + + route_to_pattern_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_ROUTE_TO_PATTERN_MAP) + expect_route_pattern = build_route_pattern(self.args.vrf, self.args.prefix_pattern) + matching_prefix_vrf_list = [prefix_vrf for prefix_vrf, route_pattern in route_to_pattern_map.items() if route_pattern == expect_route_pattern] + if not matching_prefix_vrf_list: + print('Cannot find {} in COUNTERS_DB {} table'.format(self.args.prefix_pattern, COUNTERS_ROUTE_TO_PATTERN_MAP)) + return + + data_to_update = {} + name_map = self.db.get_all(self.db.COUNTERS_DB, self.name_map) + for prefix_vrf in matching_prefix_vrf_list: + if prefix_vrf not in name_map: + print('Warning: cannot find {} in {}'.format(prefix_vrf, self.name_map)) + continue + + counter_oid = name_map[prefix_vrf] + values = self._get_stats_value(counter_oid) + values.append(counter_oid) + _, prefix = extract_route_pattern(prefix_vrf) + data_to_update[prefix] = values + + self.data = self._load() + if not self.data: + self.data = {} + + if not self.data or ns not in self.data: + self.data[ns] = {} + + if expect_route_pattern not in self.data[ns]: + self.data[ns][expect_route_pattern] = {} + + self.data[ns][expect_route_pattern].update(data_to_update) + self._save(self.data) + + def _get_stats_from_db(self): + """Get flow counter statistic from DB. + Returns: + dict: A dictionary. 
E.g: {: {(): {: [, , ]}}} + """ + ns = self.multi_asic.current_namespace + name_map = self.db.get_all(self.db.COUNTERS_DB, self.name_map) + data = {ns: {}} + if not name_map: + return data + + route_to_pattern_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_ROUTE_TO_PATTERN_MAP) + if not route_to_pattern_map: + return data + + for prefix_vrf, route_pattern in route_to_pattern_map.items(): + if route_pattern not in data[ns]: + data[ns][route_pattern] = {} + + counter_oid = name_map[prefix_vrf] + values = self._get_stats_value(counter_oid) + values.append(counter_oid) + _, prefix = extract_route_pattern(prefix_vrf) + data[ns][route_pattern][prefix] = values + + return data + + def _diff(self, old_data, new_data): + """Do a diff between new data and old data. + Args: + old_data (dict): E.g: {: {(): {: [, , ]}}} + new_data (dict): E.g: {: {(): {: [, , ]}}} + """ + need_update_cache = False + if not old_data: + return need_update_cache + + for ns, stats in new_data.items(): + if ns not in old_data: + continue + + old_stats = old_data[ns] + for route_pattern, prefix_entries in stats.items(): + if route_pattern not in old_stats: + continue + + old_prefix_entries = old_stats[route_pattern] + for name, values in prefix_entries.items(): + if name not in old_prefix_entries: + continue + + old_values = old_prefix_entries[name] + if values[-1] != old_values[-1]: + # Counter OID not equal means the generic counter was removed and added again. Removing a generic counter would cause + # the stats value restart from 0. To avoid get minus value here, it should not do diff in case + # counter OID is changed. + old_values[-1] = values[-1] + for i in diff_column_positions: + old_values[i] = '0' + values[i] = ns_diff(values[i], old_values[i]) + need_update_cache = True + continue + + has_negative_diff = False + for i in diff_column_positions: + # If any diff has negative value, set all counter values to 0 and update cache + if int(values[i]) < int(old_values[i]): + has_negative_diff = True + break + + if has_negative_diff: + for i in diff_column_positions: + old_values[i] = '0' + values[i] = ns_diff(values[i], old_values[i]) + need_update_cache = True + continue + + for i in diff_column_positions: + values[i] = ns_diff(values[i], old_values[i]) + + return need_update_cache + + def main(): parser = argparse.ArgumentParser(description='Display the flow counters', formatter_class=argparse.RawTextHelpFormatter, @@ -268,11 +562,23 @@ Examples: parser.add_argument('-d', '--delete', action='store_true', help='Delete saved stats') parser.add_argument('-j', '--json', action='store_true', help='Display in JSON format') parser.add_argument('-n','--namespace', default=None, help='Display flow counters for specific namespace') - parser.add_argument('-t', '--type', required=True, choices=['trap'],help='Flow counters type') + parser.add_argument('-t', '--type', required=True, choices=['trap', 'route'],help='Flow counters type') + group = parser.add_mutually_exclusive_group() + group.add_argument('--prefix_pattern', help='Prefix pattern') # for route flow counter only, ignored by other type + group.add_argument('--prefix', help='Prefix') # for route flow counter only, ignored by other type + parser.add_argument('--vrf', help='VRF name', default=DEFAULT_VRF) # for route flow counter only, ignored by other type args = parser.parse_args() - stats = FlowCounterStats(args) + if args.type == 'trap': + stats = FlowCounterStats(args) + elif args.type == 'route': + exit_if_route_flow_counter_not_support() + stats = 
RouteFlowCounterStats(args) + else: + print('Invalid flow counter type: {}'.format(args.type)) + exit(1) + if args.clear: stats.clear() else: diff --git a/setup.py b/setup.py index a8bf39170c..7ee1eb8574 100644 --- a/setup.py +++ b/setup.py @@ -40,6 +40,7 @@ 'ssdutil', 'pfc', 'psuutil', + 'flow_counter_util', 'fdbutil', 'fwutil', 'pcieutil', diff --git a/show/flow_counters.py b/show/flow_counters.py index 9870c83080..0767a2a9a5 100644 --- a/show/flow_counters.py +++ b/show/flow_counters.py @@ -2,6 +2,11 @@ import utilities_common.cli as clicommon import utilities_common.multi_asic as multi_asic_util +from tabulate import tabulate + +from flow_counter_util.route import FLOW_COUNTER_ROUTE_PATTERN_TABLE, FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD, FLOW_COUNTER_ROUTE_CONFIG_HEADER, DEFAULT_MAX_MATCH +from flow_counter_util.route import extract_route_pattern, exit_if_route_flow_counter_not_support + # # 'flowcnt-trap' group ### # @@ -20,3 +25,69 @@ def stats(verbose, namespace): if namespace is not None: cmd += " -n {}".format(namespace) clicommon.run_command(cmd, display_cmd=verbose) + +# +# 'flowcnt-route' group ### +# + +@click.group(cls=clicommon.AliasedGroup) +def flowcnt_route(): + """Show route flow counter related information""" + exit_if_route_flow_counter_not_support() + + +@flowcnt_route.command() +@clicommon.pass_db +def config(db): + """Show route flow counter configuration""" + route_pattern_table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + data = [] + for key, entry in route_pattern_table.items(): + vrf, prefix = extract_route_pattern(key) + max = entry.get(FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD, str(DEFAULT_MAX_MATCH)) + data.append([prefix, vrf, max]) + + click.echo(tabulate(data, headers=FLOW_COUNTER_ROUTE_CONFIG_HEADER, tablefmt="simple", missingval="")) + + +@flowcnt_route.group(invoke_without_command=True) +@click.option('--verbose', is_flag=True, help="Enable verbose output") +@click.option('--namespace', '-n', 'namespace', default=None, type=click.Choice(multi_asic_util.multi_asic_ns_choices()), show_default=True, help='Namespace name or all') +@click.pass_context +def stats(ctx, verbose, namespace): + """Show statistics of all route flow counters""" + if ctx.invoked_subcommand is None: + command = "flow_counters_stat -t route" + if namespace is not None: + command += " -n {}".format(namespace) + clicommon.run_command(command, display_cmd=verbose) + + +@stats.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +@click.option('--vrf', help='VRF/VNET name or default VRF') +@click.option('--namespace', '-n', 'namespace', default=None, type=click.Choice(multi_asic_util.multi_asic_ns_choices()), show_default=True, help='Namespace name or all') +@click.argument('prefix-pattern', required=True) +def pattern(prefix_pattern, vrf, verbose, namespace): + """Show statistics of route flow counters by pattern""" + command = "flow_counters_stat -t route --prefix_pattern \"{}\"".format(prefix_pattern) + if vrf: + command += ' --vrf {}'.format(vrf) + if namespace is not None: + command += " -n {}".format(namespace) + clicommon.run_command(command, display_cmd=verbose) + + +@stats.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +@click.option('--vrf', help='VRF/VNET name or default VRF') +@click.option('--namespace', '-n', 'namespace', default=None, type=click.Choice(multi_asic_util.multi_asic_ns_choices()), show_default=True, help='Namespace name or all') +@click.argument('prefix', required=True) +def route(prefix, vrf, 
verbose, namespace): + """Show statistics of route flow counters by prefix""" + command = "flow_counters_stat -t route --prefix {}".format(prefix) + if vrf: + command += ' --vrf {}'.format(vrf) + if namespace is not None: + command += " -n {}".format(namespace) + clicommon.run_command(command, display_cmd=verbose) diff --git a/show/main.py b/show/main.py index 3f3e367463..01fd592c7a 100755 --- a/show/main.py +++ b/show/main.py @@ -177,6 +177,7 @@ def cli(ctx): cli.add_command(dropcounters.dropcounters) cli.add_command(feature.feature) cli.add_command(fgnhg.fgnhg) +cli.add_command(flow_counters.flowcnt_route) cli.add_command(flow_counters.flowcnt_trap) cli.add_command(kdump.kdump) cli.add_command(interfaces.interfaces) diff --git a/tests/counterpoll_input/config_db.json b/tests/counterpoll_input/config_db.json index 61ceb071c2..40ff750db6 100644 --- a/tests/counterpoll_input/config_db.json +++ b/tests/counterpoll_input/config_db.json @@ -787,6 +787,9 @@ }, "FLOW_CNT_TRAP": { "FLEX_COUNTER_STATUS": "enable" + }, + "FLOW_CNT_ROUTE": { + "FLEX_COUNTER_STATUS": "enable" } }, "PORT": { @@ -2666,4 +2669,4 @@ "size": "56368" } } -} +} \ No newline at end of file diff --git a/tests/counterpoll_test.py b/tests/counterpoll_test.py index 7f5251e998..7a8171825a 100644 --- a/tests/counterpoll_test.py +++ b/tests/counterpoll_test.py @@ -27,6 +27,7 @@ PG_DROP_STAT 10000 enable ACL 10000 enable FLOW_CNT_TRAP_STAT 10000 enable +FLOW_CNT_ROUTE_STAT 10000 enable """ class TestCounterpoll(object): @@ -155,6 +156,18 @@ def test_update_trap_counter_status(self, status): table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') assert status == table["FLOW_CNT_TRAP"]["FLEX_COUNTER_STATUS"] + @pytest.mark.parametrize("status", ["disable", "enable"]) + def test_update_route_flow_counter_status(self, status): + runner = CliRunner() + db = Db() + + result = runner.invoke(counterpoll.cli.commands["flowcnt-route"].commands[status], [], obj=db.cfgdb) + print(result.exit_code, result.output) + assert result.exit_code == 0 + + table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') + assert status == table["FLOW_CNT_ROUTE"]["FLEX_COUNTER_STATUS"] + def test_update_trap_counter_interval(self): runner = CliRunner() db = Db() @@ -179,6 +192,35 @@ def test_update_trap_counter_interval(self): assert result.exit_code == 2 assert expected in result.output + def test_update_route_counter_interval(self): + runner = CliRunner() + db = Db() + test_interval = "20000" + + result = runner.invoke(counterpoll.cli.commands["flowcnt-route"].commands["interval"], [test_interval], + obj=db.cfgdb) + print(result.exit_code, result.output) + assert result.exit_code == 0 + + table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') + assert test_interval == table["FLOW_CNT_ROUTE"]["POLL_INTERVAL"] + + test_interval = "500" + result = runner.invoke(counterpoll.cli.commands["flowcnt-route"].commands["interval"], [test_interval], + obj=db.cfgdb) + expected = "Invalid value for \"POLL_INTERVAL\": 500 is not in the valid range of 1000 to 30000." + assert result.exit_code == 2 + assert expected in result.output + + test_interval = "40000" + result = runner.invoke(counterpoll.cli.commands["flowcnt-route"].commands["interval"], [test_interval], + obj=db.cfgdb) + + expected = "Invalid value for \"POLL_INTERVAL\": 40000 is not in the valid range of 1000 to 30000." 
+ assert result.exit_code == 2 + assert expected in result.output + + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/flow_counter_stats_test.py b/tests/flow_counter_stats_test.py index 807ed61223..dc5bb22dee 100644 --- a/tests/flow_counter_stats_test.py +++ b/tests/flow_counter_stats_test.py @@ -7,8 +7,11 @@ import show.main as show import clear.main as clear +import config.main as config from .utils import get_result_and_return_code +from flow_counter_util.route import FLOW_COUNTER_ROUTE_PATTERN_TABLE, FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD +from utilities_common.db import Db from utilities_common.general import load_module_from_source test_path = os.path.dirname(os.path.abspath(__file__)) @@ -72,9 +75,202 @@ asic1 dhcp 0 0 45.25/s """ +expect_show_route_pattern = """\ +Route pattern VRF Max +--------------- ------- ----- +1.1.0.0/24 Vrf1 30 +2000::1/64 default 30 +""" + +expect_show_all_route_stats = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ------- ---------------- --------- ------- + 1.1.1.0/24 default 1.1.1.1/31 100 2,000 + 1.1.1.2/31 1,000 2,000 + 2001::/64 default 2001::1/64 50 1,000 + 2001::/64 Vrf_1 2001::1/64 1,000 25,000 +""" + +expect_show_route_stats_by_pattern_v4 = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ------- ---------------- --------- ------- + 1.1.1.0/24 default 1.1.1.1/31 100 2,000 + 1.1.1.2/31 1,000 2,000 +""" + +expect_show_route_stats_by_pattern_v6 = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ------- ---------------- --------- ------- + 2001::/64 default 2001::1/64 50 1,000 +""" + +expect_show_route_stats_by_pattern_and_vrf_v6 = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ----- ---------------- --------- ------- + 2001::/64 Vrf_1 2001::1/64 1,000 25,000 +""" + +expect_show_route_stats_by_pattern_empty = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ----- ---------------- --------- ------- +""" + +expect_show_route_stats_by_route_v4 = """\ + Route VRF Route pattern Packets Bytes +---------- ------- --------------- --------- ------- +1.1.1.1/31 default 1.1.1.0/24 100 2,000 +""" + +expect_show_route_stats_by_route_v6 = """\ + Route VRF Route pattern Packets Bytes +---------- ------- --------------- --------- ------- +2001::1/64 default 2001::/64 50 1,000 +""" + +expect_show_route_stats_by_route_and_vrf_v6 = """\ + Route VRF Route pattern Packets Bytes +---------- ----- --------------- --------- ------- +2001::1/64 Vrf_1 2001::/64 1,000 25,000 +""" + +expect_after_clear_route_stats_all = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ------- ---------------- --------- ------- + 1.1.1.0/24 default 1.1.1.1/31 0 0 + 1.1.1.2/31 0 0 + 2001::/64 default 2001::1/64 0 0 + 2001::/64 Vrf_1 2001::1/64 0 0 +""" + +expect_after_clear_route_stats_by_pattern_v4 = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ------- ---------------- --------- ------- + 1.1.1.0/24 default 1.1.1.1/31 0 0 + 1.1.1.2/31 0 0 + 2001::/64 default 2001::1/64 50 1,000 + 2001::/64 Vrf_1 2001::1/64 1,000 25,000 +""" + +expect_after_clear_route_stats_by_pattern_v6 = """\ + Route pattern VRF Matched routes Packets Bytes +--------------- ------- ---------------- --------- ------- + 1.1.1.0/24 default 1.1.1.1/31 0 0 + 1.1.1.2/31 0 0 + 2001::/64 default 2001::1/64 0 0 + 2001::/64 Vrf_1 2001::1/64 1,000 25,000 +""" + +expect_show_route_stats_all_json = """\ +{ + "0": { + "Bytes": "2,000", + "Matched routes": 
"1.1.1.1/31", + "Packets": "100", + "Route pattern": "1.1.1.0/24", + "VRF": "default" + }, + "1": { + "Bytes": "2,000", + "Matched routes": "1.1.1.2/31", + "Packets": "1,000", + "Route pattern": "1.1.1.0/24", + "VRF": "default" + }, + "2": { + "Bytes": "1,000", + "Matched routes": "2001::1/64", + "Packets": "50", + "Route pattern": "2001::/64", + "VRF": "default" + }, + "3": { + "Bytes": "25,000", + "Matched routes": "2001::1/64", + "Packets": "1,000", + "Route pattern": "2001::/64", + "VRF": "Vrf_1" + } +} +""" + +expect_show_route_stats_by_pattern_v4_json = """\ +{ + "0": { + "Bytes": "2,000", + "Matched routes": "1.1.1.1/31", + "Packets": "100", + "Route pattern": "1.1.1.0/24", + "VRF": "default" + }, + "1": { + "Bytes": "2,000", + "Matched routes": "1.1.1.2/31", + "Packets": "1,000", + "Route pattern": "1.1.1.0/24", + "VRF": "default" + } +} +""" + +expect_show_route_stats_by_pattern_and_vrf_v6_json = """\ +{ + "0": { + "Bytes": "25,000", + "Packets": "1,000", + "Route": "2001::1/64", + "Route pattern": "2001::/64", + "VRF": "Vrf_1" + } +} +""" + +expect_show_route_stats_all_multi_asic = """\ + ASIC ID Route pattern VRF Matched routes Packets Bytes +--------- --------------- ------- ---------------- --------- ------- + asic0 1.1.1.0/24 default 1.1.1.1/31 100 2,000 + 1.1.1.3/31 200 4,000 + asic1 1.1.1.0/24 default 1.1.1.2/31 1,000 2,000 +""" -def delete_cache(): - cmd = 'flow_counters_stat -t trap -d' +expect_show_route_stats_all_json_multi_asic = """\ +{ + "0": { + "ASIC ID": "asic0", + "Bytes": "2,000", + "Matched routes": "1.1.1.1/31", + "Packets": "100", + "Route pattern": "1.1.1.0/24", + "VRF": "default" + }, + "1": { + "ASIC ID": "asic0", + "Bytes": "4,000", + "Matched routes": "1.1.1.3/31", + "Packets": "200", + "Route pattern": "1.1.1.0/24", + "VRF": "default" + }, + "2": { + "ASIC ID": "asic1", + "Bytes": "2,000", + "Matched routes": "1.1.1.2/31", + "Packets": "1,000", + "Route pattern": "1.1.1.0/24", + "VRF": "default" + } +} +""" + +expect_after_clear_route_stats_all_multi_asic = """\ + ASIC ID Route pattern VRF Matched routes Packets Bytes +--------- --------------- ------- ---------------- --------- ------- + asic0 1.1.1.0/24 default 1.1.1.1/31 0 0 + 1.1.1.3/31 0 0 + asic1 1.1.1.0/24 default 1.1.1.2/31 0 0 +""" + +def delete_cache(stat_type='trap'): + cmd = 'flow_counters_stat -t {} -d'.format(stat_type) get_result_and_return_code(cmd) @@ -165,12 +361,12 @@ def setup_class(cls): @classmethod def teardown_class(cls): print("TEARDOWN") + delete_cache() os.environ["PATH"] = os.pathsep.join( os.environ["PATH"].split(os.pathsep)[:-1] ) os.environ["UTILITIES_UNIT_TESTING"] = "0" os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" - delete_cache() from .mock_tables import mock_single_asic importlib.reload(mock_single_asic) @@ -207,3 +403,538 @@ def test_clear(self): assert result.exit_code == 0 assert result.output == expect_show_output_multi_asic_after_clear + + +class TestConfigRoutePattern: + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1] + ) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + + @mock.patch('utilities_common.cli.query_yes_no') + def test_add_remove_pattern(self, mock_input): + runner = CliRunner() + db = Db() + prefix = '1.1.1.1/24' + result = runner.invoke( + 
config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert prefix in table + assert '30' == table[prefix][FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD] + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', prefix], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert '50' == table[prefix][FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD] + + mock_input.return_value = False + vrf = 'Vrf1' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', '--vrf', vrf, prefix], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert prefix in table + + prefix_v6 = '2000::/64' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', prefix_v6], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert prefix_v6 in table + + mock_input.return_value = True + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', '--vrf', vrf, prefix], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + + assert (vrf, prefix) in table + assert '50' == table[(vrf, prefix)][FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD] + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["remove"], + ['--vrf', vrf, prefix], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert (vrf, prefix) not in table + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["remove"], + [prefix_v6], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert prefix_v6 not in table + + @mock.patch('utilities_common.cli.query_yes_no', mock.MagicMock(return_value=True)) + def test_replace_invalid_pattern(self): + runner = CliRunner() + db = Db() + db.cfgdb.mod_entry(FLOW_COUNTER_ROUTE_PATTERN_TABLE, + 'vrf1|', + {FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD: '30'}) + prefix = '1.1.1.0/24' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert prefix in table + + db.cfgdb.set_entry(FLOW_COUNTER_ROUTE_PATTERN_TABLE, prefix, None) + db.cfgdb.mod_entry(FLOW_COUNTER_ROUTE_PATTERN_TABLE, + 'vrf1|invalid', + {FLOW_COUNTER_ROUTE_MAX_MATCH_FIELD: '30'}) + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix], obj=db + ) + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert prefix in table + + def test_add_invalid_pattern(self): + runner = CliRunner() + prefix = 'invalid' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix] + ) + + print(result.output) + assert result.exit_code == 1 + + def test_remove_non_exist_pattern(self): + runner = CliRunner() + prefix = '1.1.1.1/24' + result = runner.invoke( + 
config.config.commands["flowcnt-route"].commands["pattern"].commands["remove"], + [prefix] + ) + + assert result.exit_code == 1 + assert 'Failed to remove route pattern: {} does not exist'.format(prefix) in result.output + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["remove"], + ['invalid'] + ) + assert result.exit_code == 1 + assert 'Failed to remove route pattern: {} does not exist'.format('invalid') in result.output + + def test_add_pattern_repeatly(self): + runner = CliRunner() + db = Db() + prefix = '1.1.1.1/24' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix], obj=db + ) + + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix], obj=db + ) + + print(result.output) + assert result.exit_code == 1 + assert 'already exists' in result.output + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--vrf', 'vnet1', '-y', prefix], obj=db + ) + + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--vrf', 'vnet1', '-y', prefix], obj=db + ) + + assert result.exit_code == 1 + assert 'already exists' in result.output + + prefix_v6 = '2000::/64' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', prefix_v6], obj=db + ) + + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', prefix_v6], obj=db + ) + + assert result.exit_code == 1 + assert 'already exists' in result.output + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', '--vrf', 'vrf1', '-y', prefix_v6], obj=db + ) + + assert result.exit_code == 0 + + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--max', '50', '--vrf', 'vrf1', '-y', prefix_v6], obj=db + ) + + assert result.exit_code == 1 + assert 'already exists' in result.output + + + def test_add_pattern_without_prefix_length(self): + runner = CliRunner() + db = Db() + prefix = '1.1.0.0' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert (prefix + '/32') in table + + prefix_v6 = '2000::1' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix_v6], obj=db + ) + + assert result.exit_code == 0 + table = db.cfgdb.get_table(FLOW_COUNTER_ROUTE_PATTERN_TABLE) + assert (prefix_v6 + '/128') in table + + def test_show_config(self): + runner = CliRunner() + db = Db() + prefix = '1.1.0.0/24' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + ['--vrf', 'Vrf1', prefix], obj=db + ) + + prefix_v6 = '2000::1/64' + result = runner.invoke( + config.config.commands["flowcnt-route"].commands["pattern"].commands["add"], + [prefix_v6], obj=db + ) + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["config"], + [], obj=db, catch_exceptions=False + ) + + assert result.exit_code == 0 + print(result.output) + assert result.output == 
expect_show_route_pattern + + +class TestRouteStats: + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + delete_cache(stat_type='route') + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1] + ) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + + def test_show_all_stats(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"], + [] + ) + + assert result.exit_code == 0 + assert expect_show_all_route_stats == result.output + + def test_show_by_pattern(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"].commands["pattern"], + ['1.1.1.0/24'] + ) + + assert result.exit_code == 0 + assert result.output == expect_show_route_stats_by_pattern_v4 + print(result.output) + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"].commands["pattern"], + ['2001::/64'] + ) + + assert result.exit_code == 0 + assert result.output == expect_show_route_stats_by_pattern_v6 + print(result.output) + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"].commands["pattern"], + ['--vrf', 'Vrf_1', '2001::/64'] + ) + + assert result.exit_code == 0 + assert result.output == expect_show_route_stats_by_pattern_and_vrf_v6 + print(result.output) + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"].commands["pattern"], + ['invalid'] + ) + + assert result.exit_code == 0 + assert result.output == expect_show_route_stats_by_pattern_empty + print(result.output) + + def test_show_by_route(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"].commands["route"], + ['1.1.1.1/31'] + ) + + assert result.exit_code == 0 + assert result.output == expect_show_route_stats_by_route_v4 + print(result.output) + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"].commands["route"], + ['2001::1/64'] + ) + + assert result.exit_code == 0 + assert result.output == expect_show_route_stats_by_route_v6 + print(result.output) + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"].commands["route"], + ['--vrf', 'Vrf_1', '2001::1/64'] + ) + + assert result.exit_code == 0 + assert result.output == expect_show_route_stats_by_route_and_vrf_v6 + print(result.output) + + def test_show_json(self): + cmd = 'flow_counters_stat -t route -j' + return_code, result = get_result_and_return_code(cmd) + assert return_code == 0 + assert result == expect_show_route_stats_all_json + + cmd = 'flow_counters_stat -t route -j --prefix_pattern 1.1.1.0/24' + return_code, result = get_result_and_return_code(cmd) + assert return_code == 0 + assert result == expect_show_route_stats_by_pattern_v4_json + + cmd = 'flow_counters_stat -t route -j --prefix 2001::1/64 --vrf Vrf_1' + return_code, result = get_result_and_return_code(cmd) + assert return_code == 0 + assert result == expect_show_route_stats_by_pattern_and_vrf_v6_json + + def test_clear_all(self): + delete_cache(stat_type='route') + runner = CliRunner() + result = runner.invoke( + clear.cli.commands["flowcnt-route"], [] + ) + + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"], + [] + ) + + assert result.exit_code == 0 
+ assert expect_after_clear_route_stats_all == result.output + print(result.output) + + def test_clear_by_pattern(self): + delete_cache(stat_type='route') + runner = CliRunner() + result = runner.invoke( + clear.cli.commands["flowcnt-route"].commands['pattern'], + ['1.1.1.0/24'] + ) + + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"], + [] + ) + + assert result.exit_code == 0 + assert expect_after_clear_route_stats_by_pattern_v4 == result.output + print(result.output) + + result = runner.invoke( + clear.cli.commands["flowcnt-route"].commands['pattern'], + ['2001::/64'] + ) + + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"], + [] + ) + + assert result.exit_code == 0 + assert expect_after_clear_route_stats_by_pattern_v6 == result.output + print(result.output) + + result = runner.invoke( + clear.cli.commands["flowcnt-route"].commands['pattern'], + ['--vrf', 'Vrf_1', '2001::/64'] + ) + + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"], + [] + ) + + assert result.exit_code == 0 + assert expect_after_clear_route_stats_all == result.output + print(result.output) + + def test_diff(self): + args = mock.MagicMock() + args.type = 'route' + args.delete = False + args.namespace = None + args.json = False + stats = flow_counters_stat.RouteFlowCounterStats(args) + stats._collect = mock.MagicMock() + old_data = { + '': { + '1.1.1.0/24': { + '1.1.1.1/24': ['100', '200', '1'], + '1.1.1.2/24': ['100', '100', '2'], + '1.1.1.3/24': ['100', '200', '3'] + } + } + } + stats._save(old_data) + stats.data = { + '': { + '1.1.1.0/24': { + '1.1.1.1/24': ['200', '300', '4'], + '1.1.1.2/24': ['100', '50', '2'], + '1.1.1.3/24': ['200', '300', '3'] + } + } + } + + stats._collect_and_diff() + cached_data = stats._load() + assert cached_data['']['1.1.1.0/24']['1.1.1.1/24'] == ['0', '0', '4'] + assert cached_data['']['1.1.1.0/24']['1.1.1.2/24'] == ['0', '0', '2'] + assert cached_data['']['1.1.1.0/24']['1.1.1.3/24'] == ['100', '200', '3'] + + +class TestRouteStatsMultiAsic: + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + delete_cache(stat_type='route') + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + delete_cache(stat_type='route') + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1] + ) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + + def test_show_all_stats(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"], + [] + ) + + assert result.exit_code == 0 + print(result.output) + assert expect_show_route_stats_all_multi_asic == result.output + + def test_show_json(self): + cmd = 'flow_counters_stat -t route -j' + return_code, result = get_result_and_return_code(cmd) + assert return_code == 0 + assert result == expect_show_route_stats_all_json_multi_asic + + def test_clear_all(self): + delete_cache(stat_type='route') + runner = CliRunner() + result = runner.invoke( + clear.cli.commands["flowcnt-route"], [] + ) + + assert result.exit_code == 0 + + result = runner.invoke( + show.cli.commands["flowcnt-route"].commands["stats"], + [] + ) + + 
assert result.exit_code == 0 + assert expect_after_clear_route_stats_all_multi_asic == result.output + print(result.output) diff --git a/tests/mock_tables/asic0/counters_db.json b/tests/mock_tables/asic0/counters_db.json index 9b1688c743..5c144393f2 100644 --- a/tests/mock_tables/asic0/counters_db.json +++ b/tests/mock_tables/asic0/counters_db.json @@ -1815,5 +1815,21 @@ }, "RATES:oid:0x1500000000034e":{ "RX_PPS": 50.25 + }, + "COUNTERS_ROUTE_NAME_MAP":{ + "1.1.1.3/31": "oid:0x1600000000034d", + "1.1.1.1/31": "oid:0x1600000000034e" + }, + "COUNTERS_ROUTE_TO_PATTERN_MAP": { + "1.1.1.1/31": "1.1.1.0/24", + "1.1.1.3/31": "1.1.1.0/24" + }, + "COUNTERS:oid:0x1600000000034e":{ + "SAI_COUNTER_STAT_PACKETS": 100, + "SAI_COUNTER_STAT_BYTES": 2000 + }, + "COUNTERS:oid:0x1600000000034d":{ + "SAI_COUNTER_STAT_PACKETS": 200, + "SAI_COUNTER_STAT_BYTES": 4000 } } diff --git a/tests/mock_tables/asic1/counters_db.json b/tests/mock_tables/asic1/counters_db.json index 720b0f099f..aed3b22b58 100644 --- a/tests/mock_tables/asic1/counters_db.json +++ b/tests/mock_tables/asic1/counters_db.json @@ -1010,5 +1010,15 @@ }, "RATES:oid:0x1500000000034e":{ "RX_PPS": 45.25 + }, + "COUNTERS_ROUTE_NAME_MAP":{ + "1.1.1.2/31": "oid:0x1600000000034f" + }, + "COUNTERS_ROUTE_TO_PATTERN_MAP": { + "1.1.1.2/31": "1.1.1.0/24" + }, + "COUNTERS:oid:0x1600000000034f":{ + "SAI_COUNTER_STAT_PACKETS": 1000, + "SAI_COUNTER_STAT_BYTES": 2000 } } diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 4d01db96ad..f416e4a941 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -1621,6 +1621,10 @@ "POLL_INTERVAL": "10000", "FLEX_COUNTER_STATUS": "enable" }, + "FLEX_COUNTER_TABLE|FLOW_CNT_ROUTE": { + "POLL_INTERVAL": "10000", + "FLEX_COUNTER_STATUS": "enable" + }, "PFC_WD|Ethernet0": { "action": "drop", "detection_time": "600", diff --git a/tests/mock_tables/counters_db.json b/tests/mock_tables/counters_db.json index b0b386303b..b79c839288 100644 --- a/tests/mock_tables/counters_db.json +++ b/tests/mock_tables/counters_db.json @@ -2041,5 +2041,33 @@ }, "RATES:oid:0x1500000000034e":{ "RX_PPS": 50.25 + }, + "COUNTERS_ROUTE_NAME_MAP":{ + "1.1.1.1/31": "oid:0x1600000000034e", + "1.1.1.2/31": "oid:0x1600000000034f", + "2001::1/64": "oid:0x1600000000035e", + "Vrf_1|2001::1/64": "oid:0x1600000000035f" + }, + "COUNTERS_ROUTE_TO_PATTERN_MAP": { + "1.1.1.1/31": "1.1.1.0/24", + "1.1.1.2/31": "1.1.1.0/24", + "2001::1/64": "2001::/64", + "Vrf_1|2001::1/64": "Vrf_1|2001::/64" + }, + "COUNTERS:oid:0x1600000000034e":{ + "SAI_COUNTER_STAT_PACKETS": 100, + "SAI_COUNTER_STAT_BYTES": 2000 + }, + "COUNTERS:oid:0x1600000000034f":{ + "SAI_COUNTER_STAT_PACKETS": 1000, + "SAI_COUNTER_STAT_BYTES": 2000 + }, + "COUNTERS:oid:0x1600000000035e":{ + "SAI_COUNTER_STAT_PACKETS": 50, + "SAI_COUNTER_STAT_BYTES": 1000 + }, + "COUNTERS:oid:0x1600000000035f":{ + "SAI_COUNTER_STAT_PACKETS": 1000, + "SAI_COUNTER_STAT_BYTES": 25000 } } diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 2a5ab76181..f9e4c54e2e 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -793,6 +793,9 @@ "xcvrd_switch_standby_end": "2021-May-13 10:01:15.696051", "xcvrd_switch_standby_start": "2021-May-13 10:01:15.690835" }, + "FLOW_COUNTER_CAPABILITY_TABLE|route": { + "support": "true" + }, "LINK_PROBE_STATS|Ethernet0": { "pck_loss_count": "612", "pck_expected_count": "840", diff --git a/utilities_common/cli.py b/utilities_common/cli.py index bd678ed2f0..c05069adcf 100644 --- 
a/utilities_common/cli.py +++ b/utilities_common/cli.py @@ -577,11 +577,11 @@ def json_dump(data): data, sort_keys=True, indent=2, ensure_ascii=False, default=json_serial ) - + def interface_is_untagged_member(db, interface_name): - """ Check if interface is already untagged member""" + """ Check if interface is already untagged member""" vlan_member_table = db.get_table('VLAN_MEMBER') - + for key,val in vlan_member_table.items(): if(key[1] == interface_name): if (val['tagging_mode'] == 'untagged'): @@ -628,3 +628,36 @@ def handle_parse_result(self, ctx, opts, args): "Illegal usage: %s is mutually exclusive with arguments %s" % (self.name, ', '.join(self.mutually_exclusive)) ) return super(MutuallyExclusiveOption, self).handle_parse_result(ctx, opts, args) + + +def query_yes_no(question, default="yes"): + """Ask a yes/no question via input() and return their answer. + + "question" is a string that is presented to the user. + "default" is the presumed answer if the user just hits . + It must be "yes" (the default), "no" or None (meaning + an answer is required of the user). + + The "answer" return value is True for "yes" or False for "no". + """ + valid = {"yes": True, "y": True, "ye": True, + "no": False, "n": False} + if default is None: + prompt = " [y/n] " + elif default == "yes": + prompt = " [Y/n] " + elif default == "no": + prompt = " [y/N] " + else: + raise ValueError("invalid default answer: '%s'" % default) + + while True: + sys.stdout.write(question + prompt) + choice = input().lower().strip() + if default is not None and choice == '': + return valid[default] + elif choice in valid: + return valid[choice] + else: + sys.stdout.write("Please respond with 'yes' or 'no' " + "(or 'y' or 'n').\n")
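
For reviewers, a minimal, self-contained sketch of the route-pattern key format that the patch uses across CONFIG_DB and COUNTERS_DB (flow_counter_util/route.py). It restates `build_route_pattern` / `extract_route_pattern` outside the patch purely for illustration; the tuple-key handling from the real helper is omitted, and the trailing asserts are illustrative expectations added here, not part of the change.

```python
# Sketch of the key-format helpers added in flow_counter_util/route.py.
# A key is '<vrf>|<prefix>' for a named VRF and a bare '<prefix>' for the
# default VRF; '|' is PATTERN_SEPARATOR in the patch.
DEFAULT_VRF = 'default'
PATTERN_SEPARATOR = '|'


def build_route_pattern(vrf, prefix):
    """Compose a route-pattern key from a VRF name and a prefix."""
    if vrf and vrf != DEFAULT_VRF:
        return '{}{}{}'.format(vrf, PATTERN_SEPARATOR, prefix)
    return prefix


def extract_route_pattern(route_pattern):
    """Split a route-pattern key back into (vrf, prefix); a key without a
    separator belongs to the default VRF."""
    items = route_pattern.split(PATTERN_SEPARATOR)
    if len(items) == 1:
        return DEFAULT_VRF, items[0]
    if len(items) == 2:
        return items[0], items[1]
    return None, None


# Illustrative expectations (not part of the patch).
assert build_route_pattern('Vrf_1', '2001::/64') == 'Vrf_1|2001::/64'
assert build_route_pattern(None, '1.1.1.0/24') == '1.1.1.0/24'
assert extract_route_pattern('Vrf_1|2001::/64') == ('Vrf_1', '2001::/64')
assert extract_route_pattern('1.1.1.0/24') == ('default', '1.1.1.0/24')
```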
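
And a short usage sketch showing how the new config / show / clear entry points added by this patch fit together, driven the same way the patch's own unit tests drive them (tests/flow_counter_stats_test.py). It assumes the sonic-utilities repo layout (config.main, show.main, clear.main, utilities_common.db.Db) with a mocked DB as in the tests; the `demo()` wrapper name is only for this example.

```python
# Sketch: exercise the new flowcnt-route commands via Click's CliRunner,
# mirroring the invocation pattern used in tests/flow_counter_stats_test.py.
from click.testing import CliRunner

import clear.main as clear
import config.main as config
import show.main as show
from utilities_common.db import Db


def demo():
    runner = CliRunner()
    db = Db()

    # config flowcnt-route pattern add --vrf Vrf_1 --max 50 2001::/64
    result = runner.invoke(
        config.config.commands["flowcnt-route"].commands["pattern"].commands["add"],
        ['--vrf', 'Vrf_1', '--max', '50', '2001::/64'], obj=db)
    print(result.exit_code, result.output)

    # show flowcnt-route config  /  show flowcnt-route stats
    print(runner.invoke(show.cli.commands["flowcnt-route"].commands["config"],
                        [], obj=db).output)
    print(runner.invoke(show.cli.commands["flowcnt-route"].commands["stats"],
                        []).output)

    # sonic-clear flowcnt-route
    runner.invoke(clear.cli.commands["flowcnt-route"], [])


if __name__ == '__main__':
    demo()
```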