From 999983c7cff0c2a246ba90fd84a909ad65804c11 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Sat, 7 Aug 2021 23:30:10 +0000 Subject: [PATCH 01/60] Commit for Ref Signed-off-by: Vivek Reddy Karri --- scripts/coredump_gen_handler | 128 ++++++++++++++++++ scripts/techsupport_cleanup | 70 ++++++++++ setup.py | 24 +++- .../coredump_gen_handler_test.py | 18 +++ .../shared_state_mock.py | 92 +++++++++++++ .../techsupport_cleanup_test.py | 87 ++++++++++++ 6 files changed, 415 insertions(+), 4 deletions(-) create mode 100644 scripts/coredump_gen_handler create mode 100644 scripts/techsupport_cleanup create mode 100644 tests/auto_techsupport_tests/coredump_gen_handler_test.py create mode 100644 tests/auto_techsupport_tests/shared_state_mock.py create mode 100644 tests/auto_techsupport_tests/techsupport_cleanup_test.py diff --git a/scripts/coredump_gen_handler b/scripts/coredump_gen_handler new file mode 100644 index 0000000000..3e27c1f04a --- /dev/null +++ b/scripts/coredump_gen_handler @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 + +""" +coredump_gen_handler script. + This script is invoked by the coredump-compress script + for auto techsupport invocation and cleanup core dumps. + For more info, refer to the Event Driven TechSupport & CoreDump Mgmt HLD +""" +import os, re +import sys +import glob +import time +import argparse +import subprocess +import syslog +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.auto_techsupport_helper import * + +class CoreDumpCreateHandle(): + def __init__(self, core_name): + self.core_name = core_name + self.db = None + self.proc_mp = {} + self.core_ts_map = {} + self.curr_ts_list = [] + + def handle_techsupport_creation_event(self): + file_path = os.path.join(CORE_DUMP_DIR, self.core_name) + if not verify_recent_file_creation(file_path): + return + + self.db = SonicV2Connector(host="127.0.0.1") + self.db.connect(CFG_DB) + self.db.connect(STATE_DB) + if cfg_db.get(CFG_DB, AUTO_TS, CFG_STATE) != "enabled": + sys.exit() + self.fetch_critical_procs() + proc = self.core_name.split(".")[0] + if proc not in self.proc_mp: + sys.exit() # Only handles the critical processes + + FEATURE_KEY = FEATURE.format(self.proc_mp[proc]) + if cfg_db.get(CFG_DB, FEATURE_KEY, TS) != "enabled": + sys.exit() # Should be set "enabled" in the FEATURE Table + + global_cooloff = cfg_db.get(CFG_DB, AUTO_TS, COOLOFF) + proc_cooloff = cfg_db.get(CFG_DB, FEATURE_KEY, COOLOFF) + + cooloff_passed = self.verify_cooloff(global_cooloff, proc_cooloff, proc) + if cooloff_passed: + new_file = self.invoke_ts_cmd() + if new_file: + self.db.set(STATE_DB, TS_MAP, new_file[0], "{};{}".format(self.core_name, int(time.time()))) + + core_usage = 0 + if cfg_db.hexists(CFG_DB, AUTO_TS, CFG_CORE_USAGE): + core_usage = cfg_db.get(CFG_DB, AUTO_TS, CFG_CORE_USAGE) + + if core_usage == 0: + _ , num_bytes = get_stats(os.path.join(CORE_DUMP_DIR, CORE_DUMP_PTRN)) + syslog.syslog(syslog.LOG_INFO, "No Cleanup is performed, current size occupied: {}".format(pretty_size(num_bytes))) + return + + cleanup_process(core_usage, CORE_DUMP_PTRN, CORE_DUMP_DIR) + + def invoke_ts_cmd(self): + _, out, _ = subprocess_exec(["show", "techsupport"]) + new_list = get_ts_dumps(True) + diff = list(set(new_list).difference(set(self.curr_ts_list))) + self.curr_ts_list = new_list + if len(diff) == 0: + syslog.syslog(syslog.LOG_ERR, "'show techsupport' invocation was successful but no TechSupport Dump was found") + else: + syslog.syslog(syslog.LOG_INFO, "'show techsupport' invocation is successful, {} is 
created".format(diff)) + return diff + + def verify_cooloff(self, global_cooloff, proc_cooloff, proc): + """Verify both the global cooloff and per-proc cooloff has passed""" + self.curr_ts_list = get_ts_dumps(True) + if len(curr_list) != 0: + last_ts_dump_creation = os.path.getmtime(self.curr_ts_list[-1]) + if time.time() - last_ts_dump_creation < global_cooloff: + syslog.syslog(syslog.LOG_INFO, "Cooloff period has not yet passed. No Techsupport Invocation is performed ") + return False + + ts_map = self.db.get_all(STATE_DB, TS_MAP) + self.parse_ts_map(ts_map) + + if proc in self.core_ts_map: + last_creation_time = self.core_ts_map[proc][0] + if time.time() - last_creation_time < proc_cooloff: + syslog.syslog(syslog.LOG_INFO, "Cooloff period for {} prcess has not yet passed. No Techsupport Invocation is performed".format(proc)) + return False + return True + + def parse_ts_map(self, ts_map): + """Create core_dump, ts_dump & creation_time map""" + for ts_dump, tup in ts_map: + core_dump, creation_time = tup.split(";") + if core_dump not in self.core_ts_map: + self.core_ts_map[core_dump] = [] + self.core_ts_map[core_dump].append((int(creation_time), ts_dump)) + for core_dump in self.core_ts_map: + self.core_ts_map[core_dump].sort() + + def fetch_critical_procs(self): + """Fetches the critical_procs and corresponding docker names""" + keys = self.db.keys(CFG_DB, FEATURE.format("*")) + containers = [key.split()[-1] for key in keys] + for container in containers: + rc, stdout, _ = subprocess_exec("docker exec -t {} cat /etc/supervisor/critical_processes".format(docker)) + if rc != 0: + continue + procs = stdout.split() + for proc in procs: + if re.match("program:*", proc): + self.proc_mp[proc.split(":")[-1]] = container + +def main(): + parser = argparse.ArgumentParser(description='Auto Techsupport Invocation and CoreDump Mgmt Script') + parser.add_argument('name', type=str, help='Core Dump Name', required=True) + args = parser.parse_args() + syslog.openlog(logoption=syslog.LOG_PID) + cls = CoreDumpCreateHandle() + cls.handle_core_dump_creation_event(args.name) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/scripts/techsupport_cleanup b/scripts/techsupport_cleanup new file mode 100644 index 0000000000..d43dfd1e60 --- /dev/null +++ b/scripts/techsupport_cleanup @@ -0,0 +1,70 @@ +#!/usr/bin/env python3 + +""" +techsupport_cleanup script. 
+ This script is invoked by the generate_dump script for techsupport cleanup + For more info, refer to the Event Driven TechSupport & CoreDump Mgmt HLD +""" +import os +import sys +import glob +import time +import argparse +import subprocess +import syslog +import shutil +from swsscommon.swsscommon import SonicV2Connector +from utilities_common.auto_techsupport_helper import * + +def mock_clean_state_db_entries(removed_files, db): + all_keys = db.get_all(STATE_DB, TS_MAP) + for file in removed_files: + if file in all_keys: + del all_keys[file] + for key in all_keys: + db.set(STATE_DB, TS_MAP, key, all_keys[key]) + +def clean_state_db_entries(removed_files, db): + db_conn = db.get_redis_client(STATE_DB) + for file in removed_files: + db_conn.hdel(TS_MAP, file) + +def handle_techsupport_creation_event(dump_name): + file_path = os.path.join(TS_DIR, dump_name) + + if not verify_recent_file_creation(file_path): + return + + curr_list = get_ts_dumps() + db = SonicV2Connector(host="127.0.0.1") + db.connect(CFG_DB) + db.connect(STATE_DB) + + print("STATE: ", db.get(CFG_DB, AUTO_TS, CFG_STATE)) + if db.get(CFG_DB, AUTO_TS, CFG_STATE) != "enabled": + return + + max_ts = db.get("CONFIG_DB", AUTO_TS, CFG_MAX_TS) + print("Max TS ", max_ts) + if max_ts: + max_ts = int(max_ts) + else: + max_ts = 0 + + if max_ts == 0: + _ , num_bytes = get_stats(os.path.join(TS_DIR, TS_PTRN)) + syslog.syslog(syslog.LOG_INFO, "No Cleanup is performed, current size occupied: {}".format(pretty_size(num_bytes))) + return + + removed_files = cleanup_process(max_ts, TS_PTRN, TS_DIR) + clean_state_db_entries(removed_files, db) + +def main(): + parser = argparse.ArgumentParser(description='Auto Techsupport Invocation and CoreDump Mgmt Script') + parser.add_argument('name', type=str, help='TechSupport Dump Name', required=True) + args = parser.parse_args() + syslog.openlog(logoption=syslog.LOG_PID) + handle_techsupport_creation_event(args.name) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/setup.py b/setup.py index 5847b6e6ee..90d3ca4efd 100644 --- a/setup.py +++ b/setup.py @@ -5,9 +5,21 @@ # under scripts/. 
Consider stop using scripts and use console_scripts instead # # https://stackoverflow.com/questions/18787036/difference-between-entry-points-console-scripts-and-scripts-in-setup-py -import fastentrypoints +import fastentrypoints, sys from setuptools import setup +from setuptools.command.test import test as TestCommand + +class PyTest(TestCommand): + user_options = [("pytest-args=", "a", "Arguments to pass to pytest")] + def initialize_options(self): + TestCommand.initialize_options(self) + self.pytest_args = "" + def run_tests(self): + import shlex + import pytest + errno = pytest.main(shlex.split(self.pytest_args)) + sys.exit(errno) setup( name='sonic-utilities', @@ -31,6 +43,8 @@ 'crm', 'debug', 'generic_config_updater', + 'dump', + 'dump.plugins', 'pfcwd', 'sfputil', 'ssdutil', @@ -71,7 +85,8 @@ 'filter_fdb_input/*', 'pfcwd_input/*', 'wm_input/*', - 'ecn_input/*'] + 'ecn_input/*', + 'dump_input/*'] }, scripts=[ 'scripts/aclshow', @@ -130,8 +145,7 @@ 'scripts/watermarkstat', 'scripts/watermarkcfg', 'scripts/sonic-kdump-config', - 'scripts/centralize_database', - 'scripts/null_route_helper' + 'scripts/centralize_database' ], entry_points={ 'console_scripts': [ @@ -142,6 +156,7 @@ 'counterpoll = counterpoll.main:cli', 'crm = crm.main:cli', 'debug = debug.main:cli', + 'dump = dump.main:dump', 'filter_fdb_entries = fdbutil.filter_fdb_entries:main', 'pfcwd = pfcwd.main:cli', 'sfputil = sfputil.main:cli', @@ -217,5 +232,6 @@ 'Topic :: Utilities', ], keywords='sonic SONiC utilities command line cli CLI', + cmdclass={"pytest": PyTest}, test_suite='setup.get_test_suite' ) diff --git a/tests/auto_techsupport_tests/coredump_gen_handler_test.py b/tests/auto_techsupport_tests/coredump_gen_handler_test.py new file mode 100644 index 0000000000..71bc2f8c04 --- /dev/null +++ b/tests/auto_techsupport_tests/coredump_gen_handler_test.py @@ -0,0 +1,18 @@ +import os +import sys +import pytest +import pyfakefs +from unittest import mock +from pyfakefs.fake_filesystem_unittest import Patcher +from utilities_common.general import load_module_from_source + +from .mock_tables import dbconnector + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, 'scripts') +sys.path.insert(0, modules_path) + +# Load the file under test +script_path = os.path.join(scripts_path, 'coredump_gen_handler') +cdump_handle = load_module_from_source('coredump_gen_handler', script_path) \ No newline at end of file diff --git a/tests/auto_techsupport_tests/shared_state_mock.py b/tests/auto_techsupport_tests/shared_state_mock.py new file mode 100644 index 0000000000..da96f2d638 --- /dev/null +++ b/tests/auto_techsupport_tests/shared_state_mock.py @@ -0,0 +1,92 @@ +import re +from swsscommon.swsscommon import SonicDBConfig + +class RedisSingleton: + "Introduced to modify/check Redis DB's data outside of the scripts" + __instance = None + + seperator_map = {"APPL_DB" : ":", + "ASIC_DB" : ":"} + + @staticmethod + def getInstance(): + """ Static access method. 
""" + if RedisSingleton.__instance == None: + RedisSingleton() + return RedisSingleton.__instance + + @staticmethod + def clearState(): + """ Clear the Redis State """ + if RedisSingleton.__instance != None: + RedisSingleton.__instance.data.clear() + + def __init__(self): + if RedisSingleton.__instance != None: + raise Exception("This class is a singleton!") + else: + self.data = dict() + RedisSingleton.__instance = self + +class MockConn(object): + + def __init__(self, host): + self.redis = RedisSingleton.getInstance() + + def connect(self, db_name): + if db_name not in self.redis: + self.redis.data[db_name] = {} + + def get(self, db_name, key, field): + return self.redis.data.get(db_name, {}).get(key, {}).get(field, "") + + def keys(self, db_name, pattern): + pattern = re.escape(pattern) + pattern = pattern.replace("\\*", ".*") + filtered_keys = [] + all_keys = self.redis.data[db_name].keys() + for key in all_keys: + if re.match(pattern, key): + filtered_keys.append(key) + return filtered_keys + + def get_all(self, db_name, key): + return self.redis.data.get(db_name, {}).get(key, {}) + + def set(self, db_name, key, field, value, blocking=True): + self.redis.data[db_name][key][field] = value + + def hmset(self, db_name, key, hash): + self.redis.data[db_name][key] = hash + + def hexists(self, db_name, key, field): + if key in self.redis.data[db_name]: + return True + else: + return False + + def exists(self, db_name, key): + if key in self.redis.data[db_name]: + return True + else: + return False + + def get_redis_client(self, db_name): + return MockClient(db_name) + +class MockClient(object): + def __init__(self, db_name): + self.redis = RedisSingleton.getInstance() + self.db_name = db_name + + def hdel(self, key, field): + try: + del self.redis.data[self.db_name][key][field] + except: + continue + + def hset(self, key, field, value): + try: + self.redis.data[self.db_name][key][field] = value + except: + continue \ No newline at end of file diff --git a/tests/auto_techsupport_tests/techsupport_cleanup_test.py b/tests/auto_techsupport_tests/techsupport_cleanup_test.py new file mode 100644 index 0000000000..bcff647271 --- /dev/null +++ b/tests/auto_techsupport_tests/techsupport_cleanup_test.py @@ -0,0 +1,87 @@ +import os +import sys +import pytest +import pyfakefs +import unittest +from pyfakefs.fake_filesystem_unittest import Patcher +from utilities_common.general import load_module_from_source + +from .mock_tables import dbconnector + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, 'scripts') +sys.path.insert(0, modules_path) + +# Load the file under test +script_path = os.path.join(scripts_path, 'techsupport_cleanup') +ts_mod = load_module_from_source('techsupport_cleanup', script_path) + +ts_mod.SonicV2Connector = dbconnector.SonicV2Connector +redis_handle = dbconnector.SonicV2Connector(host="127.0.0.1") + +""" +AUTO_TS = "AUTO_TECHSUPPORT|global" +CFG_DB = "CONFIG_DB" +CFG_STATE = "state" +CFG_MAX_TS = "max_techsupport_size" +TS_DIR = "/var/dump" +TS_PTRN = "sonic_dump_*.tar*" +TIME_BUF = 20 + +# State DB Attributes +STATE_DB = "STATE_DB" +TS_MAP = "AUTO_TECHSUPPORT|TS_CORE_MAP" +""" + +class TestTechsupportCreationEvent(unittest.TestCase): + + def setUp(self): + self.orig_time_buf = ts_mod.TIME_BUF + ts_mod.TIME_BUF = 1 # Patch the buf to 1 sec + redis_handle.connect("CONFIG_DB") + redis_handle.connect("STATE_DB") + + def tearDown(self): + ts_mod.TIME_BUF = self.orig_time_buf + + def 
set_auto_ts_cfg(self, **kwargs): + state = kwargs[ts_mod.CFG_STATE] if ts_mod.CFG_STATE in kwargs else "disabled" + max_ts = kwargs[ts_mod.CFG_MAX_TS] if ts_mod.CFG_MAX_TS in kwargs else "0" + redis_handle.set(ts_mod.CFG_DB, ts_mod.AUTO_TS, ts_mod.CFG_STATE, state) + redis_handle.set(ts_mod.CFG_DB, ts_mod.AUTO_TS, ts_mod.CFG_MAX_TS, max_ts) + print("state: {}, max_techsupport_size: {}".format(state, max_ts)) + + def test_no_cleanup(self): + self.set_auto_ts_cfg(state="enabled", max_techsupport_size="10") + with Patcher() as patcher: + patcher.fs.set_disk_usage(1000, path="/var/dump/") + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=30) + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=30) + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=30) + ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz") + current_fs = os.listdir(ts_mod.TS_DIR) + print(current_fs) + assert len(current_fs) == 3 + assert "sonic_dump_random1.tar.gz" in current_fs + assert "sonic_dump_random2.tar.gz" in current_fs + assert "sonic_dump_random3.tar.gz" in current_fs + + def test_dump_cleanup(self): + self.set_auto_ts_cfg(state="enabled", max_techsupport_size="5") + with Patcher() as patcher: + patcher.fs.set_disk_usage(1000, path="/var/dump/") + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=25) + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=25) + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=25) + ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz") + current_fs = os.listdir(ts_mod.TS_DIR) + print(current_fs) + assert len(current_fs) == 2 + assert "sonic_dump_random1.tar.gz" not in current_fs + assert "sonic_dump_random2.tar.gz" in current_fs + assert "sonic_dump_random3.tar.gz" in current_fs + + + + \ No newline at end of file From 104a412511f5c245a7e71b81853dfe85ed27dbc9 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Sun, 8 Aug 2021 03:23:51 +0000 Subject: [PATCH 02/60] TechSupport Tests Completed Signed-off-by: Vivek Reddy Karri --- scripts/techsupport_cleanup | 16 +-- tests/auto_techsupport_tests/__init__.py | 0 .../coredump_gen_handler_test.py | 20 ++-- .../shared_state_mock.py | 22 ++-- .../techsupport_cleanup_test.py | 104 ++++++++++++------ 5 files changed, 99 insertions(+), 63 deletions(-) create mode 100644 tests/auto_techsupport_tests/__init__.py diff --git a/scripts/techsupport_cleanup b/scripts/techsupport_cleanup index d43dfd1e60..84758127b3 100644 --- a/scripts/techsupport_cleanup +++ b/scripts/techsupport_cleanup @@ -16,18 +16,12 @@ import shutil from swsscommon.swsscommon import SonicV2Connector from utilities_common.auto_techsupport_helper import * -def mock_clean_state_db_entries(removed_files, db): - all_keys = db.get_all(STATE_DB, TS_MAP) - for file in removed_files: - if file in all_keys: - del all_keys[file] - for key in all_keys: - db.set(STATE_DB, TS_MAP, key, all_keys[key]) - def clean_state_db_entries(removed_files, db): + if not removed_files: + return db_conn = db.get_redis_client(STATE_DB) for file in removed_files: - db_conn.hdel(TS_MAP, file) + db_conn.hdel(TS_MAP, os.path.basename(file)) def handle_techsupport_creation_event(dump_name): file_path = os.path.join(TS_DIR, dump_name) @@ -39,13 +33,11 @@ def handle_techsupport_creation_event(dump_name): db = SonicV2Connector(host="127.0.0.1") db.connect(CFG_DB) db.connect(STATE_DB) - - print("STATE: ", db.get(CFG_DB, AUTO_TS, CFG_STATE)) + if 
db.get(CFG_DB, AUTO_TS, CFG_STATE) != "enabled": return max_ts = db.get("CONFIG_DB", AUTO_TS, CFG_MAX_TS) - print("Max TS ", max_ts) if max_ts: max_ts = int(max_ts) else: diff --git a/tests/auto_techsupport_tests/__init__.py b/tests/auto_techsupport_tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/auto_techsupport_tests/coredump_gen_handler_test.py b/tests/auto_techsupport_tests/coredump_gen_handler_test.py index 71bc2f8c04..ec35757481 100644 --- a/tests/auto_techsupport_tests/coredump_gen_handler_test.py +++ b/tests/auto_techsupport_tests/coredump_gen_handler_test.py @@ -1,18 +1,24 @@ import os import sys -import pytest -import pyfakefs -from unittest import mock +import pyfakefs +import unittest from pyfakefs.fake_filesystem_unittest import Patcher +from swsscommon import swsscommon +from .shared_state_mock import RedisSingleton, MockConn from utilities_common.general import load_module_from_source -from .mock_tables import dbconnector +# Mock the SonicV2Connector +swsscommon.SonicV2Connector = MockConn -test_path = os.path.dirname(os.path.abspath(__file__)) -modules_path = os.path.dirname(test_path) +curr_test_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "../")) +test_dir_path = os.path.dirname(curr_test_path) +modules_path = os.path.dirname(test_dir_path) scripts_path = os.path.join(modules_path, 'scripts') sys.path.insert(0, modules_path) # Load the file under test script_path = os.path.join(scripts_path, 'coredump_gen_handler') -cdump_handle = load_module_from_source('coredump_gen_handler', script_path) \ No newline at end of file +cdump_handle = load_module_from_source('coredump_gen_handler', script_path) + +# Mock Handle to the data inside the Redis +RedisHandle = RedisSingleton.getInstance() \ No newline at end of file diff --git a/tests/auto_techsupport_tests/shared_state_mock.py b/tests/auto_techsupport_tests/shared_state_mock.py index da96f2d638..591f2dc6db 100644 --- a/tests/auto_techsupport_tests/shared_state_mock.py +++ b/tests/auto_techsupport_tests/shared_state_mock.py @@ -1,13 +1,12 @@ import re -from swsscommon.swsscommon import SonicDBConfig class RedisSingleton: - "Introduced to modify/check Redis DB's data outside of the scripts" + """ + Introduced to modify/check Redis DB's data outside of the scripts + Usage: Clear and Set the state of the mock before every test case + """ __instance = None - - seperator_map = {"APPL_DB" : ":", - "ASIC_DB" : ":"} - + @staticmethod def getInstance(): """ Static access method. 
""" @@ -29,12 +28,15 @@ def __init__(self): RedisSingleton.__instance = self class MockConn(object): - + """ + SonicV2Connector Mock for the usecases to verify/modify the Redis State outside + of the scope of the connector class + """ def __init__(self, host): self.redis = RedisSingleton.getInstance() def connect(self, db_name): - if db_name not in self.redis: + if db_name not in self.redis.data: self.redis.data[db_name] = {} def get(self, db_name, key, field): @@ -83,10 +85,10 @@ def hdel(self, key, field): try: del self.redis.data[self.db_name][key][field] except: - continue + pass def hset(self, key, field, value): try: self.redis.data[self.db_name][key][field] = value except: - continue \ No newline at end of file + pass \ No newline at end of file diff --git a/tests/auto_techsupport_tests/techsupport_cleanup_test.py b/tests/auto_techsupport_tests/techsupport_cleanup_test.py index bcff647271..1060a5806c 100644 --- a/tests/auto_techsupport_tests/techsupport_cleanup_test.py +++ b/tests/auto_techsupport_tests/techsupport_cleanup_test.py @@ -1,15 +1,18 @@ import os import sys -import pytest import pyfakefs import unittest from pyfakefs.fake_filesystem_unittest import Patcher +from swsscommon import swsscommon +from .shared_state_mock import RedisSingleton, MockConn from utilities_common.general import load_module_from_source -from .mock_tables import dbconnector +# Mock the SonicV2Connector +swsscommon.SonicV2Connector = MockConn -test_path = os.path.dirname(os.path.abspath(__file__)) -modules_path = os.path.dirname(test_path) +curr_test_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "../")) +test_dir_path = os.path.dirname(curr_test_path) +modules_path = os.path.dirname(test_dir_path) scripts_path = os.path.join(modules_path, 'scripts') sys.path.insert(0, modules_path) @@ -17,43 +20,50 @@ script_path = os.path.join(scripts_path, 'techsupport_cleanup') ts_mod = load_module_from_source('techsupport_cleanup', script_path) -ts_mod.SonicV2Connector = dbconnector.SonicV2Connector -redis_handle = dbconnector.SonicV2Connector(host="127.0.0.1") +# Mock Handle to the data inside the Redis +RedisHandle = RedisSingleton.getInstance() -""" -AUTO_TS = "AUTO_TECHSUPPORT|global" -CFG_DB = "CONFIG_DB" -CFG_STATE = "state" -CFG_MAX_TS = "max_techsupport_size" -TS_DIR = "/var/dump" -TS_PTRN = "sonic_dump_*.tar*" -TIME_BUF = 20 - -# State DB Attributes -STATE_DB = "STATE_DB" -TS_MAP = "AUTO_TECHSUPPORT|TS_CORE_MAP" -""" +def set_auto_ts_cfg(**kwargs): + state = kwargs[ts_mod.CFG_STATE] if ts_mod.CFG_STATE in kwargs else "disabled" + max_ts = kwargs[ts_mod.CFG_MAX_TS] if ts_mod.CFG_MAX_TS in kwargs else "0" + RedisHandle.data[ts_mod.CFG_DB] = {ts_mod.AUTO_TS : {ts_mod.CFG_STATE : state, ts_mod.CFG_MAX_TS : max_ts}} class TestTechsupportCreationEvent(unittest.TestCase): def setUp(self): self.orig_time_buf = ts_mod.TIME_BUF - ts_mod.TIME_BUF = 1 # Patch the buf to 1 sec - redis_handle.connect("CONFIG_DB") - redis_handle.connect("STATE_DB") + ts_mod.TIME_BUF = 0.5 # Patch the buf to 1 sec def tearDown(self): ts_mod.TIME_BUF = self.orig_time_buf - def set_auto_ts_cfg(self, **kwargs): - state = kwargs[ts_mod.CFG_STATE] if ts_mod.CFG_STATE in kwargs else "disabled" - max_ts = kwargs[ts_mod.CFG_MAX_TS] if ts_mod.CFG_MAX_TS in kwargs else "0" - redis_handle.set(ts_mod.CFG_DB, ts_mod.AUTO_TS, ts_mod.CFG_STATE, state) - redis_handle.set(ts_mod.CFG_DB, ts_mod.AUTO_TS, ts_mod.CFG_MAX_TS, max_ts) - print("state: {}, max_techsupport_size: {}".format(state, max_ts)) + def 
test_no_cleanup_state_disabled(self): + """ + Scenario: AUTO_TECHSUPPORT is disabled. + Check no cleanup is performed, even though the techsupport limit is already crossed + """ + RedisSingleton.clearState() + set_auto_ts_cfg(max_techsupport_size="5") + with Patcher() as patcher: + patcher.fs.set_disk_usage(1000, path="/var/dump/") + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=30) + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=30) + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=30) + ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz") + current_fs = os.listdir(ts_mod.TS_DIR) + print(current_fs) + assert len(current_fs) == 3 + assert "sonic_dump_random1.tar.gz" in current_fs + assert "sonic_dump_random2.tar.gz" in current_fs + assert "sonic_dump_random3.tar.gz" in current_fs - def test_no_cleanup(self): - self.set_auto_ts_cfg(state="enabled", max_techsupport_size="10") + def test_no_cleanup_state_enabled(self): + """ + Scenario: AUTO_TECHSUPPORT is enabled. + Verify no cleanup is performed, as the techsupport limit haven't crossed yet + """ + RedisSingleton.clearState() + set_auto_ts_cfg(state="enabled", max_techsupport_size="10") with Patcher() as patcher: patcher.fs.set_disk_usage(1000, path="/var/dump/") patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=30) @@ -68,7 +78,12 @@ def test_no_cleanup(self): assert "sonic_dump_random3.tar.gz" in current_fs def test_dump_cleanup(self): - self.set_auto_ts_cfg(state="enabled", max_techsupport_size="5") + """ + Scenario: AUTO_TECHSUPPORT is enabled. techsupport size limit is crosed + Verify Whether is cleanup is performed or not + """ + RedisSingleton.clearState() + set_auto_ts_cfg(state="enabled", max_techsupport_size="5") with Patcher() as patcher: patcher.fs.set_disk_usage(1000, path="/var/dump/") patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=25) @@ -82,6 +97,27 @@ def test_dump_cleanup(self): assert "sonic_dump_random2.tar.gz" in current_fs assert "sonic_dump_random3.tar.gz" in current_fs - - - \ No newline at end of file + def test_state_db_update(self): + """ + Scenario: AUTO_TECHSUPPORT is enabled. 
techsupport size limit is crosed + Verify Whether is cleanup is performed and the state_db is updated + """ + RedisSingleton.clearState() + set_auto_ts_cfg(state="enabled", max_techsupport_size="5") + RedisHandle.data["STATE_DB"] = {} + RedisHandle.data["STATE_DB"][ts_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "orchagent;1575985", + "sonic_dump_random2.tar.gz" : "syncd;1575988"} + with Patcher() as patcher: + patcher.fs.set_disk_usage(1000, path="/var/dump/") + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=25) + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=25) + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=25) + ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz") + current_fs = os.listdir(ts_mod.TS_DIR) + print(current_fs) + assert len(current_fs) == 2 + assert "sonic_dump_random1.tar.gz" not in current_fs + assert "sonic_dump_random2.tar.gz" in current_fs + assert "sonic_dump_random3.tar.gz" in current_fs + assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][ts_mod.TS_MAP] + assert "sonic_dump_random1.tar.gz" not in RedisHandle.data["STATE_DB"][ts_mod.TS_MAP] \ No newline at end of file From 6bcfb5d8ebb9c4f23e90fdaf75563a3a27325144 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Sun, 8 Aug 2021 03:29:06 +0000 Subject: [PATCH 03/60] auto_techsupport helper added Signed-off-by: Vivek Reddy Karri --- utilities_common/auto_techsupport_helper.py | 132 ++++++++++++++++++++ 1 file changed, 132 insertions(+) create mode 100644 utilities_common/auto_techsupport_helper.py diff --git a/utilities_common/auto_techsupport_helper.py b/utilities_common/auto_techsupport_helper.py new file mode 100644 index 0000000000..e82da3c1b8 --- /dev/null +++ b/utilities_common/auto_techsupport_helper.py @@ -0,0 +1,132 @@ +import os, re +import sys +import glob +import time +import argparse +import subprocess +import shutil +import math +import syslog + +AUTO_TS = "AUTO_TECHSUPPORT|global" +CFG_DB = "CONFIG_DB" +CFG_STATE = "state" +CFG_MAX_TS = "max_techsupport_size" +COOLOFF = "cooloff" +CFG_CORE_USAGE = "core_usage" + +CORE_DUMP_DIR = "/var/core" +CORE_DUMP_PTRN = "*.core.gz" + +TS_DIR = "/var/dump" +TS_PTRN = "sonic_dump_*.tar*" + +# FEATURE Table fields +FEATURE = "FEATURE|{}" +TS = "auto_techsupport" + +# State DB Attributes +STATE_DB = "STATE_DB" +TS_MAP = "AUTO_TECHSUPPORT|TS_CORE_MAP" + +TIME_BUF = 20 + +##### Helper methods +def subprocess_exec(cmd): + output = subprocess.run( + cmd, + capture_output=True, + text=True + ) + return output.returncode, output.stdout, output.stderr + +def get_ts_dumps(full_path=False): + """ Get the list of TS dumps in the TS_DIR, sorted by the creation time """ + curr_list = glob.glob(os.path.join(TS_DIR, TS_PTRN)) + curr_list.sort(key=os.path.getmtime) + if full_path: + return curr_list + return [os.path.basename(name) for name in curr_list] + +def verify_recent_file_creation(file_path, in_last_sec=TIME_BUF): + """ Verify if the file exists and is created within the last TIME_BUF sec """ + curr = time.time() + try: + was_created_on = os.path.getmtime(file_path) + except: + return False + if curr - was_created_on < in_last_sec: + return True + else: + return False + +def get_stats(ptrn, collect_stats=True): + """ + Returns the size of the files (matched by the ptrn) occupied. 
+ Also returns the list of files Sorted by the Descending order of creation time & size + """ + files = glob.glob(ptrn) + file_stats = [] + total_size = 0 + for file in files: + file_size = os.path.getsize(file) + if collect_stats: + file_stats.append((os.path.getmtime(file), file_size, file)) + total_size += file_size + if collect_stats: + # Sort by the Descending order of file_creation_time, size_of_file + file_stats = sorted(file_stats, key = lambda sub: (-sub[0], sub[1], sub[2])) + return (file_stats, total_size) + +def pretty_size(bytes): + """Get human-readable file sizes""" + UNITS_MAPPING = [ + (1<<50, ' PB'), + (1<<40, ' TB'), + (1<<30, ' GB'), + (1<<20, ' MB'), + (1<<10, ' KB'), + (1, (' byte', ' bytes')), + ] + for factor, suffix in UNITS_MAPPING: + if bytes >= factor: + break + amount = int(bytes / factor) + + if isinstance(suffix, tuple): + singular, multiple = suffix + if amount == 1: + suffix = singular + else: + suffix = multiple + return str(amount) + suffix + +def cleanup_process(limit, file_ptrn, dir): + """Deletes the oldest files incrementally until the size is under limit""" + print("---- Reached Here ------ ") + if not(1 <= limit and limit <= 100): + syslog.syslog(syslog.LOG_ERR, "core_usage_limit can only be between 1 and 100, whereas the configured value is: {}".format(limit)) + return + + fs_stats, curr_size = get_stats(os.path.join(dir, file_ptrn)) + orig_dumps = len(fs_stats) + disk_stats = shutil.disk_usage(dir) + max_limit_bytes = math.floor((limit*disk_stats.total/100)) + + if curr_size <= max_limit_bytes: + return + + num_bytes_to_del = curr_size - max_limit_bytes + num_deleted = 0 + removed_files = [] + # Preserve the latest file created + while num_deleted < num_bytes_to_del and len(fs_stats) > 1: + stat = fs_stats.pop() + try: + os.remove(stat[2]) + removed_files.append(stat[2]) + except OSError as error: + continue + num_deleted += stat[1] + syslog.syslog(syslog.LOG_INFO, "{} deleted from {}".format(pretty_size(num_deleted), dir)) + return removed_files \ No newline at end of file From 6b5ba3fbad76fc0f57899cfcd1427d048a043f1e Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Sun, 8 Aug 2021 05:37:53 +0000 Subject: [PATCH 04/60] coredump_gen_script in progress Signed-off-by: Vivek Reddy Karri --- scripts/coredump_gen_handler | 38 +++++---- .../coredump_gen_handler_test.py | 82 ++++++++++++++++++- .../shared_state_mock.py | 2 + .../techsupport_cleanup_test.py | 4 +- 4 files changed, 107 insertions(+), 19 deletions(-) diff --git a/scripts/coredump_gen_handler b/scripts/coredump_gen_handler index 3e27c1f04a..c5f0ef8a21 100644 --- a/scripts/coredump_gen_handler +++ b/scripts/coredump_gen_handler @@ -24,7 +24,7 @@ class CoreDumpCreateHandle(): self.core_ts_map = {} self.curr_ts_list = [] - def handle_techsupport_creation_event(self): + def handle_core_dump_creation_event(self): file_path = os.path.join(CORE_DUMP_DIR, self.core_name) if not verify_recent_file_creation(file_path): return @@ -32,19 +32,20 @@ class CoreDumpCreateHandle(): self.db = SonicV2Connector(host="127.0.0.1") self.db.connect(CFG_DB) self.db.connect(STATE_DB) - if cfg_db.get(CFG_DB, AUTO_TS, CFG_STATE) != "enabled": - sys.exit() + if self.db.get(CFG_DB, AUTO_TS, CFG_STATE) != "enabled": + return self.fetch_critical_procs() proc = self.core_name.split(".")[0] if proc not in self.proc_mp: - sys.exit() # Only handles the critical processes + print(self.proc_mp) + return # Only handles the critical processes FEATURE_KEY = FEATURE.format(self.proc_mp[proc]) - if cfg_db.get(CFG_DB, 
FEATURE_KEY, TS) != "enabled": - sys.exit() # Should be set "enabled" in the FEATURE Table + if self.db.get(CFG_DB, FEATURE_KEY, TS) != "enabled": + return # Should be set "enabled" in the FEATURE Table - global_cooloff = cfg_db.get(CFG_DB, AUTO_TS, COOLOFF) - proc_cooloff = cfg_db.get(CFG_DB, FEATURE_KEY, COOLOFF) + global_cooloff = self.db.get(CFG_DB, AUTO_TS, COOLOFF) + proc_cooloff = self.db.get(CFG_DB, FEATURE_KEY, COOLOFF) cooloff_passed = self.verify_cooloff(global_cooloff, proc_cooloff, proc) if cooloff_passed: @@ -53,9 +54,13 @@ class CoreDumpCreateHandle(): self.db.set(STATE_DB, TS_MAP, new_file[0], "{};{}".format(self.core_name, int(time.time()))) core_usage = 0 - if cfg_db.hexists(CFG_DB, AUTO_TS, CFG_CORE_USAGE): - core_usage = cfg_db.get(CFG_DB, AUTO_TS, CFG_CORE_USAGE) - + if self.db.hexists(CFG_DB, AUTO_TS, CFG_CORE_USAGE): + core_usage = self.db.get(CFG_DB, AUTO_TS, CFG_CORE_USAGE) + try: + core_usage = int(core_usage) + except: + core_usage = 0 + if core_usage == 0: _ , num_bytes = get_stats(os.path.join(CORE_DUMP_DIR, CORE_DUMP_PTRN)) syslog.syslog(syslog.LOG_INFO, "No Cleanup is performed, current size occupied: {}".format(pretty_size(num_bytes))) @@ -77,7 +82,8 @@ class CoreDumpCreateHandle(): def verify_cooloff(self, global_cooloff, proc_cooloff, proc): """Verify both the global cooloff and per-proc cooloff has passed""" self.curr_ts_list = get_ts_dumps(True) - if len(curr_list) != 0: + if global_cooloff and len(self.curr_ts_list) != 0: + global_cooloff = int(global_cooloff) last_ts_dump_creation = os.path.getmtime(self.curr_ts_list[-1]) if time.time() - last_ts_dump_creation < global_cooloff: syslog.syslog(syslog.LOG_INFO, "Cooloff period has not yet passed. No Techsupport Invocation is performed ") @@ -86,8 +92,9 @@ class CoreDumpCreateHandle(): ts_map = self.db.get_all(STATE_DB, TS_MAP) self.parse_ts_map(ts_map) - if proc in self.core_ts_map: + if proc_cooloff and proc in self.core_ts_map: last_creation_time = self.core_ts_map[proc][0] + proc_cooloff = int(proc_cooloff) if time.time() - last_creation_time < proc_cooloff: syslog.syslog(syslog.LOG_INFO, "Cooloff period for {} prcess has not yet passed. 
No Techsupport Invocation is performed".format(proc)) return False @@ -106,9 +113,10 @@ class CoreDumpCreateHandle(): def fetch_critical_procs(self): """Fetches the critical_procs and corresponding docker names""" keys = self.db.keys(CFG_DB, FEATURE.format("*")) - containers = [key.split()[-1] for key in keys] + containers = [key.split("|")[-1] for key in keys] + print(keys, containers) for container in containers: - rc, stdout, _ = subprocess_exec("docker exec -t {} cat /etc/supervisor/critical_processes".format(docker)) + rc, stdout, _ = subprocess_exec("docker exec -t {} cat /etc/supervisor/critical_processes".format(container)) if rc != 0: continue procs = stdout.split() diff --git a/tests/auto_techsupport_tests/coredump_gen_handler_test.py b/tests/auto_techsupport_tests/coredump_gen_handler_test.py index ec35757481..cb211b54b0 100644 --- a/tests/auto_techsupport_tests/coredump_gen_handler_test.py +++ b/tests/auto_techsupport_tests/coredump_gen_handler_test.py @@ -18,7 +18,85 @@ # Load the file under test script_path = os.path.join(scripts_path, 'coredump_gen_handler') -cdump_handle = load_module_from_source('coredump_gen_handler', script_path) +cdump_mod = load_module_from_source('coredump_gen_handler', script_path) # Mock Handle to the data inside the Redis -RedisHandle = RedisSingleton.getInstance() \ No newline at end of file +RedisHandle = RedisSingleton.getInstance() + +def set_auto_ts_cfg(**kwargs): + state = kwargs[cdump_mod.CFG_STATE] if cdump_mod.CFG_STATE in kwargs else "disabled" + cooloff = kwargs[cdump_mod.COOLOFF] if cdump_mod.COOLOFF in kwargs else "0" + core_usage = kwargs[cdump_mod.CFG_CORE_USAGE] if cdump_mod.CFG_CORE_USAGE in kwargs else "0" + if cdump_mod.CFG_DB not in RedisHandle.data: + RedisHandle.data[cdump_mod.CFG_DB] = {} + RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.AUTO_TS] = {cdump_mod.CFG_STATE : state, + cdump_mod.COOLOFF : cooloff, + cdump_mod.CFG_CORE_USAGE : core_usage} + +def set_feature_table_cfg(ts_swss="disabled", ts_syncd="disabled", cooloff_swss="0", cooloff_syncd="0"): + if cdump_mod.CFG_DB not in RedisHandle.data: + RedisHandle.data[cdump_mod.CFG_DB] = {} + RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.FEATURE.format("swss")] = {cdump_mod.TS : ts_swss, + cdump_mod.COOLOFF : cooloff_swss} + RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.FEATURE.format("syncd")] = {cdump_mod.TS : ts_syncd, + cdump_mod.COOLOFF : cooloff_syncd} + +swss_critical_proc = """\ +program:orchagent +program:portsyncd +program:neighsyncd +program:vlanmgrd +program:intfmgrd +program:portmgrd +program:buffermgrd +program:vrfmgrd +program:nbrmgrd +program:vxlanmgrd +""" + +syncd_critical_proc = """\ +program:syncd +""" + +def mock_generic_cmd(cmd): + if "docker exec -t swss cat /etc/supervisor/critical_processes" in cmd: + return 0, swss_critical_proc, "" + elif "docker exec -t syncd cat /etc/supervisor/critical_processes" in cmd: + return 0, syncd_critical_proc, "" + else: + print("ERR: Invalid Command Invoked: " + cmd) + return 1, "", "Invalid Command: " + +class TestCoreDumpCreationEvent(unittest.TestCase): + + def setUp(self): + self.orig_time_buf = cdump_mod.TIME_BUF + cdump_mod.TIME_BUF = 0.5 # Patch the buf + + def tearDown(self): + cdump_mod.TIME_BUF = self.orig_time_buf + + def test_invoc_ts_without_cooloff(self): + """ + Scenario: AUTO_TECHSUPPORT is enabled. 
No gloal Cooloff and per process cooloff specified + Check if techsupport is invoked and file is created + """ + RedisSingleton.clearState() + set_auto_ts_cfg(state="enabled") + set_feature_table_cfg(ts_swss="enabled") + with Patcher() as patcher: + def mock_cmd(cmd): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random999.tar.gz") + else: + return mock_generic_cmd(cmd) + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random998.tar.gz") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") + cls.handle_core_dump_creation_event() + assert "sonic_dump_random999.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random998.tar.gz" in os.listdir(cdump_mod.TS_DIR) + \ No newline at end of file diff --git a/tests/auto_techsupport_tests/shared_state_mock.py b/tests/auto_techsupport_tests/shared_state_mock.py index 591f2dc6db..f36b57e14d 100644 --- a/tests/auto_techsupport_tests/shared_state_mock.py +++ b/tests/auto_techsupport_tests/shared_state_mock.py @@ -56,6 +56,8 @@ def get_all(self, db_name, key): return self.redis.data.get(db_name, {}).get(key, {}) def set(self, db_name, key, field, value, blocking=True): + if key not in self.redis.data[db_name]: + self.redis.data[db_name][key] = {} self.redis.data[db_name][key][field] = value def hmset(self, db_name, key, hash): diff --git a/tests/auto_techsupport_tests/techsupport_cleanup_test.py b/tests/auto_techsupport_tests/techsupport_cleanup_test.py index 1060a5806c..ce4119aa04 100644 --- a/tests/auto_techsupport_tests/techsupport_cleanup_test.py +++ b/tests/auto_techsupport_tests/techsupport_cleanup_test.py @@ -25,9 +25,9 @@ def set_auto_ts_cfg(**kwargs): state = kwargs[ts_mod.CFG_STATE] if ts_mod.CFG_STATE in kwargs else "disabled" - max_ts = kwargs[ts_mod.CFG_MAX_TS] if ts_mod.CFG_MAX_TS in kwargs else "0" + max_ts = kwargs[ts_mod.COOLOFF] if ts_mod.COOLOFF in kwargs else "0" RedisHandle.data[ts_mod.CFG_DB] = {ts_mod.AUTO_TS : {ts_mod.CFG_STATE : state, ts_mod.CFG_MAX_TS : max_ts}} - + class TestTechsupportCreationEvent(unittest.TestCase): def setUp(self): From 843d329a504d0b2efb6ce1b60e127ebb4ebf59c8 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Mon, 9 Aug 2021 07:56:54 +0000 Subject: [PATCH 05/60] CoredumpHandler UT's completd and script fixed Signed-off-by: Vivek Reddy Karri --- scripts/coredump_gen_handler | 13 +- .../coredump_gen_handler_test.py | 220 ++++++++++++++++-- 2 files changed, 212 insertions(+), 21 deletions(-) diff --git a/scripts/coredump_gen_handler b/scripts/coredump_gen_handler index c5f0ef8a21..198b31d8f6 100644 --- a/scripts/coredump_gen_handler +++ b/scripts/coredump_gen_handler @@ -51,7 +51,7 @@ class CoreDumpCreateHandle(): if cooloff_passed: new_file = self.invoke_ts_cmd() if new_file: - self.db.set(STATE_DB, TS_MAP, new_file[0], "{};{}".format(self.core_name, int(time.time()))) + self.db.set(STATE_DB, TS_MAP, os.path.basename(new_file[0]), "{};{}".format(self.core_name, int(time.time()))) core_usage = 0 if self.db.hexists(CFG_DB, AUTO_TS, CFG_CORE_USAGE): @@ -83,9 +83,11 @@ class CoreDumpCreateHandle(): """Verify both the global cooloff and per-proc cooloff has passed""" self.curr_ts_list = get_ts_dumps(True) if global_cooloff and len(self.curr_ts_list) != 0: - global_cooloff = int(global_cooloff) + global_cooloff = float(global_cooloff) last_ts_dump_creation = os.path.getmtime(self.curr_ts_list[-1]) + 
print(last_ts_dump_creation, global_cooloff, time.time()) if time.time() - last_ts_dump_creation < global_cooloff: + print("Reached Here!!!@") syslog.syslog(syslog.LOG_INFO, "Cooloff period has not yet passed. No Techsupport Invocation is performed ") return False @@ -93,8 +95,8 @@ class CoreDumpCreateHandle(): self.parse_ts_map(ts_map) if proc_cooloff and proc in self.core_ts_map: - last_creation_time = self.core_ts_map[proc][0] - proc_cooloff = int(proc_cooloff) + last_creation_time = self.core_ts_map[proc][0][0] + proc_cooloff = float(proc_cooloff) if time.time() - last_creation_time < proc_cooloff: syslog.syslog(syslog.LOG_INFO, "Cooloff period for {} prcess has not yet passed. No Techsupport Invocation is performed".format(proc)) return False @@ -102,7 +104,8 @@ class CoreDumpCreateHandle(): def parse_ts_map(self, ts_map): """Create core_dump, ts_dump & creation_time map""" - for ts_dump, tup in ts_map: + print(ts_map) + for ts_dump, tup in ts_map.items(): core_dump, creation_time = tup.split(";") if core_dump not in self.core_ts_map: self.core_ts_map[core_dump] = [] diff --git a/tests/auto_techsupport_tests/coredump_gen_handler_test.py b/tests/auto_techsupport_tests/coredump_gen_handler_test.py index cb211b54b0..30ede6b4ea 100644 --- a/tests/auto_techsupport_tests/coredump_gen_handler_test.py +++ b/tests/auto_techsupport_tests/coredump_gen_handler_test.py @@ -1,4 +1,4 @@ -import os +import os, time import sys import pyfakefs import unittest @@ -33,13 +33,11 @@ def set_auto_ts_cfg(**kwargs): cdump_mod.COOLOFF : cooloff, cdump_mod.CFG_CORE_USAGE : core_usage} -def set_feature_table_cfg(ts_swss="disabled", ts_syncd="disabled", cooloff_swss="0", cooloff_syncd="0"): +def set_feature_table_cfg(ts="disabled", cooloff="0", container_name="swss"): if cdump_mod.CFG_DB not in RedisHandle.data: RedisHandle.data[cdump_mod.CFG_DB] = {} - RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.FEATURE.format("swss")] = {cdump_mod.TS : ts_swss, - cdump_mod.COOLOFF : cooloff_swss} - RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.FEATURE.format("syncd")] = {cdump_mod.TS : ts_syncd, - cdump_mod.COOLOFF : cooloff_syncd} + RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.FEATURE.format(container_name)] = {cdump_mod.TS : ts, + cdump_mod.COOLOFF : cooloff} swss_critical_proc = """\ program:orchagent @@ -68,22 +66,15 @@ def mock_generic_cmd(cmd): return 1, "", "Invalid Command: " class TestCoreDumpCreationEvent(unittest.TestCase): - - def setUp(self): - self.orig_time_buf = cdump_mod.TIME_BUF - cdump_mod.TIME_BUF = 0.5 # Patch the buf - - def tearDown(self): - cdump_mod.TIME_BUF = self.orig_time_buf - + def test_invoc_ts_without_cooloff(self): """ - Scenario: AUTO_TECHSUPPORT is enabled. No gloal Cooloff and per process cooloff specified + Scenario: AUTO_TECHSUPPORT is enabled. No global Cooloff and per process cooloff specified Check if techsupport is invoked and file is created """ RedisSingleton.clearState() set_auto_ts_cfg(state="enabled") - set_feature_table_cfg(ts_swss="enabled") + set_feature_table_cfg(ts="enabled") with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) @@ -99,4 +90,201 @@ def mock_cmd(cmd): cls.handle_core_dump_creation_event() assert "sonic_dump_random999.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random998.tar.gz" in os.listdir(cdump_mod.TS_DIR) + + def test_invoc_ts_state_db_update(self): + """ + Scenario: AUTO_TECHSUPPORT is enabled. 
No global Cooloff and per process cooloff specified + Check if techsupport is invoked, file is created and State DB in updated + """ + RedisSingleton.clearState() + set_auto_ts_cfg(state="enabled") + set_feature_table_cfg(ts="enabled") + RedisHandle.data["STATE_DB"] = {} + RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "portsyncd;1575985", + "sonic_dump_random2.tar.gz" : "syncd;1575988"} + with Patcher() as patcher: + def mock_cmd(cmd): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return mock_generic_cmd(cmd) + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") + cls.handle_core_dump_creation_event() + assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "sonic_dump_random3.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "orchagent" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP]["sonic_dump_random3.tar.gz"] + + def test_global_cooloff(self): + """ + Scenario: AUTO_TECHSUPPORT is enabled. But global cooloff is not passed + Check if techsupport is not invoked + """ + RedisSingleton.clearState() + set_auto_ts_cfg(state="enabled", cooloff="1") + set_feature_table_cfg(ts="enabled") + RedisHandle.data["STATE_DB"] = {} + RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "portsyncd;1575985", + "sonic_dump_random2.tar.gz" : "syncd;1575988"} + with Patcher() as patcher: + def mock_cmd(cmd): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return mock_generic_cmd(cmd) + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") + cls.handle_core_dump_creation_event() + assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "sonic_dump_random3.tar.gz" not in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + + def test_per_proc_cooloff(self): + """ + Scenario: AUTO_TECHSUPPORT is enabled. 
Global Cooloff is passed but per process isn't + Check if techsupport is not invoked + """ + RedisSingleton.clearState() + set_auto_ts_cfg(state="enabled", cooloff="0.25") + set_feature_table_cfg(ts="enabled", cooloff="10") + RedisHandle.data["STATE_DB"] = {} + RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "orchagent;{}".format(int(time.time())), + "sonic_dump_random2.tar.gz" : "syncd;1575988"} + with Patcher() as patcher: + def mock_cmd(cmd): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return mock_generic_cmd(cmd) + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") + time.sleep(0.5) # wait for cooloff to pass + cls.handle_core_dump_creation_event() + assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "sonic_dump_random3.tar.gz" not in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + + def test_invoc_ts_after_cooloff(self): + """ + Scenario: AUTO_TECHSUPPORT is enabled. Global Cooloff is passed but per process isn't + Check if techsupport is not invoked + """ + RedisSingleton.clearState() + set_auto_ts_cfg(state="enabled", cooloff="0.1") + set_feature_table_cfg(ts="enabled", cooloff="0.5") + RedisHandle.data["STATE_DB"] = {} + RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "orchagent;{}".format(int(time.time())), + "sonic_dump_random2.tar.gz" : "syncd;1575988"} + with Patcher() as patcher: + def mock_cmd(cmd): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return mock_generic_cmd(cmd) + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") + time.sleep(0.5) # wait for cooloff to pass + cls.handle_core_dump_creation_event() + assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "sonic_dump_random3.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "orchagent" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP]["sonic_dump_random3.tar.gz"] + + def test_non_critical_proc(self): + """ + Scenario: AUTO_TECHSUPPORT is enabled. 
A Non-critical Process dump is used to invoke this script + Check if techsupport is not invoked + """ + RedisSingleton.clearState() + set_auto_ts_cfg(state="enabled") + set_feature_table_cfg(ts="enabled") + RedisHandle.data["STATE_DB"] = {} + RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "portsyncd;1575985", + "sonic_dump_random2.tar.gz" : "syncd;1575988"} + with Patcher() as patcher: + def mock_cmd(cmd): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return mock_generic_cmd(cmd) + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") + patcher.fs.create_file("/var/core/snmpd.12345.123.core.gz") + cls = cdump_mod.CoreDumpCreateHandle("snmpd.12345.123.core.gz") + cls.handle_core_dump_creation_event() + assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "sonic_dump_random3.tar.gz" not in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + + def test_feature_table_not_set(self): + """ + Scenario: AUTO_TECHSUPPORT is enabled. A critical Process dump is used to invoke this script + But it is not enabled in FEATURE|* table. Check if techsupport is not invoked + """ + RedisSingleton.clearState() + set_auto_ts_cfg(state="enabled") + set_feature_table_cfg(ts="disabled", cooloff="0.2", container_name="syncd") + RedisHandle.data["STATE_DB"] = {} + RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "portsyncd;{}".format(int(time.time())), + "sonic_dump_random2.tar.gz" : "syncd;1575988"} + with Patcher() as patcher: + def mock_cmd(cmd): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return mock_generic_cmd(cmd) + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") + patcher.fs.create_file("/var/core/portsyncd.12345.123.core.gz") + cls = cdump_mod.CoreDumpCreateHandle("portsyncd.12345.123.core.gz") + time.sleep(0.2) + cls.handle_core_dump_creation_event() + assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "sonic_dump_random3.tar.gz" not in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] \ No newline at end of file From c89f15f43da792e0327061ec26b9d3b794c53fa9 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Mon, 9 Aug 2021 20:08:20 +0000 Subject: [PATCH 06/60] Added original Setup.py Signed-off-by: Vivek Reddy Karri --- setup.py | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-) diff --git a/setup.py b/setup.py index 90d3ca4efd..5847b6e6ee 100644 --- a/setup.py +++ b/setup.py @@ -5,21 +5,9 @@ # under scripts/. 
Consider stop using scripts and use console_scripts instead # # https://stackoverflow.com/questions/18787036/difference-between-entry-points-console-scripts-and-scripts-in-setup-py -import fastentrypoints, sys +import fastentrypoints from setuptools import setup -from setuptools.command.test import test as TestCommand - -class PyTest(TestCommand): - user_options = [("pytest-args=", "a", "Arguments to pass to pytest")] - def initialize_options(self): - TestCommand.initialize_options(self) - self.pytest_args = "" - def run_tests(self): - import shlex - import pytest - errno = pytest.main(shlex.split(self.pytest_args)) - sys.exit(errno) setup( name='sonic-utilities', @@ -43,8 +31,6 @@ def run_tests(self): 'crm', 'debug', 'generic_config_updater', - 'dump', - 'dump.plugins', 'pfcwd', 'sfputil', 'ssdutil', @@ -85,8 +71,7 @@ def run_tests(self): 'filter_fdb_input/*', 'pfcwd_input/*', 'wm_input/*', - 'ecn_input/*', - 'dump_input/*'] + 'ecn_input/*'] }, scripts=[ 'scripts/aclshow', @@ -145,7 +130,8 @@ def run_tests(self): 'scripts/watermarkstat', 'scripts/watermarkcfg', 'scripts/sonic-kdump-config', - 'scripts/centralize_database' + 'scripts/centralize_database', + 'scripts/null_route_helper' ], entry_points={ 'console_scripts': [ @@ -156,7 +142,6 @@ def run_tests(self): 'counterpoll = counterpoll.main:cli', 'crm = crm.main:cli', 'debug = debug.main:cli', - 'dump = dump.main:dump', 'filter_fdb_entries = fdbutil.filter_fdb_entries:main', 'pfcwd = pfcwd.main:cli', 'sfputil = sfputil.main:cli', @@ -232,6 +217,5 @@ def run_tests(self): 'Topic :: Utilities', ], keywords='sonic SONiC utilities command line cli CLI', - cmdclass={"pytest": PyTest}, test_suite='setup.get_test_suite' ) From 4b1faa270ead0c6294a309523a4d46d61beeacd4 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Wed, 11 Aug 2021 08:05:58 +0000 Subject: [PATCH 07/60] Test Changes Signed-off-by: Vivek Reddy Karri --- scripts/coredump-compress | 2 + scripts/coredump_gen_handler | 22 +++++-- scripts/generate_dump | 15 ++++- scripts/techsupport_cleanup | 6 +- setup.py | 15 ++++- .../coredump_gen_handler_test.py | 66 +++++++++++++++---- .../shared_state_mock.py | 4 +- .../techsupport_cleanup_test.py | 18 ++--- utilities_common/auto_techsupport_helper.py | 3 +- 9 files changed, 111 insertions(+), 40 deletions(-) diff --git a/scripts/coredump-compress b/scripts/coredump-compress index 53381fc00e..783bc9e02d 100755 --- a/scripts/coredump-compress +++ b/scripts/coredump-compress @@ -15,3 +15,5 @@ if [ $# > 0 ]; then fi /bin/gzip -1 - > /var/core/${PREFIX}core.gz + +nohup /usr/local/bin/coredump_gen_handler ${PREFIX}core.gz & \ No newline at end of file diff --git a/scripts/coredump_gen_handler b/scripts/coredump_gen_handler index 198b31d8f6..e7582dcf8f 100644 --- a/scripts/coredump_gen_handler +++ b/scripts/coredump_gen_handler @@ -49,7 +49,8 @@ class CoreDumpCreateHandle(): cooloff_passed = self.verify_cooloff(global_cooloff, proc_cooloff, proc) if cooloff_passed: - new_file = self.invoke_ts_cmd() + since_cfg = self.get_since_arg() + new_file = self.invoke_ts_cmd(since_cfg) if new_file: self.db.set(STATE_DB, TS_MAP, os.path.basename(new_file[0]), "{};{}".format(self.core_name, int(time.time()))) @@ -68,8 +69,19 @@ class CoreDumpCreateHandle(): cleanup_process(core_usage, CORE_DUMP_PTRN, CORE_DUMP_DIR) - def invoke_ts_cmd(self): - _, out, _ = subprocess_exec(["show", "techsupport"]) + def get_since_arg(self): + since_cfg = self.db.get(CFG_DB, AUTO_TS, CFG_SINCE) + if not since_cfg: + return SINCE_DEFAULT + rc, _, _ = subprocess_exec(["date", 
"--date=\"{}\"".format(since_cfg)]) + if rc != 0: + return since_cfg + return SINCE_DEFAULT + + + def invoke_ts_cmd(self, since_cfg): + since_cfg = "\"" + since_cfg + "\"" + _, out, _ = subprocess_exec(["show", "techsupport", "--since", since_cfg]) new_list = get_ts_dumps(True) diff = list(set(new_list).difference(set(self.curr_ts_list))) self.curr_ts_list = new_list @@ -119,7 +131,7 @@ class CoreDumpCreateHandle(): containers = [key.split("|")[-1] for key in keys] print(keys, containers) for container in containers: - rc, stdout, _ = subprocess_exec("docker exec -t {} cat /etc/supervisor/critical_processes".format(container)) + rc, stdout, _ = subprocess_exec(["docker", "exec", "-t", container, "cat", "/etc/supervisor/critical_processes"]) if rc != 0: continue procs = stdout.split() @@ -131,7 +143,7 @@ def main(): parser = argparse.ArgumentParser(description='Auto Techsupport Invocation and CoreDump Mgmt Script') parser.add_argument('name', type=str, help='Core Dump Name', required=True) args = parser.parse_args() - syslog.openlog(logoption=syslog.LOG_PID) + syslog.openlog(logoption=syslog.LOG_PID) cls = CoreDumpCreateHandle() cls.handle_core_dump_creation_event(args.name) diff --git a/scripts/generate_dump b/scripts/generate_dump index bc33c0bcc8..c94d810e6e 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1018,6 +1018,14 @@ save_counter_snapshot() { save_cmd_all_ns "ifconfig -a" "ifconfig.counters_$idx" } +handle_error() { + if [ "$1" != "0" ]; then + echo "ERR: RC:-$1 observed on line $2" >&2 + RETURN_CODE=1 + fi + return $1 +} + ############################################################################### # Main generate_dump routine # Globals: @@ -1028,6 +1036,7 @@ save_counter_snapshot() { # None ############################################################################### main() { + trap 'handle_error $? $LINENO' ERR local start_t=0 local end_t=0 if [ `whoami` != root ] && ! $NOOP; @@ -1208,7 +1217,9 @@ main() { echo "WARNING: gzip operation appears to have failed." >&2 fi fi - + + nohup /usr/local/bin/techsupport_cleanup ${TARFILE} & + echo ${TARFILE} } @@ -1269,7 +1280,7 @@ OPTIONS EOF } -while getopts ":xnvhzas:t:" opt; do +while getopts ":xnvhzas:t:" opt; do case $opt in x) # enable bash debugging diff --git a/scripts/techsupport_cleanup b/scripts/techsupport_cleanup index 84758127b3..28f0c47bcc 100644 --- a/scripts/techsupport_cleanup +++ b/scripts/techsupport_cleanup @@ -33,11 +33,13 @@ def handle_techsupport_creation_event(dump_name): db = SonicV2Connector(host="127.0.0.1") db.connect(CFG_DB) db.connect(STATE_DB) - + + print(db.get_all(CFG_DB, AUTO_TS)) + if db.get(CFG_DB, AUTO_TS, CFG_STATE) != "enabled": return - max_ts = db.get("CONFIG_DB", AUTO_TS, CFG_MAX_TS) + max_ts = db.get(CFG_DB, AUTO_TS, CFG_MAX_TS) if max_ts: max_ts = int(max_ts) else: diff --git a/setup.py b/setup.py index 5847b6e6ee..46eee5f925 100644 --- a/setup.py +++ b/setup.py @@ -5,9 +5,21 @@ # under scripts/. 
diff --git a/setup.py b/setup.py
index 5847b6e6ee..46eee5f925 100644
--- a/setup.py
+++ b/setup.py
@@ -5,9 +5,21 @@
 # under scripts/. Consider stop using scripts and use console_scripts instead
 #
 # https://stackoverflow.com/questions/18787036/difference-between-entry-points-console-scripts-and-scripts-in-setup-py
-import fastentrypoints
+import fastentrypoints, sys
 from setuptools import setup
+from setuptools.command.test import test as TestCommand
+
+class PyTest(TestCommand):
+    user_options = [("pytest-args=", "a", "Arguments to pass to pytest")]
+    def initialize_options(self):
+        TestCommand.initialize_options(self)
+        self.pytest_args = ""
+    def run_tests(self):
+        import shlex
+        import pytest
+        errno = pytest.main(shlex.split(self.pytest_args))
+        sys.exit(errno)

 setup(
     name='sonic-utilities',
@@ -217,5 +229,6 @@
     'Topic :: Utilities',
     ],
     keywords='sonic SONiC utilities command line cli CLI',
+    cmdclass={"pytest": PyTest},
     test_suite='setup.get_test_suite'
 )
diff --git a/tests/auto_techsupport_tests/coredump_gen_handler_test.py b/tests/auto_techsupport_tests/coredump_gen_handler_test.py
index 30ede6b4ea..0eea8b82c5 100644
--- a/tests/auto_techsupport_tests/coredump_gen_handler_test.py
+++ b/tests/auto_techsupport_tests/coredump_gen_handler_test.py
@@ -4,11 +4,8 @@
 import unittest
 from pyfakefs.fake_filesystem_unittest import Patcher
 from swsscommon import swsscommon
-from .shared_state_mock import RedisSingleton, MockConn
 from utilities_common.general import load_module_from_source
-
-# Mock the SonicV2Connector
-swsscommon.SonicV2Connector = MockConn
+from .shared_state_mock import RedisSingleton, MockConn

 curr_test_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "../"))
 test_dir_path = os.path.dirname(curr_test_path)
@@ -20,6 +17,9 @@
 script_path = os.path.join(scripts_path, 'coredump_gen_handler')
 cdump_mod = load_module_from_source('coredump_gen_handler', script_path)

+# Mock the SonicV2Connector
+cdump_mod.SonicV2Connector = MockConn
+
 # Mock Handle to the data inside the Redis
 RedisHandle = RedisSingleton.getInstance()

@@ -27,6 +27,7 @@ def set_auto_ts_cfg(**kwargs):
     state = kwargs[cdump_mod.CFG_STATE] if cdump_mod.CFG_STATE in kwargs else "disabled"
     cooloff = kwargs[cdump_mod.COOLOFF] if cdump_mod.COOLOFF in kwargs else "0"
     core_usage = kwargs[cdump_mod.CFG_CORE_USAGE] if cdump_mod.CFG_CORE_USAGE in kwargs else "0"
+    since_cfg = kwargs[cdump_mod.CFG_SINCE] if cdump_mod.CFG_SINCE in kwargs else "None"
     if cdump_mod.CFG_DB not in RedisHandle.data:
         RedisHandle.data[cdump_mod.CFG_DB] = {}
     RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.AUTO_TS] = {cdump_mod.CFG_STATE : state,
@@ -61,8 +62,11 @@ def mock_generic_cmd(cmd):
         return 0, swss_critical_proc, ""
     elif "docker exec -t syncd cat /etc/supervisor/critical_processes" in cmd:
         return 0, syncd_critical_proc, ""
+    elif "date --date=\"2 days ago\"" in cmd:
+        return 0, "", ""
+    elif "date --date=\"random\"" in cmd:
+        return 1, "", "Invalid Date Format"
     else:
-        print("ERR: Invalid Command Invoked: " + cmd)
         return 1, "", "Invalid Command: "

 class TestCoreDumpCreationEvent(unittest.TestCase):
@@ -81,7 +85,7 @@ def mock_cmd(cmd):
             if "show techsupport" in cmd_str:
                 patcher.fs.create_file("/var/dump/sonic_dump_random999.tar.gz")
             else:
-                return mock_generic_cmd(cmd)
+                return mock_generic_cmd(cmd_str)
             return 0, "", ""
         cdump_mod.subprocess_exec = mock_cmd
         patcher.fs.create_file("/var/dump/sonic_dump_random998.tar.gz")
@@ -108,7 +112,7 @@ def mock_cmd(cmd):
             if "show techsupport" in cmd_str:
                 patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz")
             else:
-                return mock_generic_cmd(cmd)
+                return mock_generic_cmd(cmd_str)
             return 0, "", ""
         cdump_mod.subprocess_exec =
mock_cmd patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") @@ -141,7 +145,7 @@ def mock_cmd(cmd): if "show techsupport" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") else: - return mock_generic_cmd(cmd) + return mock_generic_cmd(cmd_str) return 0, "", "" cdump_mod.subprocess_exec = mock_cmd patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") @@ -173,7 +177,7 @@ def mock_cmd(cmd): if "show techsupport" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") else: - return mock_generic_cmd(cmd) + return mock_generic_cmd(cmd_str) return 0, "", "" cdump_mod.subprocess_exec = mock_cmd patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") @@ -191,8 +195,8 @@ def mock_cmd(cmd): def test_invoc_ts_after_cooloff(self): """ - Scenario: AUTO_TECHSUPPORT is enabled. Global Cooloff is passed but per process isn't - Check if techsupport is not invoked + Scenario: AUTO_TECHSUPPORT is enabled. Global Cooloff and per proc cooloff is passed + Check if techsupport is invoked """ RedisSingleton.clearState() set_auto_ts_cfg(state="enabled", cooloff="0.1") @@ -206,7 +210,7 @@ def mock_cmd(cmd): if "show techsupport" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") else: - return mock_generic_cmd(cmd) + return mock_generic_cmd(cmd_str) return 0, "", "" cdump_mod.subprocess_exec = mock_cmd patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") @@ -240,7 +244,7 @@ def mock_cmd(cmd): if "show techsupport" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") else: - return mock_generic_cmd(cmd) + return mock_generic_cmd(cmd_str) return 0, "", "" cdump_mod.subprocess_exec = mock_cmd patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") @@ -272,7 +276,7 @@ def mock_cmd(cmd): if "show techsupport" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") else: - return mock_generic_cmd(cmd) + return mock_generic_cmd(cmd_str) return 0, "", "" cdump_mod.subprocess_exec = mock_cmd patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") @@ -287,4 +291,38 @@ def mock_cmd(cmd): assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] assert "sonic_dump_random3.tar.gz" not in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + + def test_since_argument(self): + """ + Scenario: AUTO_TECHSUPPORT is enabled. 
Global Cooloff and per proc cooloff is passed + Check if techsupport is invoked and since argument in properly applied + """ + RedisSingleton.clearState() + set_auto_ts_cfg(state="enabled", cooloff="0.1", since="random") + set_feature_table_cfg(ts="enabled", cooloff="0.5") + RedisHandle.data["STATE_DB"] = {} + RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "orchagent;{}".format(int(time.time())), + "sonic_dump_random2.tar.gz" : "syncd;1575988"} + with Patcher() as patcher: + def mock_cmd(cmd): + cmd_str = " ".join(cmd) + if "show techsupport --since \"2 days ago\"" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return mock_generic_cmd(cmd_str) + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") + time.sleep(0.5) # wait for cooloff to pass + cls.handle_core_dump_creation_event() + assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "sonic_dump_random3.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "orchagent" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP]["sonic_dump_random3.tar.gz"] \ No newline at end of file diff --git a/tests/auto_techsupport_tests/shared_state_mock.py b/tests/auto_techsupport_tests/shared_state_mock.py index f36b57e14d..9a978ac350 100644 --- a/tests/auto_techsupport_tests/shared_state_mock.py +++ b/tests/auto_techsupport_tests/shared_state_mock.py @@ -40,7 +40,7 @@ def connect(self, db_name): self.redis.data[db_name] = {} def get(self, db_name, key, field): - return self.redis.data.get(db_name, {}).get(key, {}).get(field, "") + return self.redis.data[db_name].get(key, {}).get(field, "") def keys(self, db_name, pattern): pattern = re.escape(pattern) @@ -53,7 +53,7 @@ def keys(self, db_name, pattern): return filtered_keys def get_all(self, db_name, key): - return self.redis.data.get(db_name, {}).get(key, {}) + return self.redis.data[db_name].get(key, {}) def set(self, db_name, key, field, value, blocking=True): if key not in self.redis.data[db_name]: diff --git a/tests/auto_techsupport_tests/techsupport_cleanup_test.py b/tests/auto_techsupport_tests/techsupport_cleanup_test.py index ce4119aa04..989e6d627d 100644 --- a/tests/auto_techsupport_tests/techsupport_cleanup_test.py +++ b/tests/auto_techsupport_tests/techsupport_cleanup_test.py @@ -4,11 +4,8 @@ import unittest from pyfakefs.fake_filesystem_unittest import Patcher from swsscommon import swsscommon -from .shared_state_mock import RedisSingleton, MockConn from utilities_common.general import load_module_from_source - -# Mock the SonicV2Connector -swsscommon.SonicV2Connector = MockConn +from .shared_state_mock import RedisSingleton, MockConn curr_test_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "../")) test_dir_path = os.path.dirname(curr_test_path) @@ -20,23 +17,19 @@ script_path = os.path.join(scripts_path, 'techsupport_cleanup') ts_mod = load_module_from_source('techsupport_cleanup', script_path) +# Mock the 
SonicV2Connector +ts_mod.SonicV2Connector = MockConn + # Mock Handle to the data inside the Redis RedisHandle = RedisSingleton.getInstance() def set_auto_ts_cfg(**kwargs): state = kwargs[ts_mod.CFG_STATE] if ts_mod.CFG_STATE in kwargs else "disabled" - max_ts = kwargs[ts_mod.COOLOFF] if ts_mod.COOLOFF in kwargs else "0" + max_ts = kwargs[ts_mod.CFG_MAX_TS] if ts_mod.CFG_MAX_TS in kwargs else "0" RedisHandle.data[ts_mod.CFG_DB] = {ts_mod.AUTO_TS : {ts_mod.CFG_STATE : state, ts_mod.CFG_MAX_TS : max_ts}} class TestTechsupportCreationEvent(unittest.TestCase): - def setUp(self): - self.orig_time_buf = ts_mod.TIME_BUF - ts_mod.TIME_BUF = 0.5 # Patch the buf to 1 sec - - def tearDown(self): - ts_mod.TIME_BUF = self.orig_time_buf - def test_no_cleanup_state_disabled(self): """ Scenario: AUTO_TECHSUPPORT is disabled. @@ -91,7 +84,6 @@ def test_dump_cleanup(self): patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=25) ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz") current_fs = os.listdir(ts_mod.TS_DIR) - print(current_fs) assert len(current_fs) == 2 assert "sonic_dump_random1.tar.gz" not in current_fs assert "sonic_dump_random2.tar.gz" in current_fs diff --git a/utilities_common/auto_techsupport_helper.py b/utilities_common/auto_techsupport_helper.py index e82da3c1b8..6b5188aaf5 100644 --- a/utilities_common/auto_techsupport_helper.py +++ b/utilities_common/auto_techsupport_helper.py @@ -14,6 +14,7 @@ CFG_MAX_TS = "max_techsupport_size" COOLOFF = "cooloff" CFG_CORE_USAGE = "core_usage" +CFG_SINCE = "since" CORE_DUMP_DIR = "/var/core" CORE_DUMP_PTRN = "*.core.gz" @@ -30,6 +31,7 @@ TS_MAP = "AUTO_TECHSUPPORT|TS_CORE_MAP" TIME_BUF = 20 +SINCE_DEFAULT = "2 days ago" ##### Helper methods def subprocess_exec(cmd): @@ -103,7 +105,6 @@ def pretty_size(bytes): def cleanup_process(limit, file_ptrn, dir): """Deletes the oldest files incrementally until the size is under limit""" - print("---- Reached Here ------ ") if not(1 <= limit and limit <= 100): syslog.syslog(syslog.LOG_ERR, "core_usage_limit can only be between 1 and 100, whereas the configured value is: {}".format(limit)) return From 7be9ee41a5e8616396c37b6bc2f3a2136fd019e6 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Wed, 11 Aug 2021 08:19:05 +0000 Subject: [PATCH 08/60] CLI GEN-1 merged Signed-off-by: Vivek Reddy Karri --- clear/plugins/auto/__init__.py | 0 config/plugins/auto/__init__.py | 0 setup.py | 5 + show/plugins/auto/__init__.py | 0 .../bash_completion.d/sonic-cli-gen | 8 + sonic-utilities-data/debian/install | 5 +- .../templates/sonic-cli-gen/common.j2 | 3 + .../templates/sonic-cli-gen/config.py.j2 | 481 +++++++++++++ .../templates/sonic-cli-gen/show.py.j2 | 245 +++++++ sonic_cli_gen/__init__.py | 6 + sonic_cli_gen/generator.py | 67 ++ sonic_cli_gen/main.py | 51 ++ sonic_cli_gen/yang_parser.py | 679 ++++++++++++++++++ .../cli_autogen_input/assert_dictionaries.py | 625 ++++++++++++++++ tests/cli_autogen_input/config_db.json | 544 ++++++++++++++ tests/cli_autogen_input/sonic-1-list.yang | 29 + .../sonic-1-object-container.yang | 23 + .../sonic-1-table-container.yang | 17 + tests/cli_autogen_input/sonic-2-lists.yang | 42 ++ .../sonic-2-object-containers.yang | 29 + .../sonic-2-table-containers.yang | 23 + .../sonic-choice-complex.yang | 91 +++ .../sonic-dynamic-object-complex-1.yang | 57 ++ .../sonic-dynamic-object-complex-2.yang | 84 +++ tests/cli_autogen_input/sonic-grouping-1.yang | 25 + tests/cli_autogen_input/sonic-grouping-2.yang | 25 + .../sonic-grouping-complex.yang | 96 +++ 
.../sonic-static-object-complex-1.yang | 49 ++ .../sonic-static-object-complex-2.yang | 71 ++ tests/cli_autogen_yang_parser_test.py | 196 +++++ utilities_common/util_base.py | 1 + 31 files changed, 3575 insertions(+), 2 deletions(-) create mode 100644 clear/plugins/auto/__init__.py create mode 100644 config/plugins/auto/__init__.py create mode 100644 show/plugins/auto/__init__.py create mode 100644 sonic-utilities-data/bash_completion.d/sonic-cli-gen create mode 100644 sonic-utilities-data/templates/sonic-cli-gen/common.j2 create mode 100644 sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 create mode 100644 sonic-utilities-data/templates/sonic-cli-gen/show.py.j2 create mode 100644 sonic_cli_gen/__init__.py create mode 100644 sonic_cli_gen/generator.py create mode 100644 sonic_cli_gen/main.py create mode 100644 sonic_cli_gen/yang_parser.py create mode 100644 tests/cli_autogen_input/assert_dictionaries.py create mode 100644 tests/cli_autogen_input/config_db.json create mode 100644 tests/cli_autogen_input/sonic-1-list.yang create mode 100644 tests/cli_autogen_input/sonic-1-object-container.yang create mode 100644 tests/cli_autogen_input/sonic-1-table-container.yang create mode 100644 tests/cli_autogen_input/sonic-2-lists.yang create mode 100644 tests/cli_autogen_input/sonic-2-object-containers.yang create mode 100644 tests/cli_autogen_input/sonic-2-table-containers.yang create mode 100644 tests/cli_autogen_input/sonic-choice-complex.yang create mode 100644 tests/cli_autogen_input/sonic-dynamic-object-complex-1.yang create mode 100644 tests/cli_autogen_input/sonic-dynamic-object-complex-2.yang create mode 100644 tests/cli_autogen_input/sonic-grouping-1.yang create mode 100644 tests/cli_autogen_input/sonic-grouping-2.yang create mode 100644 tests/cli_autogen_input/sonic-grouping-complex.yang create mode 100644 tests/cli_autogen_input/sonic-static-object-complex-1.yang create mode 100644 tests/cli_autogen_input/sonic-static-object-complex-2.yang create mode 100644 tests/cli_autogen_yang_parser_test.py diff --git a/clear/plugins/auto/__init__.py b/clear/plugins/auto/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/config/plugins/auto/__init__.py b/config/plugins/auto/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/setup.py b/setup.py index 5e0b920f88..963c9af9b1 100644 --- a/setup.py +++ b/setup.py @@ -35,8 +35,10 @@ def run_tests(self): 'acl_loader', 'clear', 'clear.plugins', + 'clear.plugins.auto', 'config', 'config.plugins', + 'config.plugins.auto', 'connect', 'consutil', 'counterpoll', @@ -58,6 +60,7 @@ def run_tests(self): 'show', 'show.interfaces', 'show.plugins', + 'show.plugins.auto', 'sonic_installer', 'sonic_installer.bootloader', 'sonic_package_manager', @@ -66,6 +69,7 @@ def run_tests(self): 'undebug', 'utilities_common', 'watchdogutil', + 'sonic_cli_gen', ], package_data={ 'show': ['aliases.ini'], @@ -174,6 +178,7 @@ def run_tests(self): 'spm = sonic_package_manager.main:cli', 'undebug = undebug.main:cli', 'watchdogutil = watchdogutil.main:watchdogutil', + 'sonic-cli-gen = sonic_cli_gen.main:cli', ] }, install_requires=[ diff --git a/show/plugins/auto/__init__.py b/show/plugins/auto/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sonic-utilities-data/bash_completion.d/sonic-cli-gen b/sonic-utilities-data/bash_completion.d/sonic-cli-gen new file mode 100644 index 0000000000..3327f9c513 --- /dev/null +++ b/sonic-utilities-data/bash_completion.d/sonic-cli-gen @@ -0,0 +1,8 @@ 
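+# Click supplies the completion candidates itself: with the environment
+# variable _SONIC_CLI_GEN_COMPLETE=complete set (click's completion protocol,
+# assumed here), the entry point prints matches for COMP_WORDS/COMP_CWORD.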
+_sonic_cli_gen_completion() { + COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ + COMP_CWORD=$COMP_CWORD \ + _SONIC_CLI_GEN_COMPLETE=complete $1 ) ) + return 0 +} + +complete -F _sonic_cli_gen_completion -o default sonic-cli-gen; diff --git a/sonic-utilities-data/debian/install b/sonic-utilities-data/debian/install index 82d087d54d..1f67b78c20 100644 --- a/sonic-utilities-data/debian/install +++ b/sonic-utilities-data/debian/install @@ -1,2 +1,3 @@ -bash_completion.d/ /etc/ -templates/*.j2 /usr/share/sonic/templates/ +bash_completion.d/ /etc/ +templates/*.j2 /usr/share/sonic/templates/ +templates/sonic-cli-gen/*.j2 /usr/share/sonic/templates/sonic-cli-gen/ diff --git a/sonic-utilities-data/templates/sonic-cli-gen/common.j2 b/sonic-utilities-data/templates/sonic-cli-gen/common.j2 new file mode 100644 index 0000000000..3b83ee5635 --- /dev/null +++ b/sonic-utilities-data/templates/sonic-cli-gen/common.j2 @@ -0,0 +1,3 @@ +{% macro cli_name(name) -%} +{{ name|lower|replace("_", "-") }} +{%- endmacro %} diff --git a/sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 b/sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 new file mode 100644 index 0000000000..402b7e3dd2 --- /dev/null +++ b/sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 @@ -0,0 +1,481 @@ +{%- from "common.j2" import cli_name -%} +""" +Autogenerated config CLI plugin. +{% if source_template is defined %} +Source template: {{ source_template }} +{% endif %} +{% if source_yang_module is defined %} +Source YANG module: {{ source_yang_module }} +{% endif %} +""" + +import click +import utilities_common.cli as clicommon +import utilities_common.general as general +from config import config_mgmt + + +# Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. +sonic_cfggen = general.load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') + + +def exit_with_error(*args, **kwargs): + """ Print a message and abort CLI. """ + + click.secho(*args, **kwargs) + raise click.Abort() + + +def validate_config_or_raise(cfg): + """ Validate config db data using ConfigMgmt """ + + try: + cfg = sonic_cfggen.FormatConverter.to_serialized(cfg) + config_mgmt.ConfigMgmt().loadData(cfg) + except Exception as err: + raise Exception('Failed to validate configuration: {}'.format(err)) + + +def add_entry_validated(db, table, key, data): + """ Add new entry in table and validate configuration """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key in cfg[table]: + raise Exception(f"{key} already exists") + + cfg[table][key] = data + + validate_config_or_raise(cfg) + db.set_entry(table, key, data) + + +def update_entry_validated(db, table, key, data, create_if_not_exists=False): + """ Update entry in table and validate configuration. + If attribute value in data is None, the attribute is deleted. 
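+        A None value in `data` acts as a per-attribute delete marker: e.g.
+        passing {"attr": None} removes 'attr' from the entry instead of
+        storing a null ('attr' here is an illustrative field name).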
+ """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + + if create_if_not_exists: + cfg[table].setdefault(key, {}) + + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + + for attr, value in data.items(): + if value is None and attr in cfg[table][key]: + cfg[table][key].pop(attr) + else: + cfg[table][key][attr] = value + + validate_config_or_raise(cfg) + db.set_entry(table, key, cfg[table][key]) + + +def del_entry_validated(db, table, key): + """ Delete entry in table and validate configuration """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + + cfg[table].pop(key) + + validate_config_or_raise(cfg) + db.set_entry(table, key, None) + + +def add_list_entry_validated(db, table, key, attr, data): + """ Add new entry into list in table and validate configuration""" + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + cfg[table][key].setdefault(attr, []) + for entry in data: + if entry in cfg[table][key][attr]: + raise Exception(f"{entry} already exists") + cfg[table][key][attr].append(entry) + + validate_config_or_raise(cfg) + db.set_entry(table, key, cfg[table][key]) + + +def del_list_entry_validated(db, table, key, attr, data): + """ Delete entry from list in table and validate configuration""" + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + cfg[table][key].setdefault(attr, []) + for entry in data: + if entry not in cfg[table][key][attr]: + raise Exception(f"{entry} does not exist") + cfg[table][key][attr].remove(entry) + if not cfg[table][key][attr]: + cfg[table][key].pop(attr) + + validate_config_or_raise(cfg) + db.set_entry(table, key, cfg[table][key]) + + +def clear_list_entry_validated(db, table, key, attr): + """ Clear list in object and validate configuration""" + + update_entry_validated(db, table, key, {attr: None}) + + +{# Generate click arguments macro +Jinja2 Call: + {{ gen_click_arguments([{"name": "leaf1", "is-leaf-list": False}, + {"name": "leaf2", "is-leaf-list": Talse}) }} +Result: +@click.argument( + "leaf1", + nargs=1, + required=True, +) +@click.argument( + "leaf2", + nargs=-1, + required=True, +) +#} +{%- macro gen_click_arguments(attrs) -%} +{%- for attr in attrs %} +@click.argument( + "{{ cli_name(attr.name) }}", + nargs={% if attr["is-leaf-list"] %}-1{% else %}1{% endif %}, + required=True, +) +{%- endfor %} +{%- endmacro %} + + +{# Generate click options macro +Jinja2 Call: + {{ gen_click_arguments([{"name": "leaf1", "is-mandatory": True, "description": "leaf1-desc"}, + {"name": "leaf2", "is-mandatory": False, "description": "leaf2-desc"}) }} +Result: +@click.option( + "--leaf1", + help="leaf1-desc [mandatory]", +) +@click.option( + "--leaf2", + help="leaf2-desc", +) +#} +{%- macro gen_click_options(attrs) -%} +{%- for attr in attrs %} +@click.option( + "--{{ cli_name(attr.name) }}", + help="{{ attr.description }}{% if attr['is-mandatory'] %}[mandatory]{% endif %}", +) +{%- endfor %} +{%- endmacro %} + +{# Generate valid python identifier from input names #} +{% macro pythonize(attrs) -%} +{{ attrs|map(attribute="name")|map("lower")|map("replace", "-", "_")|join(", ") }} +{%- endmacro %} + +{% macro gen_cfg_obj_list_update(group, table, object, attr) %} +{% set list_update_group = group + "_" + attr.name %} + +@{{ group }}.group(name="{{ cli_name(attr.name) }}", + cls=clicommon.AliasedGroup) +def {{ 
list_update_group }}(): + """ Add/Delete {{ attr.name }} in {{ table.name }} """ + + pass + +{# Add entries to list attribute config CLI generation +E.g: + @TABLE_object.command(name="add") + @click.argument("key1", nargs=1) + @click.argument("key2", nargs=1) + @click.argument("attribute", nargs=-1) + def TABLE_object_attribute_add(db, key1, key2, attribute): +#} +@{{ list_update_group }}.command(name="add") +{{ gen_click_arguments(object["keys"] + [attr]) }} +@clicommon.pass_db +def {{ list_update_group }}_add( + db, + {{ pythonize(object["keys"] + [attr]) }} +): + """ Add {{ attr.name }} in {{ table.name }} """ + + table = "{{ table.name }}" + key = {{ pythonize(object["keys"]) }} + attr = "{{ attr.name }}" + data = {{ pythonize([attr]) }} + + try: + add_list_entry_validated(db.cfgdb, table, key, attr, data) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +{# Delete entries from list attribute config CLI generation +E.g: + @TABLE_object.command(name="delete") + @click.argument("key1", nargs=1) + @click.argument("key2", nargs=1) + @click.argument("attribute", nargs=-1) + def TABLE_object_attribute_delete(db, key1, key2, attribute): +#} +@{{ list_update_group }}.command(name="delete") +{{ gen_click_arguments(object["keys"] + [attr]) }} +@clicommon.pass_db +def {{ list_update_group }}_delete( + db, + {{ pythonize(object["keys"] + [attr]) }} +): + """ Delete {{ attr.name }} in {{ table.name }} """ + + table = "{{ table.name }}" + key = {{ pythonize(object["keys"]) }} + attr = "{{ attr.name }}" + data = {{ pythonize([attr]) }} + + try: + del_list_entry_validated(db.cfgdb, table, key, attr, data) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +{# Clear entries from list attribute config CLI generation +E.g: + @TABLE_object.command(name="delete") + @click.argument("key1", nargs=1) + @click.argument("key2", nargs=1) + def TABLE_object_attribute_clear(db, key1, key2): +#} +@{{ list_update_group }}.command(name="clear") +{{ gen_click_arguments(object["keys"]) }} +@clicommon.pass_db +def {{ list_update_group }}_clear( + db, + {{ pythonize(object["keys"]) }} +): + """ Clear {{ attr.name }} in {{ table.name }} """ + + table = "{{ table.name }}" + key = {{ pythonize(object["keys"]) }} + attr = "{{ attr.name }}" + + try: + clear_list_entry_validated(db.cfgdb, table, key, attr) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + +{% endmacro %} + + +{% macro gen_cfg_obj_list_update_all(group, table, object) %} +{% for attr in object.attrs %} +{% if attr["is-leaf-list"] %} +{{ gen_cfg_obj_list_update(group, table, object, attr) }} +{% endif %} +{% endfor %} +{% endmacro %} + + +{% macro gen_cfg_static_obj_attr(table, object, attr) %} +@{{ table.name }}_{{ object.name }}.command(name="{{ cli_name(attr.name) }}") +{{ gen_click_arguments([attr]) }} +@clicommon.pass_db +def {{ table.name }}_{{ object.name }}_{{ attr.name }}(db, {{ pythonize([attr]) }}): + """ {{ attr.description }} """ + + table = "{{ table.name }}" + key = "{{ object.name }}" + data = { + "{{ attr.name }}": {{ pythonize([attr]) }}, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") +{% endmacro %} + + +{# Static objects config CLI generation +E.g: + @TABLE.group(name="object") + def TABLE_object(db): +#} +{% macro gen_cfg_static_obj(table, object) %} +@{{ table.name }}.group(name="{{ cli_name(object.name) }}", + cls=clicommon.AliasedGroup) 
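+# clicommon.pass_db injects the shared Db handle, so the generated command
+# body can reach db.cfgdb without constructing its own connector.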
+@clicommon.pass_db +def {{ table.name }}_{{ object.name }}(db): + """ {{ object.description }} """ + + pass + +{# Static objects attributes config CLI generation +E.g: + @TABLE_object.command(name="attribute") + def TABLE_object_attribute(db, attribute): +#} +{% for attr in object.attrs %} +{{ gen_cfg_static_obj_attr(table, object, attr) }} +{% endfor %} + +{{ gen_cfg_obj_list_update_all(table.name + "_" + object.name, table, object) }} +{% endmacro %} + +{# Dynamic objects config CLI generation #} + +{# Dynamic objects add command +E.g: + @TABLE.command(name="add") + @click.argument("key1") + @click.argument("key2") + @click.option("--attr1") + @click.option("--attr2") + @click.option("--attr3") + def TABLE_TABLE_LIST_add(db, key1, key2, attr1, attr2, attr3): +#} +{% macro gen_cfg_dyn_obj_add(group, table, object) %} +@{{ group }}.command(name="add") +{{ gen_click_arguments(object["keys"]) }} +{{ gen_click_options(object.attrs) }} +@clicommon.pass_db +def {{ group }}_add(db, {{ pythonize(object["keys"] + object.attrs) }}): + """ Add object in {{ table.name }}. """ + + table = "{{ table.name }}" + key = {{ pythonize(object["keys"]) }} + data = {} +{%- for attr in object.attrs %} + if {{ pythonize([attr]) }} is not None: +{%- if not attr["is-leaf-list"] %} + data["{{ attr.name }}"] = {{ pythonize([attr]) }} +{%- else %} + data["{{ attr.name }}"] = {{ pythonize([attr]) }}.split(",") +{%- endif %} +{%- endfor %} + + try: + add_entry_validated(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") +{% endmacro %} + +{# Dynamic objects update command +E.g: + @TABLE.command(name="update") + @click.argument("key1") + @click.argument("key2") + @click.option("--attr1") + @click.option("--attr2") + @click.option("--attr3") + def TABLE_TABLE_LIST_update(db, key1, key2, attr1, attr2, attr3): +#} +{% macro gen_cfg_dyn_obj_update(group, table, object) %} +@{{ group }}.command(name="update") +{{ gen_click_arguments(object["keys"]) }} +{{ gen_click_options(object.attrs) }} +@clicommon.pass_db +def {{ group }}_update(db, {{ pythonize(object["keys"] + object.attrs) }}): + """ Add object in {{ table.name }}. """ + + table = "{{ table.name }}" + key = {{ pythonize(object["keys"]) }} + data = {} +{%- for attr in object.attrs %} + if {{ pythonize([attr]) }} is not None: +{%- if not attr["is-leaf-list"] %} + data["{{ attr.name }}"] = {{ pythonize([attr]) }} +{%- else %} + data["{{ attr.name }}"] = {{ pythonize([attr]) }}.split(",") +{%- endif %} +{%- endfor %} + + try: + update_entry_validated(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") +{% endmacro %} + +{# Dynamic objects delete command +E.g: + @TABLE.command(name="delete") + @click.argument("key1") + @click.argument("key2") + def TABLE_TABLE_LIST_delete(db, key1, key2): +#} +{% macro gen_cfg_dyn_obj_delete(group, table, object) %} +@{{ group }}.command(name="delete") +{{ gen_click_arguments(object["keys"]) }} +@clicommon.pass_db +def {{ group }}_delete(db, {{ pythonize(object["keys"]) }}): + """ Delete object in {{ table.name }}. 
""" + + table = "{{ table.name }}" + key = {{ pythonize(object["keys"]) }} + try: + del_entry_validated(db.cfgdb, table, key) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") +{% endmacro %} + +{% macro gen_cfg_dyn_obj(table, object) %} +{# Generate another nested group in case table holds two types of objects #} +{% if table["dynamic-objects"]|length > 1 %} +{% set group = table.name + "_" + object.name %} +@{{ table.name }}.group(name="{{ cli_name(object.name) }}", + cls=clicommon.AliasedGroup) +def {{ group }}(): + """ {{ object.description }} """ + + pass +{% else %} +{% set group = table.name %} +{% endif %} + +{{ gen_cfg_dyn_obj_add(group, table, object) }} +{{ gen_cfg_dyn_obj_update(group, table, object) }} +{{ gen_cfg_dyn_obj_delete(group, table, object) }} +{{ gen_cfg_obj_list_update_all(group, table, object) }} +{% endmacro %} + + +{% for table in tables %} +@click.group(name="{{ cli_name(table.name) }}", + cls=clicommon.AliasedGroup) +def {{ table.name }}(): + """ {{ table.description }} """ + + pass + +{% if "static-objects" in table %} +{% for object in table["static-objects"] %} +{{ gen_cfg_static_obj(table, object) }} +{% endfor %} +{% endif %} + +{% if "dynamic-objects" in table %} +{% for object in table["dynamic-objects"] %} +{{ gen_cfg_dyn_obj(table, object) }} +{% endfor %} +{% endif %} + +{% endfor %} + +def register(cli): +{%- for table in tables %} + cli_node = {{ table.name }} + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command({{ table.name }}) +{%- endfor %} diff --git a/sonic-utilities-data/templates/sonic-cli-gen/show.py.j2 b/sonic-utilities-data/templates/sonic-cli-gen/show.py.j2 new file mode 100644 index 0000000000..6ee27f2013 --- /dev/null +++ b/sonic-utilities-data/templates/sonic-cli-gen/show.py.j2 @@ -0,0 +1,245 @@ +{% from "common.j2" import cli_name -%} +""" +Auto-generated show CLI plugin. +{% if source_template is defined %} +Source template: {{ source_template }} +{% endif %} +{% if source_yang_module is defined %} +Source YANG module: {{ source_yang_module }} +{% endif %} +""" + +import click +import tabulate +import natsort +import utilities_common.cli as clicommon + + +{% macro column_name(name) -%} +{{ name|upper|replace("_", " ")|replace("-", " ") }} +{%- endmacro %} + + +def format_attr_value(entry, attr): + """ Helper that formats attribute to be presented in the table output. + + Args: + entry (Dict[str, str]): CONFIG DB entry configuration. + attr (Dict): Attribute metadata. + + Returns: + str: fomatted attribute value. + """ + + if attr["is-leaf-list"]: + return "\n".join(entry.get(attr["name"], [])) + return entry.get(attr["name"], "N/A") + + +def format_group_value(entry, attrs): + """ Helper that formats grouped attribute to be presented in the table output. + + Args: + entry (Dict[str, str]): CONFIG DB entry configuration. + attrs (List[Dict]): Attributes metadata that belongs to the same group. + + Returns: + str: fomatted group attributes. + """ + + data = [] + for attr in attrs: + if entry.get(attr["name"]): + data.append((attr["name"] + ":", format_attr_value(entry, attr))) + return tabulate.tabulate(data, tablefmt="plain") + + +{# Generates a python list that represents a row in the table view. 
+E.g: +Jinja2: +{{ + gen_row("entry", [ + {"name": "leaf1"}, + {"name": "leaf_1"}, + {"name": "leaf_2"}, + {"name": "leaf_3", "group": "group_0"} + ]) +}} +Result: +[ + format_attr_value( + entry, + {'name': 'leaf1'} + ), + format_attr_value( + entry, + {'name': 'leaf_1'} + ), + format_attr_value( + entry, + {'name': 'leaf_2'} + ), + format_group_value( + entry, + [{'name': 'leaf_3', 'group': 'group_0'}] + ), +] +#} +{% macro gen_row(entry, attrs) -%} +[ +{%- for attr in attrs|rejectattr("group", "defined") %} + format_attr_value( + {{ entry }}, + {{ attr }} + ), +{%- endfor %} +{%- for group, attrs in attrs|selectattr("group", "defined")|groupby("group") %} +{%- if group == "" %} +{%- for attr in attrs %} + format_attr_value( + {{ entry }}, + {{ attr }} + ), +{%- endfor %} +{%- else %} + format_group_value( + {{ entry }}, + {{ attrs }} + ), +{%- endif %} +{%- endfor %} +] +{% endmacro %} + +{# Generates a list that represents a header in table view. +E.g: +Jinja2: {{ + gen_header([ + {"name": "key"}, + {"name": "leaf_1"}, + {"name": "leaf_2"}, + {"name": "leaf_3", "group": "group_0"} + ]) + }} + +Result: +[ + "KEY", + "LEAF 1", + "LEAF 2", + "GROUP 0", +] + +#} +{% macro gen_header(attrs) -%} +[ +{% for attr in attrs|rejectattr("group", "defined") -%} + "{{ column_name(attr.name) }}", +{% endfor -%} +{% for group, attrs in attrs|selectattr("group", "defined")|groupby("group") -%} +{%- if group == "" %} +{% for attr in attrs -%} + "{{ column_name(attr.name) }}", +{% endfor -%} +{%- else %} + "{{ column_name(group) }}", +{%- endif %} +{% endfor -%} +] +{% endmacro %} + + +{% for table in tables %} +{% if "static-objects" in table %} +{# For static objects generate a command group called against table name. +E.g: +@click.group(name="table-name", + cls=clicommon.AliasedGroup) +def TABLE_NAME(): + """ TABLE DESCRIPTION """ + + pass +#} +@click.group(name="{{ cli_name(table.name) }}", + cls=clicommon.AliasedGroup) +def {{ table.name }}(): + """ {{ table.description }} """ + + pass + +{% for object in table["static-objects"] %} +{# For every object in static table generate a command +in the group to show individual object configuration. +CLI command is named against the object key in DB. +E.g: +@TABLE_NAME.command(name="object-name") +@clicommon.pass_db +def TABLE_NAME_object_name(db): + ... +#} +@{{ table.name }}.command(name="{{ cli_name(object.name) }}") +@clicommon.pass_db +def {{ table.name }}_{{ object.name }}(db): + """ {{ object.description }} """ + + header = {{ gen_header(object.attrs) }} + body = [] + + table = db.cfgdb.get_table("{{ table.name }}") + entry = table.get("{{ object.name }}", {}) + row = {{ gen_row("entry", object.attrs) }} + body.append(row) + click.echo(tabulate.tabulate(body, header)) + +{% endfor %} +{% elif "dynamic-objects" in table %} +{% if table["dynamic-objects"]|length > 1 %} +@click.group(name="{{ cli_name(table.name) }}", + cls=clicommon.AliasedGroup) +def {{ table.name }}(): + """ {{ table.description }} """ + + pass +{% endif %} +{% for object in table["dynamic-objects"] %} +{# Generate another nesting group in case table holds two types of objects #} +{% if table["dynamic-objects"]|length > 1 %} +{% set group = table.name %} +{% set name = object.name %} +{% else %} +{% set group = "click" %} +{% set name = table.name %} +{% endif %} + +{# Generate an implementation to display table. 
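+The group is created with invoke_without_command=True, so invoking it with no
+subcommand prints the whole table. For a hypothetical table "MY_TABLE" with a
+single attribute, the rendered plugin looks roughly like (names illustrative):
+
+    @click.group(name="my-table",
+                 cls=clicommon.AliasedGroup,
+                 invoke_without_command=True)
+    @clicommon.pass_db
+    def MY_TABLE(db):
+        table = db.cfgdb.get_table("MY_TABLE")
+        body = [[key, table[key].get("attr", "N/A")]
+                for key in natsort.natsorted(table)]
+        click.echo(tabulate.tabulate(body, ["NAME", "ATTR"]))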
#} +@{{ group }}.group(name="{{ cli_name(name) }}", + cls=clicommon.AliasedGroup, + invoke_without_command=True) +@clicommon.pass_db +def {{ name }}(db): + """ {{ object.description }} [Callable command group] """ + + header = {{ gen_header(object["keys"] + object.attrs) }} + body = [] + + table = db.cfgdb.get_table("{{ table.name }}") + for key in natsort.natsorted(table): + entry = table[key] + if not isinstance(key, tuple): + key = (key,) + + row = [*key] + {{ gen_row("entry", object.attrs) }} + body.append(row) + + click.echo(tabulate.tabulate(body, header)) +{% endfor %} +{% endif %} +{% endfor %} + +def register(cli): +{%- for table in tables %} + cli_node = {{ table.name }} + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command({{ table.name }}) +{%- endfor %} diff --git a/sonic_cli_gen/__init__.py b/sonic_cli_gen/__init__.py new file mode 100644 index 0000000000..e7e775c0fb --- /dev/null +++ b/sonic_cli_gen/__init__.py @@ -0,0 +1,6 @@ +#!/usr/bin/env python + +from sonic_cli_gen.generator import CliGenerator + +__all__ = ['CliGenerator'] + diff --git a/sonic_cli_gen/generator.py b/sonic_cli_gen/generator.py new file mode 100644 index 0000000000..4f48b0201a --- /dev/null +++ b/sonic_cli_gen/generator.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python + +import os +import pkgutil +import jinja2 + +from sonic_cli_gen.yang_parser import YangParser + +templates_path = '/usr/share/sonic/templates/sonic-cli-gen/' + + +class CliGenerator: + """ SONiC CLI generator. This class provides public API + for sonic-cli-gen python library. It can generate config, + show CLI plugins. + + Attributes: + loader: the loaded j2 templates + env: j2 central object + logger: logger + """ + + def __init__(self, logger): + """ Initialize CliGenerator. """ + + self.loader = jinja2.FileSystemLoader(templates_path) + self.env = jinja2.Environment(loader=self.loader) + self.logger = logger + + def generate_cli_plugin(self, cli_group, plugin_name): + """ Generate click CLI plugin and put it to: + /usr/local/lib//dist-packages//plugins/auto/ + """ + + parser = YangParser(yang_model_name=plugin_name, + config_db_path='configDB', + allow_tbl_without_yang=True, + debug=False) + # yang_dict will be used as an input for templates located in + # /usr/share/sonic/templates/sonic-cli-gen/ + yang_dict = parser.parse_yang_model() + plugin_path = get_cli_plugin_path(cli_group, plugin_name + '_yang.py') + template = self.env.get_template(cli_group + '.py.j2') + with open(plugin_path, 'w') as plugin_py: + plugin_py.write(template.render(yang_dict)) + self.logger.info(' Auto-generation successful! 
Location: {}'.format(plugin_path)) + + def remove_cli_plugin(self, cli_group, plugin_name): + """ Remove CLI plugin from directory: + /usr/local/lib//dist-packages//plugins/auto/ + """ + plugin_path = get_cli_plugin_path(cli_group, plugin_name + '_yang.py') + if os.path.exists(plugin_path): + os.remove(plugin_path) + self.logger.info(' {} was removed.'.format(plugin_path)) + else: + self.logger.info(' Path {} doest NOT exist!'.format(plugin_path)) + + +def get_cli_plugin_path(command, plugin_name): + pkg_loader = pkgutil.get_loader(f'{command}.plugins.auto') + if pkg_loader is None: + raise Exception(f'Failed to get plugins path for {command} CLI') + plugins_pkg_path = os.path.dirname(pkg_loader.path) + + return os.path.join(plugins_pkg_path, plugin_name) + diff --git a/sonic_cli_gen/main.py b/sonic_cli_gen/main.py new file mode 100644 index 0000000000..bfcd301aed --- /dev/null +++ b/sonic_cli_gen/main.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python + +import sys +import click +import logging +from sonic_cli_gen.generator import CliGenerator + +logger = logging.getLogger('sonic-cli-gen') +logging.basicConfig(stream=sys.stdout, level=logging.INFO) + + +@click.group() +@click.pass_context +def cli(ctx): + """ SONiC CLI Auto-generator tool.\r + Generate click CLI plugin for 'config' or 'show' CLI groups.\r + CLI plugin will be generated from the YANG model, which should be in:\r\n + /usr/local/yang-models/ \n + Generated CLI plugin will be placed in: \r\n + /usr/local/lib/python3.7/dist-packages//plugins/auto/ + """ + + context = { + 'gen': CliGenerator(logger) + } + ctx.obj = context + + +@cli.command() +@click.argument('cli_group', type=click.Choice(['config', 'show'])) +@click.argument('yang_model_name', type=click.STRING) +@click.pass_context +def generate(ctx, cli_group, yang_model_name): + """ Generate click CLI plugin. """ + + ctx.obj['gen'].generate_cli_plugin(cli_group, yang_model_name) + + +@cli.command() +@click.argument('cli_group', type=click.Choice(['config', 'show'])) +@click.argument('yang_model_name', type=click.STRING) +@click.pass_context +def remove(ctx, cli_group, yang_model_name): + """ Remove generated click CLI plugin from. """ + + ctx.obj['gen'].remove_cli_plugin(cli_group, yang_model_name) + + +if __name__ == '__main__': + cli() + diff --git a/sonic_cli_gen/yang_parser.py b/sonic_cli_gen/yang_parser.py new file mode 100644 index 0000000000..df0382536f --- /dev/null +++ b/sonic_cli_gen/yang_parser.py @@ -0,0 +1,679 @@ +#!/usr/bin/env python + +from collections import OrderedDict +from config.config_mgmt import ConfigMgmt +from typing import List, Dict + +yang_guidelines_link = 'https://github.com/Azure/SONiC/blob/master/doc/mgmt/SONiC_YANG_Model_Guidelines.md' + + +class YangParser: + """ YANG model parser + + Attributes: + yang_model_name: Name of the YANG model file + conf_mgmt: Instance of Config Mgmt class to + help parse YANG models + y_module: Reference to 'module' entity + from YANG model file + y_top_level_container: Reference to top level 'container' + entity from YANG model file + y_table_containers: Reference to 'container' entities + from YANG model file that represent Config DB tables + yang_2_dict: dictionary created from YANG model file that + represent Config DB schema. 
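+        One 'tables' element is produced per container found under the
+        top-level container; a 'list' inside a table container is reported
+        under 'dynamic-objects', a plain container under 'static-objects'.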
+ + Below the 'yang_2_dict' obj in case if YANG model has a 'list' entity: + { + 'tables': [{ + 'name': 'value', + 'description': 'value', + 'dynamic-objects': [ + 'name': 'value', + 'description': 'value, + 'attrs': [ + { + 'name': 'value', + 'description': 'value', + 'is-leaf-list': False, + 'is-mandatory': False, + 'group': 'value' + } + ... + ], + 'keys': [ + { + 'name': 'ACL_TABLE_NAME', + 'description': 'value' + } + ... + ] + ], + }] + } + In case if YANG model does NOT have a 'list' entity, + it has the same structure as above, but 'dynamic-objects' + changed to 'static-objects' and have no 'keys' + """ + + def __init__(self, + yang_model_name, + config_db_path, + allow_tbl_without_yang, + debug): + self.yang_model_name = yang_model_name + self.conf_mgmt = None + self.y_module = None + self.y_top_level_container = None + self.y_table_containers = None + self.yang_2_dict = dict() + + try: + self.conf_mgmt = ConfigMgmt(config_db_path, + debug, + allow_tbl_without_yang) + except Exception as e: + raise Exception("Failed to load the {} class".format(str(e))) + + def _init_yang_module_and_containers(self): + """ Initialize inner class variables: + self.y_module + self.y_top_level_container + self.y_table_containers + + Raises: + Exception: if YANG model is invalid or NOT exist + """ + + self.y_module = self._find_yang_model_in_yjson_obj() + + if self.y_module is None: + raise Exception('The YANG model {} is NOT exist'.format(self.yang_model_name)) + + if self.y_module.get('container') is None: + raise Exception('The YANG model {} does NOT have\ + "top level container" element\ + Please follow the SONiC YANG model guidelines:\ + \n{}'.format(self.yang_model_name, yang_guidelines_link)) + self.y_top_level_container = self.y_module.get('container') + + if self.y_top_level_container.get('container') is None: + raise Exception('The YANG model {} does NOT have "container"\ + element after "top level container"\ + Please follow the SONiC YANG model guidelines:\ + \n{}'.format(self.yang_model_name, yang_guidelines_link)) + self.y_table_containers = self.y_top_level_container.get('container') + + def _find_yang_model_in_yjson_obj(self) -> OrderedDict: + """ Find provided YANG model inside the yJson object, + the yJson object contain all yang-models + parsed from directory - /usr/local/yang-models + + Returns: + reference to yang_model_name + """ + + for yang_model in self.conf_mgmt.sy.yJson: + if yang_model.get('module').get('@name') == self.yang_model_name: + return yang_model.get('module') + + def parse_yang_model(self) -> dict: + """ Parse provided YANG model and save + the output to self.yang_2_dict object + + Returns: + parsed YANG model in dictionary format + """ + + self._init_yang_module_and_containers() + self.yang_2_dict['tables'] = list() + + # determine how many (1 or more) containers a YANG model + # has after the 'top level' container + # 'table' container goes after the 'top level' container + self.yang_2_dict['tables'] += list_handler(self.y_table_containers, + lambda e: on_table_container(self.y_module, e, self.conf_mgmt)) + + return self.yang_2_dict + + +# ------------------------------HANDLERS-------------------------------- # + +def list_handler(y_entity, callback) -> List[Dict]: + """ Determine if the type of entity is a list, + if so - call the callback for every list element + """ + + if isinstance(y_entity, list): + return [callback(e) for e in y_entity] + else: + return [callback(y_entity)] + + +def on_table_container(y_module: OrderedDict, + tbl_container: OrderedDict, 
+ conf_mgmt: ConfigMgmt) -> dict: + """ Parse 'table' container, + 'table' container goes after 'top level' container + + Args: + y_module: reference to 'module' + tbl_container: reference to 'table' container + conf_mgmt: reference to ConfigMgmt class instance, + it have yJson object which contain all parsed YANG models + Returns: + element for self.yang_2_dict['tables'] + """ + y2d_elem = { + 'name': tbl_container.get('@name'), + 'description': get_description(tbl_container) + } + + # determine if 'table container' has a 'list' entity + if tbl_container.get('list') is None: + y2d_elem['static-objects'] = list() + + # 'object' container goes after the 'table' container + # 'object' container have 2 types - list (like sonic-flex_counter.yang) + # and NOT list (like sonic-device_metadata.yang) + y2d_elem['static-objects'] += list_handler(tbl_container.get('container'), + lambda e: on_object_entity(y_module, e, conf_mgmt, is_list=False)) + else: + y2d_elem['dynamic-objects'] = list() + + # 'container' can have more than 1 'list' entity + y2d_elem['dynamic-objects'] += list_handler(tbl_container.get('list'), + lambda e: on_object_entity(y_module, e, conf_mgmt, is_list=True)) + + # move 'keys' elements from 'attrs' to 'keys' + change_dyn_obj_struct(y2d_elem['dynamic-objects']) + + return y2d_elem + + +def on_object_entity(y_module: OrderedDict, + y_entity: OrderedDict, + conf_mgmt: ConfigMgmt, + is_list: bool) -> dict: + """ Parse a 'object' entity, it could be a 'container' or a 'list' + 'Object' entity represent OBJECT in Config DB schema: + { + "TABLE": { + "OBJECT": { + "attr": "value" + } + } + } + + Args: + y_module: reference to 'module' + y_entity: reference to 'object' entity + conf_mgmt: reference to ConfigMgmt class instance, + it have yJson object which contain all parsed YANG models + is_list: boolean flag to determine if a 'list' was passed + Returns: + element for y2d_elem['static-objects'] OR y2d_elem['dynamic-objects'] + """ + + if y_entity is None: + return {} + + obj_elem = { + 'name': y_entity.get('@name'), + 'description': get_description(y_entity), + 'attrs': list() + } + + if is_list: + obj_elem['keys'] = get_list_keys(y_entity) + + attrs_list = list() + # grouping_name is empty because 'grouping' is not used so far + attrs_list.extend(get_leafs(y_entity, grouping_name='')) + attrs_list.extend(get_leaf_lists(y_entity, grouping_name='')) + attrs_list.extend(get_choices(y_module, y_entity, conf_mgmt, grouping_name='')) + attrs_list.extend(get_uses(y_module, y_entity, conf_mgmt)) + + obj_elem['attrs'] = attrs_list + + return obj_elem + + +def on_uses(y_module: OrderedDict, + y_uses, + conf_mgmt: ConfigMgmt) -> list: + """ Parse a YANG 'uses' entities + 'uses' referring to 'grouping' YANG entity + + Args: + y_module: reference to 'module' + y_uses: reference to 'uses' + conf_mgmt: reference to ConfigMgmt class instance, + it have yJson object which contain all parsed YANG model + Returns: + element for obj_elem['attrs'], 'attrs' contain a parsed 'leafs' + """ + + ret_attrs = list() + y_grouping = get_all_grouping(y_module, y_uses, conf_mgmt) + # trim prefixes in order to the next checks + trim_uses_prefixes(y_uses) + + # TODO: 'refine' support + for group in y_grouping: + if isinstance(y_uses, list): + for use in y_uses: + if group.get('@name') == use.get('@name'): + ret_attrs.extend(get_leafs(group, group.get('@name'))) + ret_attrs.extend(get_leaf_lists(group, group.get('@name'))) + ret_attrs.extend(get_choices(y_module, group, conf_mgmt, group.get('@name'))) + else: + if 
group.get('@name') == y_uses.get('@name'): + ret_attrs.extend(get_leafs(group, group.get('@name'))) + ret_attrs.extend(get_leaf_lists(group, group.get('@name'))) + ret_attrs.extend(get_choices(y_module, group, conf_mgmt, group.get('@name'))) + + return ret_attrs + + +def on_choices(y_module: OrderedDict, + y_choices, + conf_mgmt: ConfigMgmt, + grouping_name: str) -> list: + """ Parse a YANG 'choice' entities + + Args: + y_module: reference to 'module' + y_choices: reference to 'choice' element + conf_mgmt: reference to ConfigMgmt class instance, + it have yJson object which contain all parsed YANG model + grouping_name: if YANG entity contain 'uses', this arg represent 'grouping' name + Returns: + element for obj_elem['attrs'], 'attrs' contain a parsed 'leafs' + """ + + ret_attrs = list() + + # the YANG model can have multiple 'choice' entities + # inside a 'container' or 'list' + if isinstance(y_choices, list): + for choice in y_choices: + attrs = on_choice_cases(y_module, choice.get('case'), + conf_mgmt, grouping_name) + ret_attrs.extend(attrs) + else: + ret_attrs = on_choice_cases(y_module, y_choices.get('case'), + conf_mgmt, grouping_name) + + return ret_attrs + + +def on_choice_cases(y_module: OrderedDict, + y_cases, + conf_mgmt: ConfigMgmt, + grouping_name: str) -> list: + """ Parse a single YANG 'case' entity from the 'choice' entity. + The 'case' element can has inside - 'leaf', 'leaf-list', 'uses' + + Args: + y_module: reference to 'module' + y_cases: reference to 'case' + conf_mgmt: reference to ConfigMgmt class instance, + it have yJson object which contain all + parsed YANG model + grouping_name: if YANG entity contain 'uses', + this argument represent 'grouping' name + Returns: + element for the obj_elem['attrs'], the 'attrs' + contain a parsed 'leafs' + """ + + ret_attrs = list() + + if isinstance(y_cases, list): + for case in y_cases: + ret_attrs.extend(get_leafs(case, grouping_name)) + ret_attrs.extend(get_leaf_lists(case, grouping_name)) + ret_attrs.extend(get_uses(y_module, case, conf_mgmt)) + else: + ret_attrs.extend(get_leafs(y_cases, grouping_name)) + ret_attrs.extend(get_leaf_lists(y_cases, grouping_name)) + ret_attrs.extend(get_uses(y_module, y_cases, conf_mgmt)) + + return ret_attrs + + +def on_leafs(y_leafs, + grouping_name: str, + is_leaf_list: bool) -> list: + """ Parse all the 'leaf' or 'leaf-list' elements + + Args: + y_leafs: reference to all 'leaf' elements + grouping_name: if YANG entity contain 'uses', + this argument represent the 'grouping' name + is_leaf_list: boolean to determine if a 'leaf-list' + was passed as 'y_leafs' argument + Returns: + list of parsed 'leaf' elements + """ + + ret_attrs = list() + # The YANG 'container' entity may have only 1 'leaf' + # element OR a list of 'leaf' elements + ret_attrs += list_handler(y_leafs, lambda e: on_leaf(e, grouping_name, is_leaf_list)) + + return ret_attrs + + +def on_leaf(leaf: OrderedDict, + grouping_name: str, + is_leaf_list: bool) -> dict: + """ Parse a single 'leaf' element + + Args: + leaf: reference to a 'leaf' entity + grouping_name: if YANG entity contain 'uses', + this argument represent 'grouping' name + is_leaf_list: boolean to determine if 'leaf-list' + was passed in 'y_leafs' argument + Returns: + parsed 'leaf' element + """ + + attr = {'name': leaf.get('@name'), + 'description': get_description(leaf), + 'is-leaf-list': is_leaf_list, + 'is-mandatory': get_mandatory(leaf), + 'group': grouping_name} + + return attr + + +# ----------------------GETERS------------------------- # + +def 
get_mandatory(y_leaf: OrderedDict) -> bool: + """ Parse the 'mandatory' statement for a 'leaf' + + Args: + y_leaf: reference to a 'leaf' entity + Returns: + 'leaf' 'mandatory' value + """ + + if y_leaf.get('mandatory') is not None: + return True + + return False + + +def get_description(y_entity: OrderedDict) -> str: + """ Parse the 'description' entity from any YANG element + + Args: + y_entity: reference to YANG 'container' OR 'list' OR 'leaf' ... + Returns: + text of the 'description' + """ + + if y_entity.get('description') is not None: + return y_entity.get('description').get('text') + else: + return '' + + +def get_leafs(y_entity: OrderedDict, + grouping_name: str) -> list: + """ Check if the YANG entity have 'leafs', if so call handler + + Args: + y_entity: reference YANG 'container' or 'list' + or 'choice' or 'uses' + grouping_name: if YANG entity contain 'uses', + this argument represent 'grouping' name + Returns: + list of parsed 'leaf' elements + """ + + if y_entity.get('leaf') is not None: + return on_leafs(y_entity.get('leaf'), grouping_name, is_leaf_list=False) + + return [] + + +def get_leaf_lists(y_entity: OrderedDict, + grouping_name: str) -> list: + """ Check if the YANG entity have 'leaf-list', if so call handler + + Args: + y_entity: reference YANG 'container' or 'list' + or 'choice' or 'uses' + grouping_name: if YANG entity contain 'uses', + this argument represent 'grouping' name + Returns: + list of parsed 'leaf-list' elements + """ + + if y_entity.get('leaf-list') is not None: + return on_leafs(y_entity.get('leaf-list'), grouping_name, is_leaf_list=True) + + return [] + + +def get_choices(y_module: OrderedDict, + y_entity: OrderedDict, + conf_mgmt: ConfigMgmt, + grouping_name: str) -> list: + """ Check if the YANG entity have 'choice', if so call handler + + Args: + y_module: reference to 'module' + y_entity: reference YANG 'container' or 'list' + or 'choice' or 'uses' + conf_mgmt: reference to ConfigMgmt class instance, + it have yJson object which contain all parsed YANG model + grouping_name: if YANG entity contain 'uses', + this argument represent 'grouping' name + Returns: + list of parsed elements inside 'choice' + """ + + if y_entity.get('choice') is not None: + return on_choices(y_module, y_entity.get('choice'), conf_mgmt, grouping_name) + + return [] + + +def get_uses(y_module: OrderedDict, + y_entity: OrderedDict, + conf_mgmt: ConfigMgmt) -> list: + """ Check if the YANG entity have 'uses', if so call handler + + Args: + y_module: reference to 'module' + y_entity: reference YANG 'container' or 'list' + or 'choice' or 'uses' + conf_mgmt: reference to ConfigMgmt class instance, + it have yJson object which contain all parsed YANG model + Returns: + list of parsed elements inside 'grouping' + that referenced by 'uses' + """ + + if y_entity.get('uses') is not None: + return on_uses(y_module, y_entity.get('uses'), conf_mgmt) + + return [] + + +def get_all_grouping(y_module: OrderedDict, + y_uses: OrderedDict, + conf_mgmt: ConfigMgmt) -> list: + """ Get all the 'grouping' entities that was referenced + by 'uses' in current YANG model + + Args: + y_module: reference to 'module' + y_entity: reference to 'uses' + conf_mgmt: reference to ConfigMgmt class instance, + it have yJson object which contain all parsed YANG model + Returns: + list of 'grouping' elements + """ + + ret_grouping = list() + # prefix_list needed to find what YANG model was imported + prefix_list = get_import_prefixes(y_uses) + + # in case if 'grouping' located in the same YANG model + 
+    local_grouping = y_module.get('grouping')
+    if local_grouping is not None:
+        if isinstance(local_grouping, list):
+            ret_grouping.extend(local_grouping)
+        else:
+            ret_grouping.append(local_grouping)
+
+    # if prefix_list is NOT empty, it means that the 'grouping'
+    # was imported from another YANG model
+    if prefix_list != []:
+        for prefix in prefix_list:
+            y_import = y_module.get('import')
+            if isinstance(y_import, list):
+                for _import in y_import:
+                    if _import.get('prefix').get('@value') == prefix:
+                        ret_grouping.extend(get_grouping_from_another_yang_model(_import.get('@module'), conf_mgmt))
+            else:
+                if y_import.get('prefix').get('@value') == prefix:
+                    ret_grouping.extend(get_grouping_from_another_yang_model(y_import.get('@module'), conf_mgmt))
+
+    return ret_grouping
+
+
+def get_grouping_from_another_yang_model(yang_model_name: str,
+                                         conf_mgmt) -> list:
+    """ Get the YANG 'grouping' entity
+
+        Args:
+            yang_model_name: YANG model to search
+            conf_mgmt: reference to the ConfigMgmt class instance;
+                       it has a yJson object which contains all parsed YANG models
+        Returns:
+            list of 'grouping' entities
+    """
+
+    ret_grouping = list()
+
+    for yang_model in conf_mgmt.sy.yJson:
+        if (yang_model.get('module').get('@name') == yang_model_name):
+            grouping = yang_model.get('module').get('grouping')
+            if isinstance(grouping, list):
+                ret_grouping.extend(grouping)
+            else:
+                ret_grouping.append(grouping)
+
+    return ret_grouping
+
+
+def get_import_prefixes(y_uses: OrderedDict) -> list:
+    """ Parse the 'import prefix' of a YANG 'uses' entity
+        Example:
+            {
+                uses stypes:endpoint;
+            }
+            'stypes' - prefix of the imported YANG module.
+            'endpoint' - YANG 'grouping' entity name
+
+        Args:
+            y_uses: reference to YANG 'uses'
+        Returns:
+            list of parsed prefixes
+    """
+
+    ret_prefixes = list()
+
+    if isinstance(y_uses, list):
+        for use in y_uses:
+            prefix = use.get('@name').split(':')[0]
+            if prefix != use.get('@name'):
+                ret_prefixes.append(prefix)
+    else:
+        prefix = y_uses.get('@name').split(':')[0]
+        if prefix != y_uses.get('@name'):
+            ret_prefixes.append(prefix)
+
+    return ret_prefixes
+
+
+def trim_uses_prefixes(y_uses) -> None:
+    """ Trim prefixes from the 'uses' YANG entities, in place.
+        If the YANG 'grouping' was imported from another
+        YANG file, it uses the 'prefix' before the 'grouping' name:
+            {
+                uses sgrop:endpoint;
+            }
+        Where 'sgrop' = 'prefix'; 'endpoint' = 'grouping' name.
+
+        Args:
+            y_uses: reference to 'uses'
+    """
+
+    prefixes = get_import_prefixes(y_uses)
+
+    for prefix in prefixes:
+        if isinstance(y_uses, list):
+            for use in y_uses:
+                if prefix in use.get('@name'):
+                    use['@name'] = use.get('@name').split(':')[1]
+        else:
+            if prefix in y_uses.get('@name'):
+                y_uses['@name'] = y_uses.get('@name').split(':')[1]
+
+
+def get_list_keys(y_list: OrderedDict) -> list:
+    """ Parse the YANG 'key' entity.
+        If a YANG model has a 'list' entity, the 'list'
+        contains a 'key' entity. The 'key' is a whitespace-
+        separated list of 'leafs'
+
+        Args:
+            y_list: reference to the 'list'
+        Returns:
+            list of parsed keys
+    """
+
+    ret_list = list()
+
+    keys = y_list.get('key').get('@value').split()
+    for k in keys:
+        key = {'name': k}
+        ret_list.append(key)
+
+    return ret_list
+
+
+def change_dyn_obj_struct(dynamic_objects: list):
+    """ Rearrange the self.yang_2_dict['dynamic_objects'] structure.
+        If the YANG model has a 'list' entity, inside the 'list'
+        there is a 'key' entity.
The 'key' entity it is whitespace + separeted list of 'leafs', those 'leafs' was parsed by + 'on_leaf()' function and placed under 'attrs' in + self.yang_2_dict['dynamic_objects'] need to move 'leafs' + from 'attrs' and put them into 'keys' section of + self.yang_2_dict['dynamic_objects'] + + Args: + dynamic_objects: reference to self.yang_2_dict['dynamic_objects'] + """ + + for obj in dynamic_objects: + for key in obj.get('keys'): + for attr in obj.get('attrs'): + if key.get('name') == attr.get('name'): + key['description'] = attr.get('description') + obj['attrs'].remove(attr) + break + diff --git a/tests/cli_autogen_input/assert_dictionaries.py b/tests/cli_autogen_input/assert_dictionaries.py new file mode 100644 index 0000000000..263e48366d --- /dev/null +++ b/tests/cli_autogen_input/assert_dictionaries.py @@ -0,0 +1,625 @@ +""" +Module holding correct dictionaries for test YANG models +""" + +one_table_container = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "static-objects":[ + { + } + ] + } + ] +} + +two_table_containers = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "static-objects":[ + { + + } + ] + }, + { + "description":"TABLE_2 description", + "name":"TABLE_2", + "static-objects":[ + { + + } + ] + } + ] +} + +one_object_container = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "static-objects":[ + { + "name":"OBJECT_1", + "description":"OBJECT_1 description", + "attrs":[ + ] + } + ] + } + ] +} + +two_object_containers = { + "tables":[ + { + "description":"FIRST_TABLE description", + "name":"TABLE_1", + "static-objects":[ + { + "name":"OBJECT_1", + "description":"OBJECT_1 description", + "attrs":[ + ] + }, + { + "name":"OBJECT_2", + "description":"OBJECT_2 description", + "attrs":[ + ] + } + ] + } + ] +} + +one_list = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "dynamic-objects":[ + { + "name":"TABLE_1_LIST", + "description":"TABLE_1_LIST description", + "keys":[ + { + "name": "key_name", + "description": "", + } + ], + "attrs":[ + ] + } + ] + } + ] +} + +two_lists = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "dynamic-objects":[ + { + "name":"TABLE_1_LIST_1", + "description":"TABLE_1_LIST_1 description", + "keys":[ + { + "name": "key_name1", + "description": "", + } + ], + "attrs":[ + ] + }, + { + "name":"TABLE_1_LIST_2", + "description":"TABLE_1_LIST_2 description", + "keys":[ + { + "name": "key_name2", + "description": "", + } + ], + "attrs":[ + ] + } + ] + } + ] +} + +static_object_complex_1 = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "static-objects":[ + { + "name":"OBJECT_1", + "description":"OBJECT_1 description", + "attrs":[ + { + "name":"OBJ_1_LEAF_1", + "description": "OBJ_1_LEAF_1 description", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + } + ] + } + ] + } + ] +} + +static_object_complex_2 = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "static-objects":[ + { + "name":"OBJECT_1", + "description":"OBJECT_1 description", + 
"attrs":[ + { + "name":"OBJ_1_LEAF_1", + "description": "OBJ_1_LEAF_1 description", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_LEAF_2", + "description": "OBJ_1_LEAF_2 description", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"OBJ_1_LEAF_LIST_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_2_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_2_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + ] + } + ] + } + ] +} + +dynamic_object_complex_1 = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "dynamic-objects":[ + { + "name":"OBJECT_1_LIST", + "description":"OBJECT_1_LIST description", + "attrs":[ + { + "name":"OBJ_1_LEAF_1", + "description": "OBJ_1_LEAF_1 description", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + } + ], + "keys":[ + { + "name": "KEY_LEAF_1", + "description": "KEY_LEAF_1 description", + } + ] + } + ] + } + ] +} + +dynamic_object_complex_2 = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "dynamic-objects":[ + { + "name":"OBJECT_1_LIST", + "description":"OBJECT_1_LIST description", + "attrs":[ + { + "name":"OBJ_1_LEAF_1", + "description": "OBJ_1_LEAF_1 description", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_LEAF_2", + "description": "OBJ_1_LEAF_2 description", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"OBJ_1_LEAF_LIST_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_1_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_2_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"OBJ_1_CHOICE_2_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + } + ], + "keys":[ + { + "name": "KEY_LEAF_1", + "description": "KEY_LEAF_1 description", + }, + { + "name": "KEY_LEAF_2", + "description": "KEY_LEAF_2 description", + } + ] + } + ] + } + ] +} + +choice_complex = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "static-objects":[ + { + "name":"OBJECT_1", + 
"description":"OBJECT_1 description", + "attrs":[ + { + "name":"LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"GR_1_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_1", + }, + { + "name":"GR_1_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_1', + }, + { + "name":"LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"LEAF_3", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": '', + }, + { + "name":"LEAF_LIST_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"LEAF_LIST_3", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": '', + }, + { + "name":"GR_5_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_5', + }, + { + "name":"GR_5_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_5', + }, + { + "name":"GR_2_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_2', + }, + { + "name":"GR_2_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_2', + }, + { + "name":"GR_3_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_3', + }, + { + "name":"GR_3_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_3', + }, + ] + } + ] + } + ] +} + +grouping_complex = { + "tables":[ + { + "description":"TABLE_1 description", + "name":"TABLE_1", + "static-objects":[ + { + "name":"OBJECT_1", + "description":"OBJECT_1 description", + "attrs":[ + { + "name":"GR_1_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_1", + }, + { + "name":"GR_1_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": 'GR_1', + }, + ] + }, + { + "name":"OBJECT_2", + "description":"OBJECT_2 description", + "attrs":[ + { + "name":"GR_5_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_5", + }, + { + "name":"GR_5_LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": "GR_5", + }, + { + "name":"GR_6_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_6", + }, + { + "name":"GR_6_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_6", + }, + { + "name":"GR_6_CASE_1_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_6", + }, + { + "name":"GR_6_CASE_1_LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": "GR_6", + }, + { + "name":"GR_6_CASE_2_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_6", + }, + { + "name":"GR_6_CASE_2_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_6", + }, + { + "name":"GR_6_CASE_2_LEAF_LIST_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + "group": "GR_6", + }, + { + "name":"GR_6_CASE_2_LEAF_LIST_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": True, + 
"group": "GR_6", + }, + { + "name":"GR_4_LEAF_1", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_4", + }, + { + "name":"GR_4_LEAF_2", + "description": "", + "is-mandatory": False, + "is-leaf-list": False, + "group": "GR_4", + }, + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/cli_autogen_input/config_db.json b/tests/cli_autogen_input/config_db.json new file mode 100644 index 0000000000..5473d6158a --- /dev/null +++ b/tests/cli_autogen_input/config_db.json @@ -0,0 +1,544 @@ +{ + "COPP_GROUP": { + "default": { + "cbs": "600", + "cir": "600", + "meter_type": "packets", + "mode": "sr_tcm", + "queue": "0", + "red_action": "drop" + }, + "queue1_group1": { + "cbs": "6000", + "cir": "6000", + "meter_type": "packets", + "mode": "sr_tcm", + "queue": "1", + "red_action": "drop", + "trap_action": "trap", + "trap_priority": "1" + }, + "queue1_group2": { + "cbs": "600", + "cir": "600", + "meter_type": "packets", + "mode": "sr_tcm", + "queue": "1", + "red_action": "drop", + "trap_action": "trap", + "trap_priority": "1" + }, + "queue2_group1": { + "cbs": "1000", + "cir": "1000", + "genetlink_mcgrp_name": "packets", + "genetlink_name": "psample", + "meter_type": "packets", + "mode": "sr_tcm", + "queue": "2", + "red_action": "drop", + "trap_action": "trap", + "trap_priority": "1" + }, + "queue4_group1": { + "cbs": "600", + "cir": "600", + "color": "blind", + "meter_type": "packets", + "mode": "sr_tcm", + "queue": "4", + "red_action": "drop", + "trap_action": "trap", + "trap_priority": "4" + }, + "queue4_group2": { + "cbs": "600", + "cir": "600", + "meter_type": "packets", + "mode": "sr_tcm", + "queue": "4", + "red_action": "drop", + "trap_action": "copy", + "trap_priority": "4" + }, + "queue4_group3": { + "cbs": "600", + "cir": "600", + "color": "blind", + "meter_type": "packets", + "mode": "sr_tcm", + "queue": "4", + "red_action": "drop", + "trap_action": "trap", + "trap_priority": "4" + } + }, + "COPP_TRAP": { + "arp": { + "trap_group": "queue4_group2", + "trap_ids": "arp_req,arp_resp,neigh_discovery" + }, + "bgp": { + "trap_group": "queue4_group1", + "trap_ids": "bgp,bgpv6" + }, + "dhcp": { + "trap_group": "queue4_group3", + "trap_ids": "dhcp,dhcpv6" + }, + "ip2me": { + "trap_group": "queue1_group1", + "trap_ids": "ip2me" + }, + "lacp": { + "trap_group": "queue4_group1", + "trap_ids": "lacp" + }, + "lldp": { + "trap_group": "queue4_group3", + "trap_ids": "lldp" + }, + "nat": { + "trap_group": "queue1_group2", + "trap_ids": "src_nat_miss,dest_nat_miss" + }, + "sflow": { + "trap_group": "queue2_group1", + "trap_ids": "sample_packet" + }, + "ssh": { + "trap_group": "queue4_group2", + "trap_ids": "ssh" + }, + "udld": { + "trap_group": "queue4_group3", + "trap_ids": "udld" + } + }, + "CRM": { + "Config": { + "acl_counter_high_threshold": "85", + "acl_counter_low_threshold": "70", + "acl_counter_threshold_type": "percentage", + "acl_entry_high_threshold": "85", + "acl_entry_low_threshold": "70", + "acl_entry_threshold_type": "percentage", + "acl_group_high_threshold": "85", + "acl_group_low_threshold": "70", + "acl_group_threshold_type": "percentage", + "acl_table_high_threshold": "85", + "acl_table_low_threshold": "70", + "acl_table_threshold_type": "percentage", + "dnat_entry_high_threshold": "85", + "dnat_entry_low_threshold": "70", + "dnat_entry_threshold_type": "percentage", + "fdb_entry_high_threshold": "85", + "fdb_entry_low_threshold": "70", + "fdb_entry_threshold_type": "percentage", + "ipmc_entry_high_threshold": "85", + 
"ipmc_entry_low_threshold": "70", + "ipmc_entry_threshold_type": "percentage", + "ipv4_neighbor_high_threshold": "85", + "ipv4_neighbor_low_threshold": "70", + "ipv4_neighbor_threshold_type": "percentage", + "ipv4_nexthop_high_threshold": "85", + "ipv4_nexthop_low_threshold": "70", + "ipv4_nexthop_threshold_type": "percentage", + "ipv4_route_high_threshold": "85", + "ipv4_route_low_threshold": "70", + "ipv4_route_threshold_type": "percentage", + "ipv6_neighbor_high_threshold": "85", + "ipv6_neighbor_low_threshold": "70", + "ipv6_neighbor_threshold_type": "percentage", + "ipv6_nexthop_high_threshold": "85", + "ipv6_nexthop_low_threshold": "70", + "ipv6_nexthop_threshold_type": "percentage", + "ipv6_route_high_threshold": "85", + "ipv6_route_low_threshold": "70", + "ipv6_route_threshold_type": "percentage", + "nexthop_group_high_threshold": "85", + "nexthop_group_low_threshold": "70", + "nexthop_group_member_high_threshold": "85", + "nexthop_group_member_low_threshold": "70", + "nexthop_group_member_threshold_type": "percentage", + "nexthop_group_threshold_type": "percentage", + "polling_interval": "300", + "snat_entry_high_threshold": "85", + "snat_entry_low_threshold": "70", + "snat_entry_threshold_type": "percentage" + } + }, + "DEVICE_METADATA": { + "localhost": { + "buffer_model": "traditional", + "default_bgp_status": "up", + "default_pfcwd_status": "disable", + "hostname": "r-bulldog-02", + "hwsku": "ACS-MSN2100", + "mac": "98:03:9b:f8:e7:c0", + "platform": "x86_64-mlnx_msn2100-r0", + "type": "ToRRouter" + } + }, + "FEATURE": { + "bgp": { + "auto_restart": "enabled", + "has_global_scope": "False", + "has_per_asic_scope": "True", + "has_timer": "False", + "high_mem_alert": "disabled", + "state": "enabled" + }, + "database": { + "auto_restart": "disabled", + "has_global_scope": "True", + "has_per_asic_scope": "True", + "has_timer": "False", + "high_mem_alert": "disabled", + "state": "enabled" + }, + "dhcp_relay": { + "auto_restart": "enabled", + "has_global_scope": "True", + "has_per_asic_scope": "False", + "has_timer": "False", + "high_mem_alert": "disabled", + "state": "enabled" + }, + "lldp": { + "auto_restart": "enabled", + "has_global_scope": "True", + "has_per_asic_scope": "True", + "has_timer": "False", + "high_mem_alert": "disabled", + "state": "enabled", + "status": "enabled" + }, + "mgmt-framework": { + "auto_restart": "enabled", + "has_global_scope": "True", + "has_per_asic_scope": "False", + "has_timer": "True", + "high_mem_alert": "disabled", + "state": "enabled" + }, + "nat": { + "auto_restart": "enabled", + "has_global_scope": "True", + "has_per_asic_scope": "False", + "has_timer": "False", + "high_mem_alert": "disabled", + "state": "disabled" + }, + "pmon": { + "auto_restart": "enabled", + "has_global_scope": "True", + "has_per_asic_scope": "False", + "has_timer": "False", + "high_mem_alert": "disabled", + "state": "enabled" + }, + "radv": { + "auto_restart": "enabled", + "has_global_scope": "True", + "has_per_asic_scope": "False", + "has_timer": "False", + "high_mem_alert": "disabled", + "state": "enabled" + }, + "sflow": { + "auto_restart": "enabled", + "has_global_scope": "True", + "has_per_asic_scope": "False", + "has_timer": "False", + "high_mem_alert": "disabled", + "state": "disabled" + }, + "snmp": { + "auto_restart": "enabled", + "has_global_scope": "True", + "has_per_asic_scope": "False", + "has_timer": "True", + "high_mem_alert": "disabled", + "state": "enabled" + }, + "swss": { + "auto_restart": "enabled", + "has_global_scope": "False", + 
"has_per_asic_scope": "True", + "has_timer": "False", + "high_mem_alert": "disabled", + "state": "enabled" + }, + "syncd": { + "auto_restart": "enabled", + "has_global_scope": "False", + "has_per_asic_scope": "True", + "has_timer": "False", + "high_mem_alert": "disabled", + "state": "enabled" + }, + "teamd": { + "auto_restart": "enabled", + "has_global_scope": "False", + "has_per_asic_scope": "True", + "has_timer": "False", + "high_mem_alert": "disabled", + "state": "enabled" + }, + "telemetry": { + "auto_restart": "enabled", + "has_global_scope": "True", + "has_per_asic_scope": "False", + "has_timer": "True", + "high_mem_alert": "disabled", + "state": "enabled" + }, + "what-just-happened": { + "auto_restart": "disabled", + "has_timer": "True", + "high_mem_alert": "disabled", + "state": "enabled" + } + }, + "FLEX_COUNTER_TABLE": { + "BUFFER_POOL_WATERMARK": { + "FLEX_COUNTER_STATUS": "enable" + }, + "PFCWD": { + "FLEX_COUNTER_STATUS": "enable" + }, + "PG_WATERMARK": { + "FLEX_COUNTER_STATUS": "enable" + }, + "PORT": { + "FLEX_COUNTER_STATUS": "enable" + }, + "PORT_BUFFER_DROP": { + "FLEX_COUNTER_STATUS": "enable" + }, + "QUEUE": { + "FLEX_COUNTER_STATUS": "enable" + }, + "QUEUE_WATERMARK": { + "FLEX_COUNTER_STATUS": "enable" + }, + "RIF": { + "FLEX_COUNTER_STATUS": "enable" + } + }, + "KDUMP": { + "config": { + "enabled": "false", + "num_dumps": "3", + "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" + } + }, + "MGMT_INTERFACE": { + "eth0|10.210.25.44/22": { + "gwaddr": "10.210.24.1" + } + }, + "PORT": { + "Ethernet0": { + "admin_status": "up", + "alias": "etp1", + "index": "1", + "lanes": "0,1,2,3", + "speed": "100000" + }, + "Ethernet12": { + "admin_status": "up", + "alias": "etp4a", + "index": "4", + "lanes": "12,13", + "speed": "50000" + }, + "Ethernet14": { + "admin_status": "up", + "alias": "etp4b", + "index": "4", + "lanes": "14,15", + "speed": "50000" + }, + "Ethernet16": { + "admin_status": "up", + "alias": "etp5a", + "index": "5", + "lanes": "16,17", + "speed": "50000" + }, + "Ethernet18": { + "admin_status": "up", + "alias": "etp5b", + "index": "5", + "lanes": "18,19", + "speed": "50000" + }, + "Ethernet20": { + "admin_status": "up", + "alias": "etp6a", + "index": "6", + "lanes": "20", + "speed": "25000" + }, + "Ethernet21": { + "admin_status": "up", + "alias": "etp6b", + "index": "6", + "lanes": "21", + "speed": "25000" + }, + "Ethernet22": { + "admin_status": "up", + "alias": "etp6c", + "index": "6", + "lanes": "22", + "speed": "25000" + }, + "Ethernet23": { + "admin_status": "up", + "alias": "etp6d", + "index": "6", + "lanes": "23", + "speed": "25000" + }, + "Ethernet24": { + "admin_status": "up", + "alias": "etp7a", + "index": "7", + "lanes": "24", + "speed": "25000" + }, + "Ethernet25": { + "admin_status": "up", + "alias": "etp7b", + "index": "7", + "lanes": "25", + "speed": "25000" + }, + "Ethernet26": { + "admin_status": "up", + "alias": "etp7c", + "index": "7", + "lanes": "26", + "speed": "25000" + }, + "Ethernet27": { + "admin_status": "up", + "alias": "etp7d", + "index": "7", + "lanes": "27", + "speed": "25000" + }, + "Ethernet28": { + "admin_status": "up", + "alias": "etp8", + "index": "8", + "lanes": "28,29,30,31", + "speed": "100000" + }, + "Ethernet32": { + "admin_status": "up", + "alias": "etp9", + "index": "9", + "lanes": "32,33,34,35", + "speed": "100000" + }, + "Ethernet36": { + "admin_status": "up", + "alias": "etp10", + "index": "10", + "lanes": "36,37,38,39", + "speed": "100000" + }, + "Ethernet4": { + "admin_status": "up", + "alias": "etp2", + "index": 
"2", + "lanes": "4,5,6,7", + "speed": "100000" + }, + "Ethernet40": { + "admin_status": "up", + "alias": "etp11", + "index": "11", + "lanes": "40,41,42,43", + "speed": "100000" + }, + "Ethernet44": { + "admin_status": "up", + "alias": "etp12", + "index": "12", + "lanes": "44,45,46,47", + "speed": "100000" + }, + "Ethernet48": { + "admin_status": "up", + "alias": "etp13", + "index": "13", + "lanes": "48,49,50,51", + "speed": "100000" + }, + "Ethernet52": { + "admin_status": "up", + "alias": "etp14", + "index": "14", + "lanes": "52,53,54,55", + "speed": "100000" + }, + "Ethernet56": { + "admin_status": "up", + "alias": "etp15", + "index": "15", + "lanes": "56,57,58,59", + "speed": "100000" + }, + "Ethernet60": { + "admin_status": "up", + "alias": "etp16", + "index": "16", + "lanes": "60,61,62,63", + "speed": "100000" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "etp3", + "index": "3", + "lanes": "8,9,10,11", + "speed": "100000" + } + }, + "SNMP": { + "LOCATION": { + "Location": "public" + } + }, + "SNMP_COMMUNITY": { + "public": { + "TYPE": "RO" + } + }, + "VERSIONS": { + "DATABASE": { + "VERSION": "version_2_0_0" + } + }, + "WJH": { + "global": { + "mode": "debug", + "nice_level": "1", + "pci_bandwidth": "50" + } + }, + "WJH_CHANNEL": { + "forwarding": { + "drop_category_list": "L2,L3,Tunnel", + "type": "raw_and_aggregated" + }, + "layer-1": { + "drop_category_list": "L1", + "type": "raw_and_aggregated" + } + } +} diff --git a/tests/cli_autogen_input/sonic-1-list.yang b/tests/cli_autogen_input/sonic-1-list.yang new file mode 100644 index 0000000000..79a6529b3d --- /dev/null +++ b/tests/cli_autogen_input/sonic-1-list.yang @@ -0,0 +1,29 @@ +module sonic-1-list { + + yang-version 1.1; + + namespace "http://github.com/Azure/s-1-list"; + prefix s-1-list; + + container sonic-1-list { + /* sonic-1-list - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + list TABLE_1_LIST { + /* TABLE_1 - object container */ + + description "TABLE_1_LIST description"; + + key "key_name"; + + leaf key_name { + type string; + } + } + } + } +} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-1-object-container.yang b/tests/cli_autogen_input/sonic-1-object-container.yang new file mode 100644 index 0000000000..e28ef7f90a --- /dev/null +++ b/tests/cli_autogen_input/sonic-1-object-container.yang @@ -0,0 +1,23 @@ +module sonic-1-object-container { + + yang-version 1.1; + + namespace "http://github.com/Azure/s-1-object"; + prefix s-1-object; + + container sonic-1-object-container { + /* sonic-1-object-container - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + container OBJECT_1 { + /* OBJECT_1 - object container */ + + description "OBJECT_1 description"; + } + } + } +} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-1-table-container.yang b/tests/cli_autogen_input/sonic-1-table-container.yang new file mode 100644 index 0000000000..58e7293c0d --- /dev/null +++ b/tests/cli_autogen_input/sonic-1-table-container.yang @@ -0,0 +1,17 @@ +module sonic-1-table-container { + + yang-version 1.1; + + namespace "http://github.com/Azure/s-1-table"; + prefix s-1-table; + + container sonic-1-table-container { + /* sonic-1-table-container - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + } + } +} \ No newline at end of file diff --git 
a/tests/cli_autogen_input/sonic-2-lists.yang b/tests/cli_autogen_input/sonic-2-lists.yang new file mode 100644 index 0000000000..b20200415b --- /dev/null +++ b/tests/cli_autogen_input/sonic-2-lists.yang @@ -0,0 +1,42 @@ +module sonic-2-lists { + + yang-version 1.1; + + namespace "http://github.com/Azure/s-2-lists"; + prefix s-2-lists; + + container sonic-2-lists { + /* sonic-2-lists - top level container */ + + container TABLE_1 { + /* TALBE_1 - table container */ + + + description "TABLE_1 description"; + + list TABLE_1_LIST_1 { + /* TALBE_1_LIST_1 - object container */ + + description "TABLE_1_LIST_1 description"; + + key "key_name1"; + + leaf key_name1 { + type string; + } + } + + list TABLE_1_LIST_2 { + /* TALBE_1_LIST_2 - object container */ + + description "TABLE_1_LIST_2 description"; + + key "key_name2"; + + leaf key_name2 { + type string; + } + } + } + } +} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-2-object-containers.yang b/tests/cli_autogen_input/sonic-2-object-containers.yang new file mode 100644 index 0000000000..249faf4c89 --- /dev/null +++ b/tests/cli_autogen_input/sonic-2-object-containers.yang @@ -0,0 +1,29 @@ +module sonic-2-object-containers { + + yang-version 1.1; + + namespace "http://github.com/Azure/s-2-object"; + prefix s-2-object; + + container sonic-2-object-containers { + /* sonic-2-object-containers - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "FIRST_TABLE description"; + + container OBJECT_1 { + /* OBJECT_1 - object container */ + + description "OBJECT_1 description"; + } + + container OBJECT_2 { + /* OBJECT_2 - object container */ + + description "OBJECT_2 description"; + } + } + } +} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-2-table-containers.yang b/tests/cli_autogen_input/sonic-2-table-containers.yang new file mode 100644 index 0000000000..393512a313 --- /dev/null +++ b/tests/cli_autogen_input/sonic-2-table-containers.yang @@ -0,0 +1,23 @@ +module sonic-2-table-containers { + + yang-version 1.1; + + namespace "http://github.com/Azure/s-2-table"; + prefix s-2-table; + + container sonic-2-table-containers { + /* sonic-2-table-containers - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + } + + container TABLE_2 { + /* TABLE_2 - table container */ + + description "TABLE_2 description"; + } + } +} diff --git a/tests/cli_autogen_input/sonic-choice-complex.yang b/tests/cli_autogen_input/sonic-choice-complex.yang new file mode 100644 index 0000000000..7d6a66d89f --- /dev/null +++ b/tests/cli_autogen_input/sonic-choice-complex.yang @@ -0,0 +1,91 @@ +module sonic-choice-complex { + + yang-version 1.1; + + namespace "http://github.com/Azure/choice-complex"; + prefix choice-complex; + + import sonic-grouping-1 { + prefix sgroup1; + } + + import sonic-grouping-2 { + prefix sgroup2; + } + + grouping GR_5 { + leaf GR_5_LEAF_1 { + type string; + } + + leaf GR_5_LEAF_2 { + type string; + } + } + + grouping GR_6 { + leaf GR_6_LEAF_1 { + type string; + } + + leaf GR_6_LEAF_2 { + type string; + } + } + + container sonic-choice-complex { + /* sonic-choice-complex - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + container OBJECT_1 { + /* OBJECT_1 - object container, it have + * 1 choice, which have 2 cases. 
+ * first case have: 1 leaf, 1 leaf-list, 1 uses + * second case have: 2 leafs, 2 leaf-lists, 2 uses + */ + + description "OBJECT_1 description"; + + choice CHOICE_1 { + case CHOICE_1_CASE_1 { + leaf LEAF_1 { + type uint16; + } + + leaf-list LEAF_LIST_1 { + type string; + } + + uses sgroup1:GR_1; + } + + case CHOICE_1_CASE_2 { + leaf LEAF_2 { + type string; + } + + leaf LEAF_3 { + type string; + } + + leaf-list LEAF_LIST_2 { + type string; + } + + leaf-list LEAF_LIST_3 { + type string; + } + + uses GR_5; + uses sgroup1:GR_2; + uses sgroup2:GR_3; + } + } + } + } + } +} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-dynamic-object-complex-1.yang b/tests/cli_autogen_input/sonic-dynamic-object-complex-1.yang new file mode 100644 index 0000000000..9beb98549d --- /dev/null +++ b/tests/cli_autogen_input/sonic-dynamic-object-complex-1.yang @@ -0,0 +1,57 @@ +module sonic-dynamic-object-complex-1 { + + yang-version 1.1; + + namespace "http://github.com/Azure/dynamic-complex-1"; + prefix dynamic-complex-1; + + container sonic-dynamic-object-complex-1 { + /* sonic-dynamic-object-complex-1 - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + list OBJECT_1_LIST { + /* OBJECT_1_LIST - dynamic object container, it have: + * 1 key, + * 1 leaf, + * 1 leaf-list + * 1 choice + */ + + description "OBJECT_1_LIST description"; + + key "KEY_LEAF_1"; + + leaf KEY_LEAF_1 { + description "KEY_LEAF_1 description"; + type string; + } + + leaf OBJ_1_LEAF_1 { + description "OBJ_1_LEAF_1 description"; + type string; + } + + leaf-list OBJ_1_LEAF_LIST_1 { + type string; + } + + choice OBJ_1_CHOICE_1 { + case OBJ_1_CHOICE_1_CASE_1 { + leaf OBJ_1_CHOICE_1_LEAF_1 { + type uint16; + } + } + case OBJ_1_CHOICE_1_CASE_2 { + leaf OBJ_1_CHOICE_1_LEAF_2 { + type string; + } + } + } + } + } + } +} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-dynamic-object-complex-2.yang b/tests/cli_autogen_input/sonic-dynamic-object-complex-2.yang new file mode 100644 index 0000000000..00e25c8135 --- /dev/null +++ b/tests/cli_autogen_input/sonic-dynamic-object-complex-2.yang @@ -0,0 +1,84 @@ +module sonic-dynamic-object-complex-2 { + + yang-version 1.1; + + namespace "http://github.com/Azure/dynamic-complex-2"; + prefix dynamic-complex-2; + + container sonic-dynamic-object-complex-2 { + /* sonic-dynamic-object-complex-2 - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + list OBJECT_1_LIST { + /* OBJECT_1_LIST - dynamic object container, it have: + * 2 keys + * 2 leaf, + * 2 leaf-list + * 2 choice + */ + + description "OBJECT_1_LIST description"; + + key "KEY_LEAF_1 KEY_LEAF_2"; + + leaf KEY_LEAF_1 { + description "KEY_LEAF_1 description"; + type string; + } + + leaf KEY_LEAF_2 { + description "KEY_LEAF_2 description"; + type string; + } + + leaf OBJ_1_LEAF_1 { + description "OBJ_1_LEAF_1 description"; + type string; + } + + leaf OBJ_1_LEAF_2 { + description "OBJ_1_LEAF_2 description"; + type string; + } + + leaf-list OBJ_1_LEAF_LIST_1 { + type string; + } + + leaf-list OBJ_1_LEAF_LIST_2 { + type string; + } + + choice OBJ_1_CHOICE_1 { + case OBJ_1_CHOICE_1_CASE_1 { + leaf OBJ_1_CHOICE_1_LEAF_1 { + type uint16; + } + } + case OBJ_1_CHOICE_1_CASE_2 { + leaf OBJ_1_CHOICE_1_LEAF_2 { + type string; + } + } + } + + choice OBJ_1_CHOICE_2 { + case OBJ_1_CHOICE_2_CASE_1 { + leaf OBJ_1_CHOICE_2_LEAF_1 { + type uint16; + } + } + case OBJ_1_CHOICE_2_CASE_2 { + leaf 
OBJ_1_CHOICE_2_LEAF_2 { + type string; + } + } + } + } + } + } +} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-grouping-1.yang b/tests/cli_autogen_input/sonic-grouping-1.yang new file mode 100644 index 0000000000..831c3a4ad8 --- /dev/null +++ b/tests/cli_autogen_input/sonic-grouping-1.yang @@ -0,0 +1,25 @@ +module sonic-grouping-1{ + + yang-version 1.1; + + namespace "http://github.com/Azure/s-grouping-1"; + prefix s-grouping-1; + + grouping GR_1 { + leaf GR_1_LEAF_1 { + type string; + } + leaf GR_1_LEAF_2 { + type string; + } + } + + grouping GR_2 { + leaf GR_2_LEAF_1 { + type string; + } + leaf GR_2_LEAF_2 { + type string; + } + } +} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-grouping-2.yang b/tests/cli_autogen_input/sonic-grouping-2.yang new file mode 100644 index 0000000000..bfaa13db15 --- /dev/null +++ b/tests/cli_autogen_input/sonic-grouping-2.yang @@ -0,0 +1,25 @@ +module sonic-grouping-2 { + + yang-version 1.1; + + namespace "http://github.com/Azure/s-grouping-2"; + prefix s-grouping-2; + + grouping GR_3 { + leaf GR_3_LEAF_1 { + type string; + } + leaf GR_3_LEAF_2 { + type string; + } + } + + grouping GR_4 { + leaf GR_4_LEAF_1 { + type string; + } + leaf GR_4_LEAF_2 { + type string; + } + } +} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-grouping-complex.yang b/tests/cli_autogen_input/sonic-grouping-complex.yang new file mode 100644 index 0000000000..d6ed68563a --- /dev/null +++ b/tests/cli_autogen_input/sonic-grouping-complex.yang @@ -0,0 +1,96 @@ +module sonic-grouping-complex { + + yang-version 1.1; + + namespace "http://github.com/Azure/grouping-complex"; + prefix grouping-complex; + + import sonic-grouping-1 { + prefix sgroup1; + } + + import sonic-grouping-2 { + prefix sgroup2; + } + + grouping GR_5 { + leaf GR_5_LEAF_1 { + type string; + } + + leaf-list GR_5_LEAF_LIST_1 { + type string; + } + } + + grouping GR_6 { + leaf GR_6_LEAF_1 { + type string; + } + + leaf GR_6_LEAF_2 { + type string; + } + + choice GR_6_CHOICE_1 { + case CHOICE_1_CASE_1 { + leaf GR_6_CASE_1_LEAF_1 { + type uint16; + } + + leaf-list GR_6_CASE_1_LEAF_LIST_1 { + type string; + } + } + + case CHOICE_1_CASE_2 { + leaf GR_6_CASE_2_LEAF_1 { + type uint16; + } + + leaf GR_6_CASE_2_LEAF_2 { + type uint16; + } + + leaf-list GR_6_CASE_2_LEAF_LIST_1 { + type string; + } + + leaf-list GR_6_CASE_2_LEAF_LIST_2 { + type string; + } + } + } + } + + container sonic-grouping-complex { + /* sonic-grouping-complex - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + container OBJECT_1 { + /* OBJECT_1 - object container, it have + * 1 choice, which have 2 cases. 
+ * first case have: 1 leaf, 1 leaf-list, 1 uses + * second case have: 2 leafs, 2 leaf-lists, 2 uses + */ + + description "OBJECT_1 description"; + + uses sgroup1:GR_1; + } + + container OBJECT_2 { + + description "OBJECT_2 description"; + + uses GR_5; + uses GR_6; + uses sgroup2:GR_4; + } + } + } +} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-static-object-complex-1.yang b/tests/cli_autogen_input/sonic-static-object-complex-1.yang new file mode 100644 index 0000000000..a7dfee86ab --- /dev/null +++ b/tests/cli_autogen_input/sonic-static-object-complex-1.yang @@ -0,0 +1,49 @@ +module sonic-static-object-complex-1 { + + yang-version 1.1; + + namespace "http://github.com/Azure/static-complex-1"; + prefix static-complex-1; + + container sonic-static-object-complex-1 { + /* sonic-static-object-complex-1 - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + container OBJECT_1 { + /* OBJECT_1 - object container, it have: + * 1 leaf, + * 1 leaf-list + * 1 choice + */ + + description "OBJECT_1 description"; + + leaf OBJ_1_LEAF_1 { + description "OBJ_1_LEAF_1 description"; + type string; + } + + leaf-list OBJ_1_LEAF_LIST_1 { + type string; + } + + choice OBJ_1_CHOICE_1 { + case OBJ_1_CHOICE_1_CASE_1 { + leaf OBJ_1_CHOICE_1_LEAF_1 { + type uint16; + } + } + case OBJ_1_CHOICE_1_CASE_2 { + leaf OBJ_1_CHOICE_1_LEAF_2 { + type string; + } + } + } + } + } + } +} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-static-object-complex-2.yang b/tests/cli_autogen_input/sonic-static-object-complex-2.yang new file mode 100644 index 0000000000..451a445ce6 --- /dev/null +++ b/tests/cli_autogen_input/sonic-static-object-complex-2.yang @@ -0,0 +1,71 @@ +module sonic-static-object-complex-2 { + + yang-version 1.1; + + namespace "http://github.com/Azure/static-complex-2"; + prefix static-complex-2; + + container sonic-static-object-complex-2 { + /* sonic-static-object-complex-2 - top level container */ + + container TABLE_1 { + /* TABLE_1 - table container */ + + description "TABLE_1 description"; + + container OBJECT_1 { + /* OBJECT_1 - object container, it have: + * 2 leafs, + * 2 leaf-lists, + * 2 choices + */ + + description "OBJECT_1 description"; + + leaf OBJ_1_LEAF_1 { + description "OBJ_1_LEAF_1 description"; + type string; + } + + leaf OBJ_1_LEAF_2 { + description "OBJ_1_LEAF_2 description"; + type string; + } + + leaf-list OBJ_1_LEAF_LIST_1 { + type string; + } + + leaf-list OBJ_1_LEAF_LIST_2 { + type string; + } + + choice OBJ_1_CHOICE_1 { + case OBJ_1_CHOICE_1_CASE_1 { + leaf OBJ_1_CHOICE_1_LEAF_1 { + type uint16; + } + } + case OBJ_1_CHOICE_1_CASE_2 { + leaf OBJ_1_CHOICE_1_LEAF_2 { + type string; + } + } + } + + choice OBJ_1_CHOICE_2 { + case OBJ_1_CHOICE_2_CASE_1 { + leaf OBJ_1_CHOICE_2_LEAF_1 { + type uint16; + } + } + case OBJ_1_CHOICE_2_CASE_2 { + leaf OBJ_1_CHOICE_2_LEAF_2 { + type string; + } + } + } + } + } + } +} \ No newline at end of file diff --git a/tests/cli_autogen_yang_parser_test.py b/tests/cli_autogen_yang_parser_test.py new file mode 100644 index 0000000000..9ed915c69b --- /dev/null +++ b/tests/cli_autogen_yang_parser_test.py @@ -0,0 +1,196 @@ +import os +import logging +import pprint + +from sonic_cli_gen.yang_parser import YangParser +from .cli_autogen_input import assert_dictionaries + +logger = logging.getLogger(__name__) + +test_path = os.path.dirname(os.path.abspath(__file__)) +yang_models_path = '/usr/local/yang-models' +test_yang_models = [ + 
'sonic-1-table-container',
+    'sonic-2-table-containers',
+    'sonic-1-object-container',
+    'sonic-2-object-containers',
+    'sonic-1-list',
+    'sonic-2-lists',
+    'sonic-static-object-complex-1',
+    'sonic-static-object-complex-2',
+    'sonic-dynamic-object-complex-1',
+    'sonic-dynamic-object-complex-2',
+    'sonic-choice-complex',
+    'sonic-grouping-complex',
+    'sonic-grouping-1',
+    'sonic-grouping-2',
+]
+
+
+class TestYangParser:
+    @classmethod
+    def setup_class(cls):
+        logger.info("SETUP")
+        os.environ['UTILITIES_UNIT_TESTING'] = "1"
+        move_yang_models()
+
+    @classmethod
+    def teardown_class(cls):
+        logger.info("TEARDOWN")
+        os.environ['UTILITIES_UNIT_TESTING'] = "0"
+        remove_yang_models()
+
+    def test_1_table_container(self):
+        """ Test for 1 'table' container.
+            A 'table' container represents a TABLE in the Config DB schema:
+            {
+                "TABLE": {
+                    "OBJECT": {
+                        "attr": "value"
+                        ...
+                    }
+                }
+            }
+        """
+
+        base_test('sonic-1-table-container',
+                  assert_dictionaries.one_table_container)
+
+    def test_2_table_containers(self):
+        """ Test for 2 'table' containers """
+
+        base_test('sonic-2-table-containers',
+                  assert_dictionaries.two_table_containers)
+
+    def test_1_object_container(self):
+        """ Test for 1 'object' container.
+            An 'object' container represents an OBJECT in the Config DB schema:
+            {
+                "TABLE": {
+                    "OBJECT": {
+                        "attr": "value"
+                        ...
+                    }
+                }
+            }
+        """
+
+        base_test('sonic-1-object-container',
+                  assert_dictionaries.one_object_container)
+
+    def test_2_object_containers(self):
+        """ Test for 2 'object' containers """
+
+        base_test('sonic-2-object-containers',
+                  assert_dictionaries.two_object_containers)
+
+    def test_1_list(self):
+        """ Test for 1 container that contains
+            the YANG 'list' entity
+        """
+
+        base_test('sonic-1-list', assert_dictionaries.one_list)
+
+    def test_2_lists(self):
+        """ Test for 2 containers that contain
+            the YANG 'list' entity
+        """
+
+        base_test('sonic-2-lists', assert_dictionaries.two_lists)
+
+    def test_static_object_complex_1(self):
+        """ Test for an object container with:
+            1 leaf, 1 leaf-list, 1 choice.
+        """
+
+        base_test('sonic-static-object-complex-1',
+                  assert_dictionaries.static_object_complex_1)
+
+    def test_static_object_complex_2(self):
+        """ Test for an object container with:
+            2 leafs, 2 leaf-lists, 2 choices.
+        """
+
+        base_test('sonic-static-object-complex-2',
+                  assert_dictionaries.static_object_complex_2)
+
+    def test_dynamic_object_complex_1(self):
+        """ Test for an object container with:
+            1 key, 1 leaf, 1 leaf-list, 1 choice.
+        """
+
+        base_test('sonic-dynamic-object-complex-1',
+                  assert_dictionaries.dynamic_object_complex_1)
+
+    def test_dynamic_object_complex_2(self):
+        """ Test for an object container with:
+            2 keys, 2 leafs, 2 leaf-lists, 2 choices.
+        """
+
+        base_test('sonic-dynamic-object-complex-2',
+                  assert_dictionaries.dynamic_object_complex_2)
+
+    def test_choice_complex(self):
+        """ Test for an object container with a 'choice'
+            that has a complex structure:
+            leafs, leaf-lists, multiple 'uses' from different files
+        """
+
+        base_test('sonic-choice-complex',
+                  assert_dictionaries.choice_complex)
+
+    def test_grouping_complex(self):
+        """ Test for an object container with multiple 'uses' that use
+            'grouping' entities from different files.
+            The referenced 'grouping' entities have a complex structure:
+            leafs, leaf-lists, choices
+        """
+
+        base_test('sonic-grouping-complex',
+                  assert_dictionaries.grouping_complex)
+
+
+def base_test(yang_model_name, correct_dict):
+    """ General logic for each test case """
+
+    config_db_path = os.path.join(test_path,
+                                  'cli_autogen_input/config_db.json')
+    parser = YangParser(yang_model_name=yang_model_name,
+                        config_db_path=config_db_path,
+                        allow_tbl_without_yang=True,
+                        debug=False)
+    yang_dict = parser.parse_yang_model()
+    pretty_log_debug(yang_dict)
+    assert yang_dict == correct_dict
+
+
+def move_yang_models():
+    """ Copy the test YANG models to a known location
+        so that they can be parsed by the YangParser class
+    """
+
+    for yang_model in test_yang_models:
+        src_path = os.path.join(test_path,
+                                'cli_autogen_input',
+                                yang_model + '.yang')
+        cmd = 'sudo cp {} {}'.format(src_path, yang_models_path)
+        os.system(cmd)
+
+
+def remove_yang_models():
+    """ Remove the test YANG models from the known
+        location after the tests have run
+    """
+
+    for yang_model in test_yang_models:
+        yang_model_path = os.path.join(yang_models_path,
+                                       yang_model + '.yang')
+        cmd = 'sudo rm {}'.format(yang_model_path)
+        os.system(cmd)
+
+
+def pretty_log_debug(dictionary):
+    """ Pretty print of the parsed dictionary """
+
+    for line in pprint.pformat(dictionary).split('\n'):
+        logging.debug(line)
+
diff --git a/utilities_common/util_base.py b/utilities_common/util_base.py
index ff5570735c..9bea158b59 100644
--- a/utilities_common/util_base.py
+++ b/utilities_common/util_base.py
@@ -24,6 +24,7 @@ def iter_namespace(ns_pkg):
 
         for _, module_name, ispkg in iter_namespace(plugins_namespace):
             if ispkg:
+                yield from self.load_plugins(importlib.import_module(module_name))
                 continue
             log.log_debug('importing plugin: {}'.format(module_name))
             try:

From 4fdf8050fae77de5ff251c50254bcfbfac931b19 Mon Sep 17 00:00:00 2001
From: Vivek Reddy Karri
Date: Wed, 11 Aug 2021 08:19:16 +0000
Subject: [PATCH 09/60] CLI GEN-2 merged

Signed-off-by: Vivek Reddy Karri
---
 config/config_mgmt.py                       | 109 +++++--
 sonic_package_manager/constraint.py         |   2 +-
 sonic_package_manager/dockerapi.py          |   9 +
 sonic_package_manager/errors.py             |   1 -
 sonic_package_manager/main.py               |  20 +-
 sonic_package_manager/manager.py            | 179 ++++++++----
 sonic_package_manager/manifest.py           |   4 +-
 sonic_package_manager/metadata.py           |  13 +-
 sonic_package_manager/registry.py           |   2 +-
 .../service_creator/__init__.py             |   1 +
 .../service_creator/creator.py              | 268 ++++++++++++------
 .../service_creator/feature.py              | 112 +++++---
 .../service_creator/sonic_db.py             | 139 ++++++---
 tests/sonic_package_manager/conftest.py     |  34 ++-
 .../test_service_creator.py                 | 235 ++++++++++-----
 15 files changed, 786 insertions(+), 342 deletions(-)

diff --git a/config/config_mgmt.py b/config/config_mgmt.py
index 9b2021bef0..4e34a7ae00 100644
--- a/config/config_mgmt.py
+++ b/config/config_mgmt.py
@@ -2,8 +2,11 @@
 config_mgmt.py provides classes for configuration validation and for
 Dynamic Port Breakout.
''' + +import os import re import syslog +import yang as ly from json import load from sys import flags from time import sleep as tsleep @@ -46,27 +49,14 @@ def __init__(self, source="configDB", debug=False, allowTablesWithoutYang=True): try: self.configdbJsonIn = None self.configdbJsonOut = None + self.source = source self.allowTablesWithoutYang = allowTablesWithoutYang # logging vars self.SYSLOG_IDENTIFIER = "ConfigMgmt" self.DEBUG = debug - self.sy = sonic_yang.SonicYang(YANG_DIR, debug=debug) - # load yang models - self.sy.loadYangModel() - # load jIn from config DB or from config DB json file. - if source.lower() == 'configdb': - self.readConfigDB() - # treat any other source as file input - else: - self.readConfigDBJson(source) - # this will crop config, xlate and load. - self.sy.loadData(self.configdbJsonIn) - - # Raise if tables without YANG models are not allowed but exist. - if not allowTablesWithoutYang and len(self.sy.tablesWithOutYang): - raise Exception('Config has tables without YANG models') + self.__init_sonic_yang() except Exception as e: self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) @@ -74,6 +64,23 @@ def __init__(self, source="configDB", debug=False, allowTablesWithoutYang=True): return + def __init_sonic_yang(self): + self.sy = sonic_yang.SonicYang(YANG_DIR, debug=self.DEBUG) + # load yang models + self.sy.loadYangModel() + # load jIn from config DB or from config DB json file. + if self.source.lower() == 'configdb': + self.readConfigDB() + # treat any other source as file input + else: + self.readConfigDBJson(self.source) + # this will crop config, xlate and load. + self.sy.loadData(self.configdbJsonIn) + + # Raise if tables without YANG models are not allowed but exist. + if not self.allowTablesWithoutYang and len(self.sy.tablesWithOutYang): + raise Exception('Config has tables without YANG models') + def __del__(self): pass @@ -213,6 +220,70 @@ def writeConfigDB(self, jDiff): return + def add_module(self, yang_module_str, replace_if_exists=False): + """ + Validate and add new YANG module to the system. + + Parameters: + yang_module_str (str): YANG module in string representation. + + Returns: + None + """ + + module_name = self.get_module_name(yang_module_str) + module_path = os.path.join(YANG_DIR, '{}.yang'.format(module_name)) + if os.path.exists(module_path) and not replace_if_exists: + raise Exception('{} already exists'.format(module_name)) + with open(module_path, 'w') as module_file: + module_file.write(yang_module_str) + try: + self.__init_sonic_yang() + except Exception: + os.remove(module_path) + raise + + def remove_module(self, module_name): + """ + Remove YANG module on the system and validate. + + Parameters: + module_name (str): YANG module name. + + Returns: + None + """ + + module_path = os.path.join(YANG_DIR, '{}.yang'.format(module_name)) + if not os.path.exists(module_path): + return + with open(module_path, 'r') as module_file: + yang_module_str = module_file.read() + try: + os.remove(module_path) + self.__init_sonic_yang() + except Exception: + self.add_module(yang_module_str) + raise + + @staticmethod + def get_module_name(yang_module_str): + """ + Read yangs module name from yang_module_str + + Parameters: + yang_module_str(str): YANG module string. + + Returns: + str: Module name + """ + + # Instantiate new context since parse_module_mem() loads the module into context. 
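+        # Illustrative example (hypothetical input): for a yang_module_str that
+        # begins with "module sonic-acl { ... }", module.name() below would
+        # return 'sonic-acl'.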
+ sy = sonic_yang.SonicYang(YANG_DIR) + module = sy.ctx.parse_module_mem(yang_module_str, ly.LYS_IN_YANG) + return module.name() + + # End of Class ConfigMgmt class ConfigMgmtDPB(ConfigMgmt): @@ -417,8 +488,8 @@ def _deletePorts(self, ports=list(), force=False): deps.extend(dep) # No further action with no force and deps exist - if force == False and deps: - return configToLoad, deps, False; + if not force and deps: + return configToLoad, deps, False # delets all deps, No topological sort is needed as of now, if deletion # of deps fails, return immediately @@ -436,8 +507,8 @@ def _deletePorts(self, ports=list(), force=False): self.sy.deleteNode(str(xPathPort)) # Let`s Validate the tree now - if self.validateConfigData()==False: - return configToLoad, deps, False; + if not self.validateConfigData(): + return configToLoad, deps, False # All great if we are here, Lets get the diff self.configdbJsonOut = self.sy.getData() diff --git a/sonic_package_manager/constraint.py b/sonic_package_manager/constraint.py index af5a13000b..70b7165354 100644 --- a/sonic_package_manager/constraint.py +++ b/sonic_package_manager/constraint.py @@ -46,7 +46,7 @@ def parse(constraints: Dict) -> 'ComponentConstraints': """ components = {component: VersionConstraint.parse(version) - for component, version in constraints.items()} + for component, version in constraints.items()} return ComponentConstraints(components) def deparse(self) -> Dict[str, str]: diff --git a/sonic_package_manager/dockerapi.py b/sonic_package_manager/dockerapi.py index 926600d0bc..7f051d2d72 100644 --- a/sonic_package_manager/dockerapi.py +++ b/sonic_package_manager/dockerapi.py @@ -186,6 +186,15 @@ def rm(self, container: str, **kwargs): self.client.containers.get(container).remove(**kwargs) log.debug(f'removed container {container}') + def rm_by_ancestor(self, image_id: str, **kwargs): + """ Docker 'rm' command for running containers instantiated + from image passed to this function. """ + + # Clean containers based on the old image + containers = self.ps(filters={'ancestor': image_id}, all=True) + for container in containers: + self.rm(container.id, **kwargs) + def ps(self, **kwargs): """ Docker 'ps' command. 
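Keyword arguments (for example ``filters`` or ``all``) are forwarded to the underlying docker client, as rm_by_ancestor() above relies on.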
""" diff --git a/sonic_package_manager/errors.py b/sonic_package_manager/errors.py index 17279c52c4..fe4de39a39 100644 --- a/sonic_package_manager/errors.py +++ b/sonic_package_manager/errors.py @@ -143,4 +143,3 @@ class PackageComponentConflictError(PackageInstallationError): def __str__(self): return (f'Package {self.name} conflicts with {self.component} {self.constraint} ' f'in package {self.dependency} but version {self.installed_ver} is installed') - diff --git a/sonic_package_manager/main.py b/sonic_package_manager/main.py index c0589ae5b5..8a0aabb901 100644 --- a/sonic_package_manager/main.py +++ b/sonic_package_manager/main.py @@ -361,7 +361,7 @@ def install(ctx, package_source = package_expr or from_repository or from_tarball if not package_source: - exit_cli(f'Package source is not specified', fg='red') + exit_cli('Package source is not specified', fg='red') if not yes and not force: click.confirm(f'{package_source} is going to be installed, ' @@ -386,7 +386,7 @@ def install(ctx, except Exception as err: exit_cli(f'Failed to install {package_source}: {err}', fg='red') except KeyboardInterrupt: - exit_cli(f'Operation canceled by user', fg='red') + exit_cli('Operation canceled by user', fg='red') @cli.command() @@ -409,15 +409,16 @@ def reset(ctx, name, force, yes, skip_host_plugins): except Exception as err: exit_cli(f'Failed to reset package {name}: {err}', fg='red') except KeyboardInterrupt: - exit_cli(f'Operation canceled by user', fg='red') + exit_cli('Operation canceled by user', fg='red') @cli.command() @add_options(PACKAGE_COMMON_OPERATION_OPTIONS) +@click.option('--keep-config', is_flag=True, help='Keep features configuration in CONFIG DB.') @click.argument('name') @click.pass_context @root_privileges_required -def uninstall(ctx, name, force, yes): +def uninstall(ctx, name, force, yes, keep_config): """ Uninstall package. 
""" manager: PackageManager = ctx.obj @@ -426,12 +427,17 @@ def uninstall(ctx, name, force, yes): click.confirm(f'Package {name} is going to be uninstalled, ' f'continue?', abort=True, show_default=True) + uninstall_opts = { + 'force': force, + 'keep_config': keep_config, + } + try: - manager.uninstall(name, force) + manager.uninstall(name, **uninstall_opts) except Exception as err: exit_cli(f'Failed to uninstall package {name}: {err}', fg='red') except KeyboardInterrupt: - exit_cli(f'Operation canceled by user', fg='red') + exit_cli('Operation canceled by user', fg='red') @cli.command() @@ -453,7 +459,7 @@ def migrate(ctx, database, force, yes, dockerd_socket): except Exception as err: exit_cli(f'Failed to migrate packages {err}', fg='red') except KeyboardInterrupt: - exit_cli(f'Operation canceled by user', fg='red') + exit_cli('Operation canceled by user', fg='red') if __name__ == "__main__": diff --git a/sonic_package_manager/manager.py b/sonic_package_manager/manager.py index 3caf90d95f..836a992f0a 100644 --- a/sonic_package_manager/manager.py +++ b/sonic_package_manager/manager.py @@ -10,8 +10,11 @@ import docker import filelock +from config import config_mgmt from sonic_py_common import device_info +from sonic_cli_gen.generator import CliGenerator + from sonic_package_manager import utils from sonic_package_manager.constraint import ( VersionConstraint, @@ -39,12 +42,16 @@ from sonic_package_manager.progress import ProgressManager from sonic_package_manager.reference import PackageReference from sonic_package_manager.registry import RegistryResolver +from sonic_package_manager.service_creator import SONIC_CLI_COMMANDS from sonic_package_manager.service_creator.creator import ( ServiceCreator, run_command ) from sonic_package_manager.service_creator.feature import FeatureRegistry -from sonic_package_manager.service_creator.sonic_db import SonicDB +from sonic_package_manager.service_creator.sonic_db import ( + INIT_CFG_JSON, + SonicDB +) from sonic_package_manager.service_creator.utils import in_chroot from sonic_package_manager.source import ( PackageSource, @@ -52,7 +59,6 @@ RegistrySource, TarballSource ) -from sonic_package_manager.utils import DockerReference from sonic_package_manager.version import ( Version, VersionRange, @@ -102,7 +108,7 @@ def wrapped_function(*args, **kwargs): return wrapped_function -def rollback(func, *args, **kwargs): +def rollback(func, *args, **kwargs) -> Callable: """ Used in rollback callbacks to ignore failure but proceed with rollback. Error will be printed but not fail the whole procedure of rollback. """ @@ -131,7 +137,7 @@ def package_constraint_to_reference(constraint: PackageConstraint) -> PackageRef return PackageReference(package_name, version_to_tag(version_constraint)) -def parse_reference_expression(expression): +def parse_reference_expression(expression) -> PackageReference: try: return package_constraint_to_reference(PackageConstraint.parse(expression)) except ValueError: @@ -140,6 +146,36 @@ def parse_reference_expression(expression): return PackageReference.parse(expression) +def get_cli_plugin_directory(command: str) -> str: + """ Returns a plugins package directory for command group. + + Args: + command: SONiC command: "show"/"config"/"clear". + Returns: + Path to plugins package directory. 
+ """ + + pkg_loader = pkgutil.get_loader(f'{command}.plugins') + if pkg_loader is None: + raise PackageManagerError(f'Failed to get plugins path for {command} CLI') + plugins_pkg_path = os.path.dirname(pkg_loader.path) + return plugins_pkg_path + + +def get_cli_plugin_path(package: Package, command: str) -> str: + """ Returns a path where to put CLI plugin code. + + Args: + package: Package to generate this path for. + command: SONiC command: "show"/"config"/"clear". + Returns: + Path generated for this package. + """ + + plugin_module_file = package.name + '.py' + return os.path.join(get_cli_plugin_directory(command), plugin_module_file) + + def validate_package_base_os_constraints(package: Package, sonic_version_info: Dict[str, str]): """ Verify that all dependencies on base OS components are met. Args: @@ -217,11 +253,10 @@ def validate_package_tree(packages: Dict[str, Package]): continue component_version = conflicting_package.components[component] - log.debug(f'conflicting package {dependency.name}: ' + log.debug(f'conflicting package {conflict.name}: ' f'component {component} version is {component_version}') - if constraint.allows_all(component_version): - raise PackageComponentConflictError(package.name, dependency, component, + raise PackageComponentConflictError(package.name, conflict, component, constraint, component_version) @@ -367,12 +402,17 @@ def install_from_source(self, if not self.database.has_package(package.name): self.database.add_package(package.name, package.repository) + service_create_opts = { + 'state': feature_state, + 'owner': default_owner, + } + try: with contextlib.ExitStack() as exits: source.install(package) exits.callback(rollback(source.uninstall, package)) - self.service_creator.create(package, state=feature_state, owner=default_owner) + self.service_creator.create(package, **service_create_opts) exits.callback(rollback(self.service_creator.remove, package)) self.service_creator.generate_shutdown_sequence_files( @@ -400,13 +440,16 @@ def install_from_source(self, @under_lock @opt_check - def uninstall(self, name: str, force=False): + def uninstall(self, name: str, + force: bool = False, + keep_config: bool = False): """ Uninstall SONiC Package referenced by name. The uninstallation can be forced if force argument is True. Args: name: SONiC Package name. force: Force the installation. + keep_config: Keep feature configuration in databases. 
Raises: PackageManagerError """ @@ -436,17 +479,11 @@ def uninstall(self, name: str, force=False): try: self._uninstall_cli_plugins(package) - self.service_creator.remove(package) + self.service_creator.remove(package, keep_config=keep_config) self.service_creator.generate_shutdown_sequence_files( self._get_installed_packages_except(package) ) - - # Clean containers based on this image - containers = self.docker.ps(filters={'ancestor': package.image_id}, - all=True) - for container in containers: - self.docker.rm(container.id, force=True) - + self.docker.rm_by_ancestor(package.image_id, force=True) self.docker.rmi(package.image_id, force=True) package.entry.image_id = None except Exception as err: @@ -494,7 +531,6 @@ def upgrade_from_source(self, ) old_feature = old_package.manifest['service']['name'] - new_feature = new_package.manifest['service']['name'] old_version = old_package.manifest['package']['version'] new_version = new_package.manifest['package']['version'] @@ -522,6 +558,13 @@ def upgrade_from_source(self, # After all checks are passed we proceed to actual upgrade + service_create_opts = { + 'register_feature': False, + } + service_remove_opts = { + 'deregister_feature': False, + } + try: with contextlib.ExitStack() as exits: self._uninstall_cli_plugins(old_package) @@ -530,24 +573,25 @@ def upgrade_from_source(self, source.install(new_package) exits.callback(rollback(source.uninstall, new_package)) - if self.feature_registry.is_feature_enabled(old_feature): + feature_enabled = self.feature_registry.is_feature_enabled(old_feature) + + if feature_enabled: + self._systemctl_action(new_package, 'disable') + exits.callback(rollback(self._systemctl_action, + old_package, 'enable')) self._systemctl_action(old_package, 'stop') exits.callback(rollback(self._systemctl_action, old_package, 'start')) - self.service_creator.remove(old_package, deregister_feature=False) + self.service_creator.remove(old_package, **service_remove_opts) exits.callback(rollback(self.service_creator.create, old_package, - register_feature=False)) + **service_create_opts)) - # Clean containers based on the old image - containers = self.docker.ps(filters={'ancestor': old_package.image_id}, - all=True) - for container in containers: - self.docker.rm(container.id, force=True) + self.docker.rm_by_ancestor(old_package.image_id, force=True) - self.service_creator.create(new_package, register_feature=False) + self.service_creator.create(new_package, **service_create_opts) exits.callback(rollback(self.service_creator.remove, new_package, - register_feature=False)) + **service_remove_opts)) self.service_creator.generate_shutdown_sequence_files( self._get_installed_packages_and(new_package) @@ -557,11 +601,23 @@ def upgrade_from_source(self, self._get_installed_packages_and(old_package)) ) - if self.feature_registry.is_feature_enabled(new_feature): + if feature_enabled: + self._systemctl_action(new_package, 'enable') + exits.callback(rollback(self._systemctl_action, + old_package, 'disable')) self._systemctl_action(new_package, 'start') exits.callback(rollback(self._systemctl_action, new_package, 'stop')) + # Update feature configuration after we have started new service. + # If we place it before the above, our service start/stop will + # interfere with hostcfgd in rollback path leading to a service + # running with new image and not the old one. 
+ self.feature_registry.update(old_package.manifest, new_package.manifest) + exits.callback(rollback( + self.feature_registry.update, new_package.manifest, old_package.manifest) + ) + if not skip_host_plugins: self._install_cli_plugins(new_package) exits.callback(rollback(self._uninstall_cli_plugin, old_package)) @@ -613,16 +669,16 @@ def migrate_packages(self, old_package_database: PackageDatabase, dockerd_sock: Optional[str] = None): """ - Migrate packages from old database. This function can do a comparison between - current database and the database passed in as argument. If the package is - missing in the current database it will be added. If the package is installed - in the passed database and in the current it is not installed it will be - installed with a passed database package version. If the package is installed - in the passed database and it is installed in the current database but with - older version the package will be upgraded to the never version. If the package - is installed in the passed database and in the current it is installed but with - never version - no actions are taken. If dockerd_sock parameter is passed, the - migration process will use loaded images from docker library of the currently + Migrate packages from old database. This function can do a comparison between + current database and the database passed in as argument. If the package is + missing in the current database it will be added. If the package is installed + in the passed database and in the current it is not installed it will be + installed with the passed database package version. If the package is installed + in the passed database and it is installed in the current database but with + an older version the package will be upgraded to the newer version. If the package + is installed in the passed database and in the current it is installed but with + a newer version - no actions are taken. If dockerd_sock parameter is passed, the + migration process will use loaded images from docker library of the currently installed image. Args: @@ -743,7 +799,7 @@ def get_package_source(self, ref = parse_reference_expression(package_expression) return self.get_package_source(package_ref=ref) elif repository_reference: - repo_ref = DockerReference.parse(repository_reference) + repo_ref = utils.DockerReference.parse(repository_reference) repository = repo_ref['name'] reference = repo_ref['tag'] or repo_ref['digest'] reference = reference or 'latest' @@ -774,8 +830,8 @@ def get_package_source(self, if package_entry.default_reference is not None: package_ref.reference = package_entry.default_reference else: - raise PackageManagerError(f'No default reference tag. ' - f'Please specify the version or tag explicitly') + raise PackageManagerError('No default reference tag. ' + 'Please specify the version or tag explicitly') return RegistrySource(package_entry.repository, package_ref.reference, @@ -847,7 +903,7 @@ def get_installed_packages_list(self) -> List[Package]: Installed packages list.
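The decision table spelled out in the migrate_packages() docstring above can be condensed into a few lines. A sketch under simplifying assumptions: plain dicts stand in for the real PackageDatabase, tuples for Version, and the package name is made up:

def migrate_decision(old_db, current_db):
    # old_db / current_db map package name -> installed version,
    # or None if the package is known but not installed
    for name, old_ver in old_db.items():
        if name not in current_db:
            yield name, 'add to database'
        elif current_db[name] is None:
            yield name, f'install {old_ver}'
        elif current_db[name] < old_ver:
            yield name, f'upgrade to {old_ver}'
        else:
            yield name, 'no action'

print(list(migrate_decision({'dhcp-relay': (2, 0, 0)},
                            {'dhcp-relay': (1, 6, 0)})))  # upgrade to (2, 0, 0)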
""" - return [self.get_installed_package(entry.name) + return [self.get_installed_package(entry.name) for entry in self.database if entry.installed] def _migrate_package_database(self, old_package_database: PackageDatabase): @@ -906,38 +962,26 @@ def _systemctl_action(self, package: Package, action: str): for npu in range(self.num_npus): run_command(f'systemctl {action} {name}@{npu}') - @staticmethod - def _get_cli_plugin_name(package: Package): - return utils.make_python_identifier(package.name) + '.py' - - @classmethod - def _get_cli_plugin_path(cls, package: Package, command): - pkg_loader = pkgutil.get_loader(f'{command}.plugins') - if pkg_loader is None: - raise PackageManagerError(f'Failed to get plugins path for {command} CLI') - plugins_pkg_path = os.path.dirname(pkg_loader.path) - return os.path.join(plugins_pkg_path, cls._get_cli_plugin_name(package)) - def _install_cli_plugins(self, package: Package): - for command in ('show', 'config', 'clear'): + for command in SONIC_CLI_COMMANDS: self._install_cli_plugin(package, command) def _uninstall_cli_plugins(self, package: Package): - for command in ('show', 'config', 'clear'): + for command in SONIC_CLI_COMMANDS: self._uninstall_cli_plugin(package, command) def _install_cli_plugin(self, package: Package, command: str): image_plugin_path = package.manifest['cli'][command] if not image_plugin_path: return - host_plugin_path = self._get_cli_plugin_path(package, command) + host_plugin_path = get_cli_plugin_path(package, command) self.docker.extract(package.entry.image_id, image_plugin_path, host_plugin_path) def _uninstall_cli_plugin(self, package: Package, command: str): image_plugin_path = package.manifest['cli'][command] if not image_plugin_path: return - host_plugin_path = self._get_cli_plugin_path(package, command) + host_plugin_path = get_cli_plugin_path(package, command) if os.path.exists(host_plugin_path): os.remove(host_plugin_path) @@ -949,12 +993,21 @@ def get_manager() -> 'PackageManager': PackageManager """ - docker_api = DockerApi(docker.from_env()) + docker_api = DockerApi(docker.from_env(), ProgressManager()) registry_resolver = RegistryResolver() - return PackageManager(DockerApi(docker.from_env(), ProgressManager()), + metadata_resolver = MetadataResolver(docker_api, registry_resolver) + cfg_mgmt = config_mgmt.ConfigMgmt(source=INIT_CFG_JSON) + cli_generator = CliGenerator(log) + feature_registry = FeatureRegistry(SonicDB) + service_creator = ServiceCreator(feature_registry, + SonicDB, + cli_generator, + cfg_mgmt) + + return PackageManager(docker_api, registry_resolver, PackageDatabase.from_file(), - MetadataResolver(docker_api, registry_resolver), - ServiceCreator(FeatureRegistry(SonicDB), SonicDB), + metadata_resolver, + service_creator, device_info, filelock.FileLock(PACKAGE_MANAGER_LOCK_FILE, timeout=0)) diff --git a/sonic_package_manager/manifest.py b/sonic_package_manager/manifest.py index c126e2eef1..216baef756 100644 --- a/sonic_package_manager/manifest.py +++ b/sonic_package_manager/manifest.py @@ -205,7 +205,9 @@ def unmarshal(self, value): ManifestField('mandatory', DefaultMarshaller(bool), False), ManifestField('show', DefaultMarshaller(str), ''), ManifestField('config', DefaultMarshaller(str), ''), - ManifestField('clear', DefaultMarshaller(str), '') + ManifestField('clear', DefaultMarshaller(str), ''), + ManifestField('auto-generate-show', DefaultMarshaller(bool), False), + ManifestField('auto-generate-config', DefaultMarshaller(bool), False), ]) ]) diff --git a/sonic_package_manager/metadata.py 
b/sonic_package_manager/metadata.py index 7f7c25ceaf..dc718375ed 100644 --- a/sonic_package_manager/metadata.py +++ b/sonic_package_manager/metadata.py @@ -4,7 +4,7 @@ import json import tarfile -from typing import Dict +from typing import Dict, Optional from sonic_package_manager.errors import MetadataError from sonic_package_manager.manifest import Manifest @@ -24,10 +24,10 @@ def deep_update(dst: Dict, src: Dict) -> Dict: for key, value in src.items(): if isinstance(value, dict): - node = dst.setdefault(key, {}) - deep_update(node, value) + node = dst.setdefault(key, {}) + deep_update(node, value) else: - dst[key] = value + dst[key] = value return dst @@ -73,6 +73,7 @@ class Metadata: manifest: Manifest components: Dict[str, Version] = field(default_factory=dict) + yang_module_str: Optional[str] = None class MetadataResolver: @@ -182,4 +183,6 @@ def from_labels(cls, labels: Dict[str, str]) -> Metadata: except ValueError as err: raise MetadataError(f'Failed to parse component version: {err}') - return Metadata(Manifest.marshal(manifest_dict), components) + yang_module_str = sonic_metadata.get('yang-module') + + return Metadata(Manifest.marshal(manifest_dict), components, yang_module_str) diff --git a/sonic_package_manager/registry.py b/sonic_package_manager/registry.py index 8a09d9136e..8c03b078d2 100644 --- a/sonic_package_manager/registry.py +++ b/sonic_package_manager/registry.py @@ -38,7 +38,7 @@ def get_token(realm, service, scope) -> str: response = requests.get(f'{realm}?scope={scope}&service={service}') if response.status_code != requests.codes.ok: - raise AuthenticationServiceError(f'Failed to retrieve token') + raise AuthenticationServiceError('Failed to retrieve token') content = json.loads(response.content) token = content['token'] diff --git a/sonic_package_manager/service_creator/__init__.py b/sonic_package_manager/service_creator/__init__.py index e2af81ceb5..b0f4a24086 100644 --- a/sonic_package_manager/service_creator/__init__.py +++ b/sonic_package_manager/service_creator/__init__.py @@ -1,3 +1,4 @@ #!/usr/bin/env python ETC_SONIC_PATH = '/etc/sonic' +SONIC_CLI_COMMANDS = ('show', 'config', 'clear') diff --git a/sonic_package_manager/service_creator/creator.py b/sonic_package_manager/service_creator/creator.py index 4c618eb7ea..91f0f6102c 100644 --- a/sonic_package_manager/service_creator/creator.py +++ b/sonic_package_manager/service_creator/creator.py @@ -5,18 +5,27 @@ import stat import subprocess from collections import defaultdict -from typing import Dict +from typing import Dict, Type import jinja2 as jinja2 +from config.config_mgmt import ConfigMgmt from prettyprinter import pformat from toposort import toposort_flatten, CircularDependencyError +from config.config_mgmt import sonic_cfggen +from sonic_cli_gen.generator import CliGenerator + from sonic_package_manager.logger import log from sonic_package_manager.package import Package -from sonic_package_manager.service_creator import ETC_SONIC_PATH +from sonic_package_manager.service_creator import ( + ETC_SONIC_PATH, + SONIC_CLI_COMMANDS, +) from sonic_package_manager.service_creator.feature import FeatureRegistry +from sonic_package_manager.service_creator.sonic_db import SonicDB from sonic_package_manager.service_creator.utils import in_chroot + SERVICE_FILE_TEMPLATE = 'sonic.service.j2' TIMER_UNIT_TEMPLATE = 'timer.unit.j2' @@ -78,12 +87,22 @@ def set_executable_bit(filepath): os.chmod(filepath, st.st_mode | stat.S_IEXEC) +def remove_if_exists(path): + """ Remove filepath if it exists """ + + if not 
os.path.exists(path): + return + + os.remove(path) + log.info(f'removed {path}') + + def run_command(command: str): """ Run arbitrary bash command. Args: command: String command to execute as bash script Raises: - PackageManagerError: Raised when the command return code + ServiceCreatorError: Raised when the command return code is not 0. """ @@ -104,24 +123,30 @@ class ServiceCreator: def __init__(self, feature_registry: FeatureRegistry, - sonic_db): + sonic_db: Type[SonicDB], + cli_gen: CliGenerator, + cfg_mgmt: ConfigMgmt): """ Initialize ServiceCreator with: - + Args: feature_registry: FeatureRegistry object. - sonic_db: SonicDb interface. + sonic_db: SonicDB interface. + cli_gen: CliGenerator instance. + cfg_mgmt: ConfigMgmt instance. """ self.feature_registry = feature_registry self.sonic_db = sonic_db + self.cli_gen = cli_gen + self.cfg_mgmt = cfg_mgmt def create(self, package: Package, register_feature: bool = True, state: str = 'enabled', owner: str = 'local'): - """ Register package as SONiC service. - + """ Register package as SONiC service. + Args: package: Package object to install. register_feature: Whether to register this package in FEATURE table. @@ -139,54 +164,54 @@ def create(self, self.generate_systemd_service(package) self.generate_dump_script(package) self.generate_service_reconciliation_file(package) - + self.install_yang_module(package) self.set_initial_config(package) + self.install_autogen_cli_all(package) self._post_operation_hook() if register_feature: - self.feature_registry.register(package.manifest, - state, owner) + self.feature_registry.register(package.manifest, state, owner) except (Exception, KeyboardInterrupt): - self.remove(package, register_feature) + self.remove(package, deregister_feature=register_feature) raise def remove(self, package: Package, - deregister_feature: bool = True): + deregister_feature: bool = True, + keep_config: bool = False): """ Uninstall SONiC service provided by the package. - + Args: package: Package object to uninstall. deregister_feature: Whether to deregister this package from FEATURE table. + keep_config: Whether to keep package configuration in CONFIG DB.
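A quick illustration of why remove() below can unconditionally list every generated artifact: remove_if_exists() is idempotent, so files that were never generated are skipped silently (the file names here are made up):

import os
import tempfile

def remove_if_exists(path):
    # remove filepath only if it exists; otherwise do nothing
    if not os.path.exists(path):
        return
    os.remove(path)
    print(f'removed {path}')

with tempfile.TemporaryDirectory() as tmp:
    existing = os.path.join(tmp, 'test.service')
    open(existing, 'w').close()
    remove_if_exists(existing)                          # removed
    remove_if_exists(os.path.join(tmp, 'test.timer'))   # absent: silent no-op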
Returns: None """ name = package.manifest['service']['name'] + remove_if_exists(os.path.join(SYSTEMD_LOCATION, f'{name}.service')) + remove_if_exists(os.path.join(SYSTEMD_LOCATION, f'{name}@.service')) + remove_if_exists(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, f'{name}.sh')) + remove_if_exists(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, f'{name}.sh')) + remove_if_exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, f'{name}')) + remove_if_exists(os.path.join(ETC_SONIC_PATH, f'{name}_reconcile')) + self.update_dependent_list_file(package, remove=True) - def remove_file(path): - if os.path.exists(path): - os.remove(path) - log.info(f'removed {path}') - - remove_file(os.path.join(SYSTEMD_LOCATION, f'{name}.service')) - remove_file(os.path.join(SYSTEMD_LOCATION, f'{name}@.service')) - remove_file(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, f'{name}.sh')) - remove_file(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, f'{name}.sh')) - remove_file(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, f'{name}')) - remove_file(os.path.join(ETC_SONIC_PATH, f'{name}_reconcile')) + if deregister_feature and not keep_config: + self.remove_config(package) - self.update_dependent_list_file(package, remove=True) + self.uninstall_autogen_cli_all(package) + self.uninstall_yang_module(package) self._post_operation_hook() if deregister_feature: self.feature_registry.deregister(package.manifest['service']['name']) - self.remove_config(package) def generate_container_mgmt(self, package: Package): - """ Generates container management script under /usr/bin/<name>.sh for package. - + """ Generates container management script under /usr/bin/<name>.sh for package. + Args: package: Package object to generate script for. Returns: @@ -228,8 +253,8 @@ log.info(f'generated {script_path}') def generate_service_mgmt(self, package: Package): - """ Generates service management script under /usr/local/bin/<name>.sh for package. - + """ Generates service management script under /usr/local/bin/<name>.sh for package. + Args: package: Package object to generate script for. Returns: @@ -249,8 +274,8 @@ log.info(f'generated {script_path}') def generate_systemd_service(self, package: Package): - """ Generates systemd service(s) file and timer(s) (if needed) for package. - + """ Generates systemd service(s) file and timer(s) (if needed) for package. + Args: package: Package object to generate service for. Returns: @@ -297,13 +322,13 @@ def update_dependent_list_file(self, package: Package, remove=False): """ This function updates dependent list file for packages listed in "dependent-of" (path: /etc/sonic/<name>_dependent file). - Args: package: Package whose dependent packages list should be updated. + remove: True when updating during package removal. Returns: None. - """ + name = package.manifest['service']['name'] dependent_of = package.manifest['service']['dependent-of'] host_service = package.manifest['service']['host-service'] @@ -337,7 +362,6 @@ def update_dependent(service, name, multi_inst): def generate_dump_script(self, package): """ Generates dump plugin script for package. - Args: package: Package object to generate dump plugin script for. Returns: @@ -363,7 +387,7 @@ def get_shutdown_sequence(self, reboot_type: str, packages: Dict[str, Package]): """ Returns shutdown sequence file for particular reboot type. - + Args: reboot_type: Reboot type to generate service shutdown sequence for.
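get_shutdown_sequence() above builds on topological sorting (creator.py already imports toposort_flatten). A tiny sketch; the service names and the dependency direction are illustrative only:

from toposort import toposort_flatten

# each service maps to the set of services assumed to stop before it
deps = {'swss': {'teamd'}, 'syncd': {'swss'}}
print(toposort_flatten(deps, sort=True))  # ['teamd', 'swss', 'syncd']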
packages: Dict of installed packages. @@ -410,7 +434,7 @@ def filter_not_available(services): def generate_shutdown_sequence_file(self, reboot_type: str, packages: Dict[str, Package]): """ Generates shutdown sequence file for particular reboot type (path: /etc/sonic/<reboot_type>-reboot_order). - + Args: reboot_type: Reboot type to generate service shutdown sequence for. packages: Dict of installed packages. Returns: @@ -421,11 +445,11 @@ order = self.get_shutdown_sequence(reboot_type, packages) with open(os.path.join(ETC_SONIC_PATH, f'{reboot_type}-reboot_order'), 'w') as file: file.write(' '.join(order)) - + def generate_shutdown_sequence_files(self, packages: Dict[str, Package]): - """ Generates shutdown sequence file for fast and warm reboot. + """ Generates shutdown sequence files for fast and warm reboot. (path: /etc/sonic/<reboot_type>-reboot_order). - + Args: packages: Dict of installed packages. Returns: @@ -462,27 +486,18 @@ def set_initial_config(self, package): """ init_cfg = package.manifest['package']['init-cfg'] + if not init_cfg: + return - for tablename, content in init_cfg.items(): - if not isinstance(content, dict): - continue - - tables = self._get_tables(tablename) - - for key in content: - for table in tables: - cfg = content[key] - exists, old_fvs = table.get(key) - if exists: - cfg.update(old_fvs) - fvs = list(cfg.items()) - table.set(key, fvs) + for conn in self.sonic_db.get_connectors(): + cfg = conn.get_config() + new_cfg = init_cfg.copy() + sonic_cfggen.deep_update(new_cfg, cfg) + self.validate_config(new_cfg) + conn.mod_config(new_cfg) def remove_config(self, package): - """ Remove configuration based on init-cfg tables, so having - init-cfg even with tables without keys might be a good idea. - TODO: init-cfg should be validated with yang model - TODO: remove config from tables known to yang model + """ Remove configuration based on package YANG module. Args: package: Package object to remove initial configuration for. Returns: None """ - init_cfg = package.manifest['package']['init-cfg'] + if not package.metadata.yang_module_str: + return - for tablename, content in init_cfg.items(): - if not isinstance(content, dict): + module_name = self.cfg_mgmt.get_module_name(package.metadata.yang_module_str) + for tablename, module in self.cfg_mgmt.sy.confDbYangMap.items(): + if module.get('module') != module_name: continue - tables = self._get_tables(tablename) + for conn in self.sonic_db.get_connectors(): + keys = conn.get_table(tablename).keys() + for key in keys: + conn.set_entry(tablename, key, None) + + def validate_config(self, config): + """ Validate configuration through YANG. + + Args: + config: Config DB data. + Returns: + None. + Raises: + Exception: if config does not pass YANG validation. + """ + + config = sonic_cfggen.FormatConverter.to_serialized(config) + log.debug(f'validating configuration {pformat(config)}') + # This will raise exception if configuration is not valid. + # NOTE: loadData() modifies the state of ConfigMgmt instance. + # This is not desired when the goal is validation only, but the + # config loaded into this ConfigMgmt instance is not used further, + # so it is acceptable here. + self.cfg_mgmt.loadData(config) + + def install_yang_module(self, package: Package): + """ Install package's yang module in the system. + + Args: + package: Package object.
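The new set_initial_config() above overlays the current DB contents on top of the package's init-cfg, so user configuration always wins over packaged defaults. A self-contained sketch of that merge; deep_update below is a minimal stand-in for the sonic_cfggen helper:

import copy

def deep_update(dst, src):
    # recursively overlay src onto dst; src wins on conflicting scalar values
    for key, value in src.items():
        if isinstance(value, dict):
            deep_update(dst.setdefault(key, {}), value)
        else:
            dst[key] = value
    return dst

init_cfg = {'TABLE_A': {'key_a': {'field_1': 'default', 'field_2': 'value_2'}}}
current_db = {'TABLE_A': {'key_a': {'field_1': 'user_value'}}}

new_cfg = copy.deepcopy(init_cfg)
deep_update(new_cfg, current_db)
print(new_cfg)  # field_1 keeps the user's value, field_2 comes from init-cfg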
+ Returns: + None + """ + + if not package.metadata.yang_module_str: + return - for key in content: - for table in tables: - table._del(key) + self.cfg_mgmt.add_module(package.metadata.yang_module_str) - def _get_tables(self, table_name): - """ Return swsscommon Tables for all kinds of configuration DBs """ + def uninstall_yang_module(self, package: Package): + """ Uninstall package's yang module in the system. - tables = [] + Args: + package: Package object. + Returns: + None + """ - running_table = self.sonic_db.running_table(table_name) - if running_table is not None: - tables.append(running_table) + if not package.metadata.yang_module_str: + return - persistent_table = self.sonic_db.persistent_table(table_name) - if persistent_table is not None: - tables.append(persistent_table) + module_name = self.cfg_mgmt.get_module_name(package.metadata.yang_module_str) + self.cfg_mgmt.remove_module(module_name) - initial_table = self.sonic_db.initial_table(table_name) - if initial_table is not None: - tables.append(initial_table) + def install_autogen_cli_all(self, package: Package): + """ Install autogenerated CLI plugins for package. + + Args: + package: Package + Returns: + None + """ + + for command in SONIC_CLI_COMMANDS: + self.install_autogen_cli(package, command) + + def uninstall_autogen_cli_all(self, package: Package): + """ Remove autogenerated CLI plugins for package. + + Args: + package: Package + Returns: + None + """ - return tables + for command in SONIC_CLI_COMMANDS: + self.uninstall_autogen_cli(package, command) + + def install_autogen_cli(self, package: Package, command: str): + """ Install autogenerated CLI plugins for package for particular command. + + Args: + package: Package. + command: Name of command to generate CLI for. + Returns: + None + """ + + if package.metadata.yang_module_str is None: + return + if f'auto-generate-{command}' not in package.manifest['cli']: + return + if not package.manifest['cli'][f'auto-generate-{command}']: + return + module_name = self.cfg_mgmt.get_module_name(package.metadata.yang_module_str) + self.cli_gen.generate_cli_plugin(command, module_name) + log.debug(f'{command} command line interface autogenerated for {module_name}') + + def uninstall_autogen_cli(self, package: Package, command: str): + """ Uninstall autogenerated CLI plugins for package for particular command. + + Args: + package: Package. + command: Name of command to remove CLI. + Returns: + None + """ + + if package.metadata.yang_module_str is None: + return + if f'auto-generate-{command}' not in package.manifest['cli']: + return + if not package.manifest['cli'][f'auto-generate-{command}']: + return + module_name = self.cfg_mgmt.get_module_name(package.metadata.yang_module_str) + self.cli_gen.remove_cli_plugin(command, module_name) + log.debug(f'{command} command line interface removed for {module_name}') def _post_operation_hook(self): """ Common operations executed after service is created/removed. 
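The guards at the top of install_autogen_cli()/uninstall_autogen_cli() above reduce to a single predicate: a YANG module must ship with the package and the manifest must opt in per command group. A sketch with a made-up manifest fragment:

def should_autogen(cli_manifest, command, yang_module_str):
    # CLI is generated only when the package ships a YANG module and the
    # manifest opts in via the new auto-generate-<command> flags
    if yang_module_str is None:
        return False
    return bool(cli_manifest.get(f'auto-generate-{command}', False))

cli_manifest = {'auto-generate-show': True, 'auto-generate-config': False}
for command in ('show', 'config', 'clear'):
    print(command, should_autogen(cli_manifest, command, 'module sonic-test {}'))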
""" diff --git a/sonic_package_manager/service_creator/feature.py b/sonic_package_manager/service_creator/feature.py index 4df06384d2..eb8e1a0710 100644 --- a/sonic_package_manager/service_creator/feature.py +++ b/sonic_package_manager/service_creator/feature.py @@ -16,6 +16,14 @@ } +def is_enabled(cfg): + return cfg.get('state', 'disabled').lower() == 'enabled' + + +def is_multi_instance(cfg): + return str(cfg.get('has_per_asic_scope', 'False')).lower() == 'true' + + class FeatureRegistry: """ FeatureRegistry class provides an interface to register/de-register new feature persistently. """ @@ -27,51 +35,93 @@ def register(self, manifest: Manifest, state: str = 'disabled', owner: str = 'local'): + """ Register feature in CONFIG DBs. + + Args: + manifest: Feature's manifest. + state: Desired feature admin state. + owner: Owner of this feature (kube/local). + Returns: + None. + """ + name = manifest['service']['name'] - for table in self._get_tables(): - cfg_entries = self.get_default_feature_entries(state, owner) - non_cfg_entries = self.get_non_configurable_feature_entries(manifest) + db_connectors = self._sonic_db.get_connectors() + cfg_entries = self.get_default_feature_entries(state, owner) + non_cfg_entries = self.get_non_configurable_feature_entries(manifest) - exists, current_cfg = table.get(name) + for conn in db_connectors: + current_cfg = conn.get_entry(FEATURE, name) new_cfg = cfg_entries.copy() # Override configurable entries with CONFIG DB data. - new_cfg = {**new_cfg, **dict(current_cfg)} + new_cfg = {**new_cfg, **current_cfg} # Override CONFIG DB data with non configurable entries. new_cfg = {**new_cfg, **non_cfg_entries} - table.set(name, list(new_cfg.items())) + conn.set_entry(FEATURE, name, new_cfg) def deregister(self, name: str): - for table in self._get_tables(): - table._del(name) + """ Deregister feature by name. + + Args: + name: Name of the feature in CONFIG DB. + Returns: + None + """ + + db_connetors = self._sonic_db.get_connectors() + for conn in db_connetors: + conn.set_entry(FEATURE, name, None) + + def update(self, + old_manifest: Manifest, + new_manifest: Manifest): + """ Migrate feature configuration. It can be that non-configurable + feature entries have to be updated. e.g: "has_timer" for example if + the new feature introduces a service timer or name of the service has + changed, but user configurable entries are not changed). + + Args: + old_manifest: Old feature manifest. + new_manifest: New feature manifest. + Returns: + None + """ + + old_name = old_manifest['service']['name'] + new_name = new_manifest['service']['name'] + db_connectors = self._sonic_db.get_connectors() + non_cfg_entries = self.get_non_configurable_feature_entries(new_manifest) + + for conn in db_connectors: + current_cfg = conn.get_entry(FEATURE, old_name) + conn.set_entry(FEATURE, old_name, None) + + new_cfg = current_cfg.copy() + # Override CONFIG DB data with non configurable entries. + new_cfg = {**new_cfg, **non_cfg_entries} + + conn.set_entry(FEATURE, new_name, new_cfg) def is_feature_enabled(self, name: str) -> bool: """ Returns whether the feature is current enabled or not. Accesses running CONFIG DB. If no running CONFIG_DB table is found in tables returns False. 
""" - running_db_table = self._sonic_db.running_table(FEATURE) - if running_db_table is None: + conn = self._sonic_db.get_running_db_connector() + if conn is None: return False - exists, cfg = running_db_table.get(name) - if not exists: - return False - cfg = dict(cfg) - return cfg.get('state').lower() == 'enabled' + cfg = conn.get_entry(FEATURE, name) + return is_enabled(cfg) def get_multi_instance_features(self): - res = [] - init_db_table = self._sonic_db.initial_table(FEATURE) - for feature in init_db_table.keys(): - exists, cfg = init_db_table.get(feature) - assert exists - cfg = dict(cfg) - asic_flag = str(cfg.get('has_per_asic_scope', 'False')) - if asic_flag.lower() == 'true': - res.append(feature) - return res + """ Returns a list of features which run in asic namespace. """ + + conn = self._sonic_db.get_initial_db_connector() + features = conn.get_table(FEATURE) + return [feature for feature, cfg in features.items() if is_multi_instance(cfg)] @staticmethod def get_default_feature_entries(state=None, owner=None) -> Dict[str, str]: @@ -94,15 +144,3 @@ def get_non_configurable_feature_entries(manifest) -> Dict[str, str]: 'has_global_scope': str(manifest['service']['host-service']), 'has_timer': str(manifest['service']['delayed']), } - - def _get_tables(self): - tables = [] - running = self._sonic_db.running_table(FEATURE) - if running is not None: # it's Ok if there is no database container running - tables.append(running) - persistent = self._sonic_db.persistent_table(FEATURE) - if persistent is not None: # it's Ok if there is no config_db.json - tables.append(persistent) - tables.append(self._sonic_db.initial_table(FEATURE)) # init_cfg.json is must - - return tables diff --git a/sonic_package_manager/service_creator/sonic_db.py b/sonic_package_manager/service_creator/sonic_db.py index a064c60c4a..6b617cb802 100644 --- a/sonic_package_manager/service_creator/sonic_db.py +++ b/sonic_package_manager/service_creator/sonic_db.py @@ -6,6 +6,8 @@ from swsscommon import swsscommon +from config.config_mgmt import sonic_cfggen + from sonic_package_manager.service_creator import ETC_SONIC_PATH from sonic_package_manager.service_creator.utils import in_chroot @@ -14,46 +16,74 @@ INIT_CFG_JSON = os.path.join(ETC_SONIC_PATH, 'init_cfg.json') -class FileDbTable: - """ swsscommon.Table adapter for persistent DBs. """ - - def __init__(self, file, table): - self._file = file - self._table = table +class PersistentConfigDbConnector: + """ This class implements swsscommon.ConfigDBConnector methods for persistent DBs (JSON files). + For method description refer to swsscommon.ConfigDBConnector. 
+ """ - def keys(self): - with open(self._file) as stream: - config = json.load(stream) - return config.get(self._table, {}).keys() + def __init__(self, filepath): + self._filepath = filepath - def get(self, key): - with open(self._file) as stream: - config = json.load(stream) - - table = config.get(self._table, {}) - exists = key in table - fvs_dict = table.get(key, {}) - fvs = list(fvs_dict.items()) - return exists, fvs - - def set(self, key, fvs): - with open(self._file) as stream: - config = json.load(stream) - - table = config.setdefault(self._table, {}) - table.update({key: dict(fvs)}) - - with open(self._file, 'w') as stream: - json.dump(config, stream, indent=4) - - def _del(self, key): - with open(self._file) as stream: + def get_config(self): + with open(self._filepath) as stream: config = json.load(stream) + config = sonic_cfggen.FormatConverter.to_deserialized(config) + return config + + def get_entry(self, table, key): + table = table.upper() + table_data = self.get_table(table) + return table_data.get(key, {}) + + def get_table(self, table): + table = table.upper() + config = self.get_config() + return config.get(table, {}) + + def set_entry(self, table, key, data): + table = table.upper() + config = self.get_config() + if data is None: + self._del_key(config, table, key) + else: + table_data = config.setdefault(table, {}) + table_data[key] = data + self._write_config(config) + + def mod_entry(self, table, key, data): + table = table.upper() + config = self.get_config() + if data is None: + self._del_key(config, table, key) + else: + table_data = config.setdefault(table, {}) + curr_data = table_data.setdefault(key, {}) + curr_data.update(data) + self._write_config(config) + + def mod_config(self, config): + for table_name in config: + table_data = config[table_name] + if table_data is None: + self._del_table(config, table_name) + continue + for key in table_data: + self.mod_entry(table_name, key, table_data[key]) + + def _del_table(self, config, table): + with contextlib.suppress(KeyError): + config.pop(table) + def _del_key(self, config, table, key): with contextlib.suppress(KeyError): - config[self._table].pop(key) + config[table].pop(key) + + if not config[table]: + self._del_table(config, table) - with open(self._file, 'w') as stream: + def _write_config(self, config): + config = sonic_cfggen.FormatConverter.to_serialized(config) + with open(self._filepath, 'w') as stream: json.dump(config, stream, indent=4) @@ -62,37 +92,52 @@ class SonicDB: running DB and also for persistent and initial configs. """ - _running = None + _running_db_conn = None + + @classmethod + def get_connectors(cls): + """ Yields available DBs connectors. """ + + initial_db_conn = cls.get_initial_db_connector() + persistent_db_conn = cls.get_persistent_db_connector() + running_db_conn = cls.get_running_db_connector() + + yield initial_db_conn + if persistent_db_conn is not None: + yield persistent_db_conn + if running_db_conn is not None: + yield running_db_conn @classmethod - def running_table(cls, table): - """ Returns running DB table. """ + def get_running_db_connector(cls): + """ Returns running DB connector. """ # In chroot we can connect to a running # DB via TCP socket, we should ignore this case. if in_chroot(): return None - if cls._running is None: + if cls._running_db_conn is None: try: - cls._running = swsscommon.DBConnector(CONFIG_DB, 0) + cls._running_db_conn = swsscommon.ConfigDBConnector() + cls._running_db_conn.connect() except RuntimeError: # Failed to connect to DB. 
- return None + cls._running_db_conn = None - return swsscommon.Table(cls._running, table) + return cls._running_db_conn @classmethod - def persistent_table(cls, table): - """ Returns persistent DB table. """ + def get_persistent_db_connector(cls): + """ Returns persistent DB connector. """ if not os.path.exists(CONFIG_DB_JSON): return None - return FileDbTable(CONFIG_DB_JSON, table) + return PersistentConfigDbConnector(CONFIG_DB_JSON) @classmethod - def initial_table(cls, table): - """ Returns initial DB table. """ + def get_initial_db_connector(cls): + """ Returns initial DB connector. """ - return FileDbTable(INIT_CFG_JSON, table) + return PersistentConfigDbConnector(INIT_CFG_JSON) diff --git a/tests/sonic_package_manager/conftest.py b/tests/sonic_package_manager/conftest.py index 2788a75cd3..1ec067657c 100644 --- a/tests/sonic_package_manager/conftest.py +++ b/tests/sonic_package_manager/conftest.py @@ -7,6 +7,8 @@ import pytest from docker_image.reference import Reference +from config.config_mgmt import ConfigMgmt + from sonic_package_manager.database import PackageDatabase, PackageEntry from sonic_package_manager.manager import DockerApi, PackageManager from sonic_package_manager.manifest import Manifest @@ -62,7 +64,17 @@ def mock_service_creator(): @pytest.fixture def mock_sonic_db(): - yield Mock() + yield MagicMock() + + +@pytest.fixture +def mock_config_mgmt(): + yield MagicMock() + + +@pytest.fixture +def mock_cli_gen(): + yield MagicMock() @pytest.fixture @@ -107,7 +119,7 @@ def __init__(self): 'before': ['swss'], } ) - self.add('Azure/docker-test', '1.6.0', 'test-package', '1.6.0') + self.add('Azure/docker-test', '1.6.0', 'test-package', '1.6.0', yang='TEST') self.add('Azure/docker-test-2', '1.5.0', 'test-package-2', '1.5.0') self.add('Azure/docker-test-2', '2.0.0', 'test-package-2', '2.0.0') self.add('Azure/docker-test-3', 'latest', 'test-package-3', '1.6.0') @@ -124,23 +136,26 @@ def __init__(self): def from_registry(self, repository: str, reference: str): manifest = Manifest.marshal(self.metadata_store[repository][reference]['manifest']) components = self.metadata_store[repository][reference]['components'] - return Metadata(manifest, components) + yang = self.metadata_store[repository][reference]['yang'] + return Metadata(manifest, components, yang) def from_local(self, image: str): ref = Reference.parse(image) manifest = Manifest.marshal(self.metadata_store[ref['name']][ref['tag']]['manifest']) components = self.metadata_store[ref['name']][ref['tag']]['components'] - return Metadata(manifest, components) + yang = self.metadata_store[ref['name']][ref['tag']]['yang'] + return Metadata(manifest, components, yang) def from_tarball(self, filepath: str) -> Manifest: path, ref = filepath.split(':') manifest = Manifest.marshal(self.metadata_store[path][ref]['manifest']) components = self.metadata_store[path][ref]['components'] - return Metadata(manifest, components) + yang = self.metadata_store[path][ref]['yang'] + return Metadata(manifest, components, yang) def add(self, repo, reference, name, version, components=None, warm_shutdown=None, fast_shutdown=None, - processes=None): + processes=None, yang=None): repo_dict = self.metadata_store.setdefault(repo, {}) repo_dict[reference] = { 'manifest': { @@ -157,6 +172,7 @@ def add(self, repo, reference, name, version, components=None, 'processes': processes or [], }, 'components': components or {}, + 'yang': yang, } yield FakeMetadataResolver() @@ -252,7 +268,7 @@ def fake_db(fake_metadata_resolver): description='SONiC Package Manager 
Test Package', default_reference='1.6.0', installed=False, - built_in=False + built_in=False, ) add_package( content, @@ -402,8 +418,8 @@ def sonic_fs(fs): @pytest.fixture(autouse=True) def patch_pkgutil(): - with mock.patch('pkgutil.get_loader'): - yield + with mock.patch('pkgutil.get_loader') as loader: + yield loader @pytest.fixture diff --git a/tests/sonic_package_manager/test_service_creator.py b/tests/sonic_package_manager/test_service_creator.py index ffa6737531..456cc71a4a 100644 --- a/tests/sonic_package_manager/test_service_creator.py +++ b/tests/sonic_package_manager/test_service_creator.py @@ -1,7 +1,8 @@ #!/usr/bin/env python import os -from unittest.mock import Mock, MagicMock +import copy +from unittest.mock import Mock, call import pytest @@ -59,13 +60,25 @@ def manifest(): }) -def test_service_creator(sonic_fs, manifest, package_manager, mock_feature_registry, mock_sonic_db): - creator = ServiceCreator(mock_feature_registry, mock_sonic_db) +@pytest.fixture() +def service_creator(mock_feature_registry, + mock_sonic_db, + mock_cli_gen, + mock_config_mgmt): + yield ServiceCreator( + mock_feature_registry, + mock_sonic_db, + mock_cli_gen, + mock_config_mgmt + ) + + +def test_service_creator(sonic_fs, manifest, service_creator, package_manager): entry = PackageEntry('test', 'azure/sonic-test') package = Package(entry, Metadata(manifest)) installed_packages = package_manager._get_installed_packages_and(package) - creator.create(package) - creator.generate_shutdown_sequence_files(installed_packages) + service_creator.create(package) + service_creator.generate_shutdown_sequence_files(installed_packages) assert sonic_fs.exists(os.path.join(ETC_SONIC_PATH, 'swss_dependent')) assert sonic_fs.exists(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, 'test.sh')) @@ -81,122 +94,200 @@ def read_file(name): assert read_file('test_reconcile') == 'test-process test-process-3' -def test_service_creator_with_timer_unit(sonic_fs, manifest, mock_feature_registry, mock_sonic_db): - creator = ServiceCreator(mock_feature_registry, mock_sonic_db) +def test_service_creator_with_timer_unit(sonic_fs, manifest, service_creator): entry = PackageEntry('test', 'azure/sonic-test') package = Package(entry, Metadata(manifest)) - creator.create(package) + service_creator.create(package) assert not sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) manifest['service']['delayed'] = True package = Package(entry, Metadata(manifest)) - creator.create(package) + service_creator.create(package) assert sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) -def test_service_creator_with_debug_dump(sonic_fs, manifest, mock_feature_registry, mock_sonic_db): - creator = ServiceCreator(mock_feature_registry, mock_sonic_db) +def test_service_creator_with_debug_dump(sonic_fs, manifest, service_creator): entry = PackageEntry('test', 'azure/sonic-test') package = Package(entry, Metadata(manifest)) - creator.create(package) + service_creator.create(package) assert not sonic_fs.exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, 'test')) manifest['package']['debug-dump'] = '/some/command' package = Package(entry, Metadata(manifest)) - creator.create(package) + service_creator.create(package) assert sonic_fs.exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, 'test')) -def test_service_creator_initial_config(sonic_fs, manifest, mock_feature_registry, mock_sonic_db): - mock_table = Mock() - mock_table.get = Mock(return_value=(True, (('field_2', 'original_value_2'),))) - mock_sonic_db.initial_table = Mock(return_value=mock_table) 
- mock_sonic_db.persistent_table = Mock(return_value=mock_table) - mock_sonic_db.running_table = Mock(return_value=mock_table) +def test_service_creator_yang(sonic_fs, manifest, mock_sonic_db, + mock_config_mgmt, service_creator): + test_yang = 'TEST YANG' + test_yang_module = 'sonic-test' - creator = ServiceCreator(mock_feature_registry, mock_sonic_db) + mock_connector = Mock() + mock_sonic_db.get_connectors = Mock(return_value=[mock_connector]) + mock_connector.get_table = Mock(return_value={'key_a': {'field_1': 'value_1'}}) + mock_connector.get_config = Mock(return_value={ + 'TABLE_A': mock_connector.get_table('') + }) entry = PackageEntry('test', 'azure/sonic-test') - package = Package(entry, Metadata(manifest)) - creator.create(package) + package = Package(entry, Metadata(manifest, yang_module_str=test_yang)) + service_creator.create(package) - assert not sonic_fs.exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, 'test')) + mock_config_mgmt.add_module.assert_called_with(test_yang) + mock_config_mgmt.get_module_name = Mock(return_value=test_yang_module) manifest['package']['init-cfg'] = { 'TABLE_A': { 'key_a': { - 'field_1': 'value_1', + 'field_1': 'new_value_1', 'field_2': 'value_2' }, }, } - package = Package(entry, Metadata(manifest)) + package = Package(entry, Metadata(manifest, yang_module_str=test_yang)) + + service_creator.create(package) + + mock_config_mgmt.add_module.assert_called_with(test_yang) + + mock_connector.mod_config.assert_called_with( + { + 'TABLE_A': { + 'key_a': { + 'field_1': 'value_1', + 'field_2': 'value_2', + }, + }, + } + ) + + mock_config_mgmt.sy.confDbYangMap = { + 'TABLE_A': {'module': test_yang_module} + } + + service_creator.remove(package) + mock_connector.set_entry.assert_called_with('TABLE_A', 'key_a', None) + mock_config_mgmt.remove_module.assert_called_with(test_yang_module) - creator.create(package) - mock_table.set.assert_called_with('key_a', [('field_1', 'value_1'), - ('field_2', 'original_value_2')]) - creator.remove(package) - mock_table._del.assert_called_with('key_a') +def test_service_creator_autocli(sonic_fs, manifest, mock_cli_gen, + mock_config_mgmt, service_creator): + test_yang = 'TEST YANG' + test_yang_module = 'sonic-test' + + manifest['cli']['auto-generate-show'] = True + manifest['cli']['auto-generate-config'] = True + + entry = PackageEntry('test', 'azure/sonic-test') + package = Package(entry, Metadata(manifest, yang_module_str=test_yang)) + mock_config_mgmt.get_module_name = Mock(return_value=test_yang_module) + service_creator.create(package) + + mock_cli_gen.generate_cli_plugin.assert_has_calls( + [ + call('show', test_yang_module), + call('config', test_yang_module), + ], + any_order=True + ) + + service_creator.remove(package) + mock_cli_gen.remove_cli_plugin.assert_has_calls( + [ + call('show', test_yang_module), + call('config', test_yang_module), + ], + any_order=True + ) def test_feature_registration(mock_sonic_db, manifest): - mock_feature_table = Mock() - mock_feature_table.get = Mock(return_value=(False, ())) - mock_sonic_db.initial_table = Mock(return_value=mock_feature_table) - mock_sonic_db.persistent_table = Mock(return_value=mock_feature_table) - mock_sonic_db.running_table = Mock(return_value=mock_feature_table) + mock_connector = Mock() + mock_connector.get_entry = Mock(return_value={}) + mock_sonic_db.get_connectors = Mock(return_value=[mock_connector]) feature_registry = FeatureRegistry(mock_sonic_db) feature_registry.register(manifest) - mock_feature_table.set.assert_called_with('test', [ - ('state', 
'disabled'), - ('auto_restart', 'enabled'), - ('high_mem_alert', 'disabled'), - ('set_owner', 'local'), - ('has_per_asic_scope', 'False'), - ('has_global_scope', 'True'), - ('has_timer', 'False'), - ]) + mock_connector.set_entry.assert_called_with('FEATURE', 'test', { + 'state': 'disabled', + 'auto_restart': 'enabled', + 'high_mem_alert': 'disabled', + 'set_owner': 'local', + 'has_per_asic_scope': 'False', + 'has_global_scope': 'True', + 'has_timer': 'False', + }) + + +def test_feature_update(mock_sonic_db, manifest): + curr_feature_config = { + 'state': 'enabled', + 'auto_restart': 'enabled', + 'high_mem_alert': 'disabled', + 'set_owner': 'local', + 'has_per_asic_scope': 'False', + 'has_global_scope': 'True', + 'has_timer': 'False', + } + mock_connector = Mock() + mock_connector.get_entry = Mock(return_value=curr_feature_config) + mock_sonic_db.get_connectors = Mock(return_value=[mock_connector]) + feature_registry = FeatureRegistry(mock_sonic_db) + + new_manifest = copy.deepcopy(manifest) + new_manifest['service']['name'] = 'test_new' + new_manifest['service']['delayed'] = True + + feature_registry.update(manifest, new_manifest) + + mock_connector.set_entry.assert_has_calls([ + call('FEATURE', 'test', None), + call('FEATURE', 'test_new', { + 'state': 'enabled', + 'auto_restart': 'enabled', + 'high_mem_alert': 'disabled', + 'set_owner': 'local', + 'has_per_asic_scope': 'False', + 'has_global_scope': 'True', + 'has_timer': 'True', + }), + ], any_order=True) def test_feature_registration_with_timer(mock_sonic_db, manifest): manifest['service']['delayed'] = True - mock_feature_table = Mock() - mock_feature_table.get = Mock(return_value=(False, ())) - mock_sonic_db.initial_table = Mock(return_value=mock_feature_table) - mock_sonic_db.persistent_table = Mock(return_value=mock_feature_table) - mock_sonic_db.running_table = Mock(return_value=mock_feature_table) + mock_connector = Mock() + mock_connector.get_entry = Mock(return_value={}) + mock_sonic_db.get_connectors = Mock(return_value=[mock_connector]) feature_registry = FeatureRegistry(mock_sonic_db) feature_registry.register(manifest) - mock_feature_table.set.assert_called_with('test', [ - ('state', 'disabled'), - ('auto_restart', 'enabled'), - ('high_mem_alert', 'disabled'), - ('set_owner', 'local'), - ('has_per_asic_scope', 'False'), - ('has_global_scope', 'True'), - ('has_timer', 'True'), - ]) + mock_connector.set_entry.assert_called_with('FEATURE', 'test', { + 'state': 'disabled', + 'auto_restart': 'enabled', + 'high_mem_alert': 'disabled', + 'set_owner': 'local', + 'has_per_asic_scope': 'False', + 'has_global_scope': 'True', + 'has_timer': 'True', + }) def test_feature_registration_with_non_default_owner(mock_sonic_db, manifest): - mock_feature_table = Mock() - mock_feature_table.get = Mock(return_value=(False, ())) - mock_sonic_db.initial_table = Mock(return_value=mock_feature_table) - mock_sonic_db.persistent_table = Mock(return_value=mock_feature_table) - mock_sonic_db.running_table = Mock(return_value=mock_feature_table) + mock_connector = Mock() + mock_connector.get_entry = Mock(return_value={}) + mock_sonic_db.get_connectors = Mock(return_value=[mock_connector]) feature_registry = FeatureRegistry(mock_sonic_db) feature_registry.register(manifest, owner='kube') - mock_feature_table.set.assert_called_with('test', [ - ('state', 'disabled'), - ('auto_restart', 'enabled'), - ('high_mem_alert', 'disabled'), - ('set_owner', 'kube'), - ('has_per_asic_scope', 'False'), - ('has_global_scope', 'True'), - ('has_timer', 'False'), - ]) + 
mock_connector.set_entry.assert_called_with('FEATURE', 'test', { + 'state': 'disabled', + 'auto_restart': 'enabled', + 'high_mem_alert': 'disabled', + 'set_owner': 'kube', + 'has_per_asic_scope': 'False', + 'has_global_scope': 'True', + 'has_timer': 'False', + }) From 7def4b78f40496e8b9797087e30698dfea8c1eca Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Wed, 11 Aug 2021 18:19:04 +0000 Subject: [PATCH 10/60] Removed a few tests Signed-off-by: Vivek Reddy Karri --- tests/cli_autogen_yang_parser_test.py | 196 -- tests/generic_config_updater/__init__.py | 0 .../files/any_config_db.json | 2 - .../files/any_other_config_db.json | 4 - .../files/config_db_after_multi_patch.json | 122 -- .../config_db_after_single_operation.json | 83 - .../files/config_db_as_json.json | 92 - .../files/config_db_as_json_invalid.json | 7 - .../files/config_db_choice.json | 17 - .../files/config_db_no_dependencies.json | 39 - .../files/config_db_with_crm.json | 9 - .../files/config_db_with_device_metadata.json | 16 - .../files/config_db_with_interface.json | 20 - .../config_db_with_portchannel_and_acl.json | 25 - .../config_db_with_portchannel_interface.json | 10 - .../contrainer_with_container_config_db.json | 7 - .../files/cropped_config_db_as_json.json | 86 - .../files/dpb_1_split_full_config.json | 35 - .../files/dpb_1_to_4.json-patch | 88 - .../files/dpb_4_splits_full_config.json | 65 - .../files/dpb_4_to_1.json-patch | 58 - .../files/empty_config_db.json | 2 - ...multi_operation_config_db_patch.json-patch | 88 - ...ulti_operation_sonic_yang_patch.json-patch | 97 - .../files/simple_config_db_inc_deps.json | 20 - ...ingle_operation_config_db_patch.json-patch | 6 - ...ngle_operation_sonic_yang_patch.json-patch | 6 - .../files/sonic_yang_after_multi_patch.json | 153 -- .../files/sonic_yang_as_json.json | 114 -- .../files/sonic_yang_as_json_invalid.json | 13 - ...c_yang_as_json_with_unexpected_colons.json | 114 -- .../sonic_yang_as_json_without_colons.json | 114 -- .../generic_updater_test.py | 766 -------- .../generic_config_updater/gu_common_test.py | 635 ------ .../generic_config_updater/gutest_helpers.py | 53 - .../patch_sorter_test.py | 1730 ----------------- 36 files changed, 4892 deletions(-) delete mode 100644 tests/cli_autogen_yang_parser_test.py delete mode 100644 tests/generic_config_updater/__init__.py delete mode 100644 tests/generic_config_updater/files/any_config_db.json delete mode 100644 tests/generic_config_updater/files/any_other_config_db.json delete mode 100644 tests/generic_config_updater/files/config_db_after_multi_patch.json delete mode 100644 tests/generic_config_updater/files/config_db_after_single_operation.json delete mode 100644 tests/generic_config_updater/files/config_db_as_json.json delete mode 100644 tests/generic_config_updater/files/config_db_as_json_invalid.json delete mode 100644 tests/generic_config_updater/files/config_db_choice.json delete mode 100644 tests/generic_config_updater/files/config_db_no_dependencies.json delete mode 100644 tests/generic_config_updater/files/config_db_with_crm.json delete mode 100644 tests/generic_config_updater/files/config_db_with_device_metadata.json delete mode 100644 tests/generic_config_updater/files/config_db_with_interface.json delete mode 100644 tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json delete mode 100644 tests/generic_config_updater/files/config_db_with_portchannel_interface.json delete mode 100644 tests/generic_config_updater/files/contrainer_with_container_config_db.json delete mode 100644 
tests/generic_config_updater/files/cropped_config_db_as_json.json delete mode 100644 tests/generic_config_updater/files/dpb_1_split_full_config.json delete mode 100644 tests/generic_config_updater/files/dpb_1_to_4.json-patch delete mode 100644 tests/generic_config_updater/files/dpb_4_splits_full_config.json delete mode 100644 tests/generic_config_updater/files/dpb_4_to_1.json-patch delete mode 100644 tests/generic_config_updater/files/empty_config_db.json delete mode 100644 tests/generic_config_updater/files/multi_operation_config_db_patch.json-patch delete mode 100644 tests/generic_config_updater/files/multi_operation_sonic_yang_patch.json-patch delete mode 100644 tests/generic_config_updater/files/simple_config_db_inc_deps.json delete mode 100644 tests/generic_config_updater/files/single_operation_config_db_patch.json-patch delete mode 100644 tests/generic_config_updater/files/single_operation_sonic_yang_patch.json-patch delete mode 100644 tests/generic_config_updater/files/sonic_yang_after_multi_patch.json delete mode 100644 tests/generic_config_updater/files/sonic_yang_as_json.json delete mode 100644 tests/generic_config_updater/files/sonic_yang_as_json_invalid.json delete mode 100644 tests/generic_config_updater/files/sonic_yang_as_json_with_unexpected_colons.json delete mode 100644 tests/generic_config_updater/files/sonic_yang_as_json_without_colons.json delete mode 100644 tests/generic_config_updater/generic_updater_test.py delete mode 100644 tests/generic_config_updater/gu_common_test.py delete mode 100644 tests/generic_config_updater/gutest_helpers.py delete mode 100644 tests/generic_config_updater/patch_sorter_test.py diff --git a/tests/cli_autogen_yang_parser_test.py b/tests/cli_autogen_yang_parser_test.py deleted file mode 100644 index 9ed915c69b..0000000000 --- a/tests/cli_autogen_yang_parser_test.py +++ /dev/null @@ -1,196 +0,0 @@ -import os -import logging -import pprint - -from sonic_cli_gen.yang_parser import YangParser -from .cli_autogen_input import assert_dictionaries - -logger = logging.getLogger(__name__) - -test_path = os.path.dirname(os.path.abspath(__file__)) -yang_models_path = '/usr/local/yang-models' -test_yang_models = [ - 'sonic-1-table-container', - 'sonic-2-table-containers', - 'sonic-1-object-container', - 'sonic-2-object-containers', - 'sonic-1-list', - 'sonic-2-lists', - 'sonic-static-object-complex-1', - 'sonic-static-object-complex-2', - 'sonic-dynamic-object-complex-1', - 'sonic-dynamic-object-complex-2', - 'sonic-choice-complex', - 'sonic-grouping-complex', - 'sonic-grouping-1', - 'sonic-grouping-2', -] - - -class TestYangParser: - @classmethod - def setup_class(cls): - logger.info("SETUP") - os.environ['UTILITIES_UNIT_TESTING'] = "1" - move_yang_models() - - @classmethod - def teardown_class(cls): - logger.info("TEARDOWN") - os.environ['UTILITIES_UNIT_TESTING'] = "0" - remove_yang_models() - - def test_1_table_container(self): - """ Test for 1 'table' container - 'table' container represent TABLE in Config DB schema: - { - "TABLE": { - "OBJECT": { - "attr": "value" - ... - } - } - } - """ - - base_test('sonic-1-table-container', - assert_dictionaries.one_table_container) - - def test_2_table_containers(self): - """ Test for 2 'table' containers """ - - base_test('sonic-2-table-containers', - assert_dictionaries.two_table_containers) - - def test_1_object_container(self): - """ Test for 1 'object' container - 'object' container represent OBJECT in Config DB schema: - { - "TABLE": { - "OBJECT": { - "attr": "value" - ... 
- } - } - } - """ - - base_test('sonic-1-object-container', - assert_dictionaries.one_object_container) - - def test_2_object_containers(self): - """ Test for 2 'object' containers """ - - base_test('sonic-2-object-containers', - assert_dictionaries.two_object_containers) - - def test_1_list(self): - """ Test for 1 container that contains - the YANG 'list' entity - """ - - base_test('sonic-1-list', assert_dictionaries.one_list) - - def test_2_lists(self): - """ Test for 2 containers that contain - the YANG 'list' entity - """ - - base_test('sonic-2-lists', assert_dictionaries.two_lists) - - def test_static_object_complex_1(self): - """ Test for the object container with: - 1 leaf, 1 leaf-list, 1 choice. - """ - - base_test('sonic-static-object-complex-1', - assert_dictionaries.static_object_complex_1) - - def test_static_object_complex_2(self): - """ Test for object container with: - 2 leafs, 2 leaf-lists, 2 choices. - """ - - base_test('sonic-static-object-complex-2', - assert_dictionaries.static_object_complex_2) - - def test_dynamic_object_complex_1(self): - """ Test for object container with: - 1 key, 1 leaf, 1 leaf-list, 1 choice. - """ - - base_test('sonic-dynamic-object-complex-1', - assert_dictionaries.dynamic_object_complex_1) - - def test_dynamic_object_complex_2(self): - """ Test for object container with: - 2 keys, 2 leafs, 2 leaf-lists, 2 choices. - """ - - base_test('sonic-dynamic-object-complex-2', - assert_dictionaries.dynamic_object_complex_2) - - def test_choice_complex(self): - """ Test for object container with the 'choice' - that has a complex structure: - leafs, leaf-lists, multiple 'uses' from different files - """ - - base_test('sonic-choice-complex', - assert_dictionaries.choice_complex) - - def test_grouping_complex(self): - """ Test for object container with multiple 'uses' that use 'grouping' - from different files.
The used 'grouping' statements have a complex structure: - leafs, leaf-lists, choices - """ - - base_test('sonic-grouping-complex', - assert_dictionaries.grouping_complex) - - -def base_test(yang_model_name, correct_dict): - """ General logic for each test case """ - - config_db_path = os.path.join(test_path, - 'cli_autogen_input/config_db.json') - parser = YangParser(yang_model_name=yang_model_name, - config_db_path=config_db_path, - allow_tbl_without_yang=True, - debug=False) - yang_dict = parser.parse_yang_model() - pretty_log_debug(yang_dict) - assert yang_dict == correct_dict - - -def move_yang_models(): - """ Move the test YANG models to a known location - so they can be parsed by the YangParser class - """ - - for yang_model in test_yang_models: - src_path = os.path.join(test_path, - 'cli_autogen_input', - yang_model + '.yang') - cmd = 'sudo cp {} {}'.format(src_path, yang_models_path) - os.system(cmd) - - -def remove_yang_models(): - """ Remove the test YANG models from the known location - once they are no longer needed by the YangParser class - """ - - for yang_model in test_yang_models: - yang_model_path = os.path.join(yang_models_path, - yang_model + '.yang') - cmd = 'sudo rm {}'.format(yang_model_path) - os.system(cmd) - - -def pretty_log_debug(dictionary): - """ Pretty print of parsed dictionary """ - - for line in pprint.pformat(dictionary).split('\n'): - logging.debug(line) - diff --git a/tests/generic_config_updater/__init__.py b/tests/generic_config_updater/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/generic_config_updater/files/any_config_db.json b/tests/generic_config_updater/files/any_config_db.json deleted file mode 100644 index 2c63c08510..0000000000 --- a/tests/generic_config_updater/files/any_config_db.json +++ /dev/null @@ -1,2 +0,0 @@ -{ -} diff --git a/tests/generic_config_updater/files/any_other_config_db.json b/tests/generic_config_updater/files/any_other_config_db.json deleted file mode 100644 index c258f768cf..0000000000 --- a/tests/generic_config_updater/files/any_other_config_db.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "VLAN": { - } -} diff --git a/tests/generic_config_updater/files/config_db_after_multi_patch.json b/tests/generic_config_updater/files/config_db_after_multi_patch.json deleted file mode 100644 index 39dff7d688..0000000000 --- a/tests/generic_config_updater/files/config_db_after_multi_patch.json +++ /dev/null @@ -1,122 +0,0 @@ -{ - "VLAN_MEMBER": { - "Vlan1000|Ethernet0": { - "tagging_mode": "untagged" - }, - "Vlan1000|Ethernet4": { - "tagging_mode": "untagged" - }, - "Vlan1000|Ethernet8": { - "tagging_mode": "untagged" - }, - "Vlan100|Ethernet2": { - "tagging_mode": "untagged" - }, - "Vlan100|Ethernet3": { - "tagging_mode": "untagged" - }, - "Vlan100|Ethernet1": { - "tagging_mode": "untagged" - } - }, - "VLAN": { - "Vlan1000": { - "vlanid": "1000", - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - }, - "ACL_TABLE": { - "NO-NSW-PACL-V4": { - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0", - "Ethernet1", - "Ethernet2", - "Ethernet3" - ] - }, - "DATAACL": { - "policy_desc": "DATAACL", - "ports": [ - "Ethernet4" - ], - "stage": "ingress", - "type": "L3" - }, - "EVERFLOW": { - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }, - "EVERFLOWV6": { - "policy_desc": "EVERFLOWV6", - "ports": [ - "Ethernet4", - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRRORV6" - } - }, - "PORT": { - "Ethernet0": { - "alias": "Eth1/1", -
"lanes": "65", - "description": "", - "speed": "10000" - }, - "Ethernet4": { - "admin_status": "up", - "alias": "fortyGigE0/4", - "description": "Servers0:eth0", - "index": "1", - "lanes": "29,30,31,32", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000" - }, - "Ethernet8": { - "admin_status": "up", - "alias": "fortyGigE0/8", - "description": "Servers1:eth0", - "index": "2", - "lanes": "33,34,35,36", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000" - }, - "Ethernet3": { - "alias": "Eth1/4", - "lanes": "68", - "description": "", - "speed": "10000" - }, - "Ethernet1": { - "alias": "Eth1/2", - "lanes": "66", - "description": "", - "speed": "10000" - }, - "Ethernet2": { - "alias": "Eth1/3", - "lanes": "67", - "description": "", - "speed": "10000" - } - }, - "TABLE_WITHOUT_YANG": { - "Item1": { - "key11": "value11", - "key12": "value12" - } - } -} diff --git a/tests/generic_config_updater/files/config_db_after_single_operation.json b/tests/generic_config_updater/files/config_db_after_single_operation.json deleted file mode 100644 index 0f2f447537..0000000000 --- a/tests/generic_config_updater/files/config_db_after_single_operation.json +++ /dev/null @@ -1,83 +0,0 @@ -{ - "VLAN_MEMBER": { - "Vlan1000|Ethernet0": { - "tagging_mode": "untagged" - }, - "Vlan1000|Ethernet4": { - "tagging_mode": "untagged" - } - }, - "VLAN": { - "Vlan1000": { - "vlanid": "1000", - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - }, - "ACL_TABLE": { - "NO-NSW-PACL-V4": { - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0" - ] - }, - "DATAACL": { - "policy_desc": "DATAACL", - "ports": [ - "Ethernet4" - ], - "stage": "ingress", - "type": "L3" - }, - "EVERFLOW": { - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }, - "EVERFLOWV6": { - "policy_desc": "EVERFLOWV6", - "ports": [ - "Ethernet4", - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRRORV6" - } - }, - "PORT": { - "Ethernet0": { - "alias": "Eth1", - "lanes": "65, 66, 67, 68", - "description": "Ethernet0 100G link", - "speed": "100000" - }, - "Ethernet4": { - "admin_status": "up", - "alias": "fortyGigE0/4", - "description": "Servers0:eth0", - "index": "1", - "lanes": "29,30,31,32", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000" - }, - "Ethernet8": { - "admin_status": "up", - "alias": "fortyGigE0/8", - "description": "Servers1:eth0", - "index": "2", - "lanes": "33,34,35,36", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000" - } - } -} diff --git a/tests/generic_config_updater/files/config_db_as_json.json b/tests/generic_config_updater/files/config_db_as_json.json deleted file mode 100644 index 02fb7c7e6a..0000000000 --- a/tests/generic_config_updater/files/config_db_as_json.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "VLAN_MEMBER": { - "Vlan1000|Ethernet0": { - "tagging_mode": "untagged" - }, - "Vlan1000|Ethernet4": { - "tagging_mode": "untagged" - }, - "Vlan1000|Ethernet8": { - "tagging_mode": "untagged" - } - }, - "VLAN": { - "Vlan1000": { - "vlanid": "1000", - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - }, - "ACL_TABLE": { - "NO-NSW-PACL-V4": { - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0" - ] - }, - "DATAACL": { - "policy_desc": "DATAACL", - "ports": [ - "Ethernet4" - ], - "stage": "ingress", - "type": "L3" - }, - "EVERFLOW": { - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }, - 
"EVERFLOWV6": { - "policy_desc": "EVERFLOWV6", - "ports": [ - "Ethernet4", - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRRORV6" - } - }, - "PORT": { - "Ethernet0": { - "alias": "Eth1", - "lanes": "65, 66, 67, 68", - "description": "Ethernet0 100G link", - "speed": "100000" - }, - "Ethernet4": { - "admin_status": "up", - "alias": "fortyGigE0/4", - "description": "Servers0:eth0", - "index": "1", - "lanes": "29,30,31,32", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000" - }, - "Ethernet8": { - "admin_status": "up", - "alias": "fortyGigE0/8", - "description": "Servers1:eth0", - "index": "2", - "lanes": "33,34,35,36", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000" - } - }, - "TABLE_WITHOUT_YANG": { - "Item1": { - "key11": "value11", - "key12": "value12" - } - } -} diff --git a/tests/generic_config_updater/files/config_db_as_json_invalid.json b/tests/generic_config_updater/files/config_db_as_json_invalid.json deleted file mode 100644 index a2cfdc91df..0000000000 --- a/tests/generic_config_updater/files/config_db_as_json_invalid.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "VLAN_MEMBER": { - "Vlan1000|Ethernet8": { - "tagging_mode": "untagged" - } - } -} diff --git a/tests/generic_config_updater/files/config_db_choice.json b/tests/generic_config_updater/files/config_db_choice.json deleted file mode 100644 index eaece3248f..0000000000 --- a/tests/generic_config_updater/files/config_db_choice.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "ACL_RULE": { - "SSH_ONLY|RULE1": { - "L4_SRC_PORT":"65174-6530" - } - }, - "ACL_TABLE": { - "SSH_ONLY": { - "policy_desc": "SSH_ONLY", - "type": "CTRLPLANE", - "stage": "ingress", - "services": [ - "SSH" - ] - } - } -} diff --git a/tests/generic_config_updater/files/config_db_no_dependencies.json b/tests/generic_config_updater/files/config_db_no_dependencies.json deleted file mode 100644 index 12bdd464a5..0000000000 --- a/tests/generic_config_updater/files/config_db_no_dependencies.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "VLAN": { - "Vlan1000": { - "vlanid": "1000", - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - }, - "ACL_TABLE": { - "EVERFLOW": { - "policy_desc": "EVERFLOW", - "ports": [ - "" - ], - "stage": "ingress", - "type": "MIRROR" - }, - "EVERFLOWV6": { - "policy_desc": "EVERFLOWV6", - "ports": [ - "" - ], - "stage": "ingress", - "type": "MIRRORV6" - } - }, - "PORT": { - "Ethernet0": { - "alias": "Eth1", - "lanes": "65, 66, 67, 68", - "description": "Ethernet0 100G link", - "speed": "100000" - } - } -} diff --git a/tests/generic_config_updater/files/config_db_with_crm.json b/tests/generic_config_updater/files/config_db_with_crm.json deleted file mode 100644 index 5fd324d988..0000000000 --- a/tests/generic_config_updater/files/config_db_with_crm.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "CRM": { - "Config": { - "acl_counter_high_threshold": "90", - "acl_counter_low_threshold": "70", - "acl_counter_threshold_type": "free" - } - } -} \ No newline at end of file diff --git a/tests/generic_config_updater/files/config_db_with_device_metadata.json b/tests/generic_config_updater/files/config_db_with_device_metadata.json deleted file mode 100644 index 34def579f6..0000000000 --- a/tests/generic_config_updater/files/config_db_with_device_metadata.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "DEVICE_METADATA": { - "localhost": { - "default_bgp_status": "up", - "default_pfcwd_status": "disable", - "bgp_asn": "65100", - "deployment_id": "1", - "docker_routing_config_mode": "separated", - "hostname": "vlab-01", - 
"hwsku": "Force10-S6000", - "type": "ToRRouter", - "platform": "x86_64-kvm_x86_64-r0", - "mac": "52:54:00:99:7e:85" - } - } -} \ No newline at end of file diff --git a/tests/generic_config_updater/files/config_db_with_interface.json b/tests/generic_config_updater/files/config_db_with_interface.json deleted file mode 100644 index 2e1c488a4a..0000000000 --- a/tests/generic_config_updater/files/config_db_with_interface.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "INTERFACE": { - "Ethernet8": {}, - "Ethernet8|10.0.0.1/30": { - "family": "IPv4", - "scope": "global" - } - }, - "PORT": { - "Ethernet8": { - "admin_status": "up", - "alias": "eth8", - "description": "Ethernet8", - "fec": "rs", - "lanes": "65", - "mtu": "9000", - "speed": "25000" - } - } -} diff --git a/tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json b/tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json deleted file mode 100644 index 23d33890f3..0000000000 --- a/tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "PORT": { - "Ethernet0": { - "alias": "Eth1/1", - "lanes": "65", - "description": "", - "speed": "10000" - } - }, - "PORTCHANNEL": { - "PortChannel0001": { - "admin_status": "up" - } - }, - "ACL_TABLE": { - "NO-NSW-PACL-V4": { - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0", - "PortChannel0001" - ] - } - } -} diff --git a/tests/generic_config_updater/files/config_db_with_portchannel_interface.json b/tests/generic_config_updater/files/config_db_with_portchannel_interface.json deleted file mode 100644 index 4e05639dc5..0000000000 --- a/tests/generic_config_updater/files/config_db_with_portchannel_interface.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "PORTCHANNEL": { - "PortChannel0001": { - "admin_status": "up" - } - }, - "PORTCHANNEL_INTERFACE": { - "PortChannel0001|1.1.1.1/24": {} - } -} diff --git a/tests/generic_config_updater/files/contrainer_with_container_config_db.json b/tests/generic_config_updater/files/contrainer_with_container_config_db.json deleted file mode 100644 index b0680b22b5..0000000000 --- a/tests/generic_config_updater/files/contrainer_with_container_config_db.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "FLEX_COUNTER_TABLE": { - "BUFFER_POOL_WATERMARK": { - "FLEX_COUNTER_STATUS": "enable" - } - } -} diff --git a/tests/generic_config_updater/files/cropped_config_db_as_json.json b/tests/generic_config_updater/files/cropped_config_db_as_json.json deleted file mode 100644 index 261e912c71..0000000000 --- a/tests/generic_config_updater/files/cropped_config_db_as_json.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "VLAN_MEMBER": { - "Vlan1000|Ethernet0": { - "tagging_mode": "untagged" - }, - "Vlan1000|Ethernet4": { - "tagging_mode": "untagged" - }, - "Vlan1000|Ethernet8": { - "tagging_mode": "untagged" - } - }, - "VLAN": { - "Vlan1000": { - "vlanid": "1000", - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - }, - "ACL_TABLE": { - "NO-NSW-PACL-V4": { - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0" - ] - }, - "DATAACL": { - "policy_desc": "DATAACL", - "ports": [ - "Ethernet4" - ], - "stage": "ingress", - "type": "L3" - }, - "EVERFLOW": { - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }, - "EVERFLOWV6": { - "policy_desc": "EVERFLOWV6", - "ports": [ - "Ethernet4", - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRRORV6" - } - }, - "PORT": { - "Ethernet0": { - 
"alias": "Eth1", - "lanes": "65, 66, 67, 68", - "description": "Ethernet0 100G link", - "speed": "100000" - }, - "Ethernet4": { - "admin_status": "up", - "alias": "fortyGigE0/4", - "description": "Servers0:eth0", - "index": "1", - "lanes": "29,30,31,32", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000" - }, - "Ethernet8": { - "admin_status": "up", - "alias": "fortyGigE0/8", - "description": "Servers1:eth0", - "index": "2", - "lanes": "33,34,35,36", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000" - } - } -} diff --git a/tests/generic_config_updater/files/dpb_1_split_full_config.json b/tests/generic_config_updater/files/dpb_1_split_full_config.json deleted file mode 100644 index 2097289606..0000000000 --- a/tests/generic_config_updater/files/dpb_1_split_full_config.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "PORT": { - "Ethernet0": { - "alias": "Eth1", - "lanes": "65, 66, 67, 68", - "description": "Ethernet0 100G link", - "speed": "100000" - } - }, - "ACL_TABLE": { - "NO-NSW-PACL-V4": { - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0" - ] - } - }, - "VLAN_MEMBER": { - "Vlan100|Ethernet0": { - "tagging_mode": "untagged" - } - }, - "VLAN": { - "Vlan100": { - "vlanid": "100", - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - } -} diff --git a/tests/generic_config_updater/files/dpb_1_to_4.json-patch b/tests/generic_config_updater/files/dpb_1_to_4.json-patch deleted file mode 100644 index 8eddd7a19d..0000000000 --- a/tests/generic_config_updater/files/dpb_1_to_4.json-patch +++ /dev/null @@ -1,88 +0,0 @@ -[ - { - "op": "add", - "path": "/PORT/Ethernet3", - "value": { - "alias": "Eth1/4", - "lanes": "68", - "description": "", - "speed": "10000" - } - }, - { - "op": "add", - "path": "/PORT/Ethernet1", - "value": { - "alias": "Eth1/2", - "lanes": "66", - "description": "", - "speed": "10000" - } - }, - { - "op": "add", - "path": "/PORT/Ethernet2", - "value": { - "alias": "Eth1/3", - "lanes": "67", - "description": "", - "speed": "10000" - } - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/lanes", - "value": "65" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/alias", - "value": "Eth1/1" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/description", - "value": "" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/speed", - "value": "10000" - }, - { - "op": "add", - "path": "/VLAN_MEMBER/Vlan100|Ethernet2", - "value": { - "tagging_mode": "untagged" - } - }, - { - "op": "add", - "path": "/VLAN_MEMBER/Vlan100|Ethernet3", - "value": { - "tagging_mode": "untagged" - } - }, - { - "op": "add", - "path": "/VLAN_MEMBER/Vlan100|Ethernet1", - "value": { - "tagging_mode": "untagged" - } - }, - { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1", - "value": "Ethernet1" - }, - { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/2", - "value": "Ethernet2" - }, - { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/3", - "value": "Ethernet3" - } -] diff --git a/tests/generic_config_updater/files/dpb_4_splits_full_config.json b/tests/generic_config_updater/files/dpb_4_splits_full_config.json deleted file mode 100644 index 23d1b9ecfc..0000000000 --- a/tests/generic_config_updater/files/dpb_4_splits_full_config.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "PORT": { - "Ethernet0": { - "alias": "Eth1/1", - "lanes": "65", - "description": "", - "speed": "10000" - }, - "Ethernet1": { - "alias": "Eth1/2", - "lanes": "66", - "description": "", - "speed": "10000" - }, - "Ethernet2": { - "alias": 
"Eth1/3", - "lanes": "67", - "description": "", - "speed": "10000" - }, - "Ethernet3": { - "alias": "Eth1/4", - "lanes": "68", - "description": "", - "speed": "10000" - } - }, - "ACL_TABLE": { - "NO-NSW-PACL-V4": { - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0", - "Ethernet1", - "Ethernet2", - "Ethernet3" - ] - } - }, - "VLAN_MEMBER": { - "Vlan100|Ethernet0": { - "tagging_mode": "untagged" - }, - "Vlan100|Ethernet1": { - "tagging_mode": "untagged" - }, - "Vlan100|Ethernet2": { - "tagging_mode": "untagged" - }, - "Vlan100|Ethernet3": { - "tagging_mode": "untagged" - } - }, - "VLAN": { - "Vlan100": { - "vlanid": "100", - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - } -} diff --git a/tests/generic_config_updater/files/dpb_4_to_1.json-patch b/tests/generic_config_updater/files/dpb_4_to_1.json-patch deleted file mode 100644 index 33addd290d..0000000000 --- a/tests/generic_config_updater/files/dpb_4_to_1.json-patch +++ /dev/null @@ -1,58 +0,0 @@ -[ - { - "op": "remove", - "path": "/PORT/Ethernet2" - }, - { - "op": "remove", - "path": "/PORT/Ethernet1" - }, - { - "op": "remove", - "path": "/PORT/Ethernet3" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/alias", - "value": "Eth1" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/lanes", - "value": "65, 66, 67, 68" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/description", - "value": "Ethernet0 100G link" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/speed", - "value": "100000" - }, - { - "op": "remove", - "path": "/VLAN_MEMBER/Vlan100|Ethernet1" - }, - { - "op": "remove", - "path": "/VLAN_MEMBER/Vlan100|Ethernet3" - }, - { - "op": "remove", - "path": "/VLAN_MEMBER/Vlan100|Ethernet2" - }, - { - "op": "remove", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" - }, - { - "op": "remove", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" - }, - { - "op": "remove", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" - } -] diff --git a/tests/generic_config_updater/files/empty_config_db.json b/tests/generic_config_updater/files/empty_config_db.json deleted file mode 100644 index 2c63c08510..0000000000 --- a/tests/generic_config_updater/files/empty_config_db.json +++ /dev/null @@ -1,2 +0,0 @@ -{ -} diff --git a/tests/generic_config_updater/files/multi_operation_config_db_patch.json-patch b/tests/generic_config_updater/files/multi_operation_config_db_patch.json-patch deleted file mode 100644 index 8eddd7a19d..0000000000 --- a/tests/generic_config_updater/files/multi_operation_config_db_patch.json-patch +++ /dev/null @@ -1,88 +0,0 @@ -[ - { - "op": "add", - "path": "/PORT/Ethernet3", - "value": { - "alias": "Eth1/4", - "lanes": "68", - "description": "", - "speed": "10000" - } - }, - { - "op": "add", - "path": "/PORT/Ethernet1", - "value": { - "alias": "Eth1/2", - "lanes": "66", - "description": "", - "speed": "10000" - } - }, - { - "op": "add", - "path": "/PORT/Ethernet2", - "value": { - "alias": "Eth1/3", - "lanes": "67", - "description": "", - "speed": "10000" - } - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/lanes", - "value": "65" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/alias", - "value": "Eth1/1" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/description", - "value": "" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/speed", - "value": "10000" - }, - { - "op": "add", - "path": "/VLAN_MEMBER/Vlan100|Ethernet2", - "value": { - "tagging_mode": "untagged" - } - }, - { - "op": "add", - "path": "/VLAN_MEMBER/Vlan100|Ethernet3", - 
"value": { - "tagging_mode": "untagged" - } - }, - { - "op": "add", - "path": "/VLAN_MEMBER/Vlan100|Ethernet1", - "value": { - "tagging_mode": "untagged" - } - }, - { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1", - "value": "Ethernet1" - }, - { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/2", - "value": "Ethernet2" - }, - { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/3", - "value": "Ethernet3" - } -] diff --git a/tests/generic_config_updater/files/multi_operation_sonic_yang_patch.json-patch b/tests/generic_config_updater/files/multi_operation_sonic_yang_patch.json-patch deleted file mode 100644 index f7005bb4a0..0000000000 --- a/tests/generic_config_updater/files/multi_operation_sonic_yang_patch.json-patch +++ /dev/null @@ -1,97 +0,0 @@ -[ - { - "op": "add", - "path": "/sonic-vlan:sonic-vlan/sonic-vlan:VLAN_MEMBER/VLAN_MEMBER_LIST/3", - "value": { - "name": "Vlan100", - "port": "Ethernet2", - "tagging_mode": "untagged" - } - }, - { - "op": "add", - "path": "/sonic-vlan:sonic-vlan/sonic-vlan:VLAN_MEMBER/VLAN_MEMBER_LIST/4", - "value": { - "name": "Vlan100", - "port": "Ethernet3", - "tagging_mode": "untagged" - } - }, - { - "op": "add", - "path": "/sonic-vlan:sonic-vlan/sonic-vlan:VLAN_MEMBER/VLAN_MEMBER_LIST/5", - "value": { - "name": "Vlan100", - "port": "Ethernet1", - "tagging_mode": "untagged" - } - }, - { - "op": "replace", - "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/0/lanes", - "value": "65" - }, - { - "op": "replace", - "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/0/alias", - "value": "Eth1/1" - }, - { - "op": "replace", - "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/0/speed", - "value": 10000 - }, - { - "op": "replace", - "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/0/description", - "value": "" - }, - { - "op": "add", - "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/3", - "value": { - "name": "Ethernet3", - "alias": "Eth1/4", - "lanes": "68", - "description": "", - "speed": 10000 - } - }, - { - "op": "add", - "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/4", - "value": { - "name": "Ethernet1", - "alias": "Eth1/2", - "lanes": "66", - "description": "", - "speed": 10000 - } - }, - { - "op": "add", - "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/5", - "value": { - "name": "Ethernet2", - "alias": "Eth1/3", - "lanes": "67", - "description": "", - "speed": 10000 - } - }, - { - "op": "add", - "path": "/sonic-acl:sonic-acl/sonic-acl:ACL_TABLE/ACL_TABLE_LIST/0/ports/1", - "value": "Ethernet1" - }, - { - "op": "add", - "path": "/sonic-acl:sonic-acl/sonic-acl:ACL_TABLE/ACL_TABLE_LIST/0/ports/2", - "value": "Ethernet2" - }, - { - "op": "add", - "path": "/sonic-acl:sonic-acl/sonic-acl:ACL_TABLE/ACL_TABLE_LIST/0/ports/3", - "value": "Ethernet3" - } -] diff --git a/tests/generic_config_updater/files/simple_config_db_inc_deps.json b/tests/generic_config_updater/files/simple_config_db_inc_deps.json deleted file mode 100644 index 4554582103..0000000000 --- a/tests/generic_config_updater/files/simple_config_db_inc_deps.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "ACL_TABLE": { - "EVERFLOW": { - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet0" - ], - "stage": "ingress", - "type": "MIRROR" - } - }, - "PORT": { - "Ethernet0": { - "alias": "Eth1", - "lanes": "65, 66, 67, 68", - "description": "Ethernet0 100G link", - "speed": "100000" - } - } -} diff --git a/tests/generic_config_updater/files/single_operation_config_db_patch.json-patch 
b/tests/generic_config_updater/files/single_operation_config_db_patch.json-patch deleted file mode 100644 index 7cc0967bf0..0000000000 --- a/tests/generic_config_updater/files/single_operation_config_db_patch.json-patch +++ /dev/null @@ -1,6 +0,0 @@ -[ - { - "op": "remove", - "path": "/VLAN_MEMBER/Vlan1000|Ethernet8" - } -] diff --git a/tests/generic_config_updater/files/single_operation_sonic_yang_patch.json-patch b/tests/generic_config_updater/files/single_operation_sonic_yang_patch.json-patch deleted file mode 100644 index 5a46560496..0000000000 --- a/tests/generic_config_updater/files/single_operation_sonic_yang_patch.json-patch +++ /dev/null @@ -1,6 +0,0 @@ -[ - { - "op": "remove", - "path": "/sonic-vlan:sonic-vlan/sonic-vlan:VLAN_MEMBER/VLAN_MEMBER_LIST/2" - } -] diff --git a/tests/generic_config_updater/files/sonic_yang_after_multi_patch.json b/tests/generic_config_updater/files/sonic_yang_after_multi_patch.json deleted file mode 100644 index 0c9ddd4546..0000000000 --- a/tests/generic_config_updater/files/sonic_yang_after_multi_patch.json +++ /dev/null @@ -1,153 +0,0 @@ -{ - "sonic-vlan:sonic-vlan": { - "sonic-vlan:VLAN_MEMBER": { - "VLAN_MEMBER_LIST": [ - { - "name": "Vlan1000", - "port": "Ethernet0", - "tagging_mode": "untagged" - }, - { - "name": "Vlan1000", - "port": "Ethernet4", - "tagging_mode": "untagged" - }, - { - "name": "Vlan1000", - "port": "Ethernet8", - "tagging_mode": "untagged" - }, - { - "name": "Vlan100", - "port": "Ethernet2", - "tagging_mode": "untagged" - }, - { - "name": "Vlan100", - "port": "Ethernet3", - "tagging_mode": "untagged" - }, - { - "name": "Vlan100", - "port": "Ethernet1", - "tagging_mode": "untagged" - } - ] - }, - "sonic-vlan:VLAN": { - "VLAN_LIST": [ - { - "name": "Vlan1000", - "vlanid": 1000, - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - ] - } - }, - "sonic-acl:sonic-acl": { - "sonic-acl:ACL_TABLE": { - "ACL_TABLE_LIST": [ - { - "ACL_TABLE_NAME": "NO-NSW-PACL-V4", - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0", - "Ethernet1", - "Ethernet2", - "Ethernet3" - ] - }, - { - "ACL_TABLE_NAME": "DATAACL", - "policy_desc": "DATAACL", - "ports": [ - "Ethernet4" - ], - "stage": "ingress", - "type": "L3" - }, - { - "ACL_TABLE_NAME": "EVERFLOW", - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }, - { - "ACL_TABLE_NAME": "EVERFLOWV6", - "policy_desc": "EVERFLOWV6", - "ports": [ - "Ethernet4", - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRRORV6" - } - ] - } - }, - "sonic-port:sonic-port": { - "sonic-port:PORT": { - "PORT_LIST": [ - { - "name": "Ethernet0", - "alias": "Eth1/1", - "lanes": "65", - "description": "", - "speed": 10000 - }, - { - "name": "Ethernet4", - "admin_status": "up", - "alias": "fortyGigE0/4", - "description": "Servers0:eth0", - "index": 1, - "lanes": "29,30,31,32", - "mtu": 9100, - "pfc_asym": "off", - "speed": 40000 - }, - { - "name": "Ethernet8", - "admin_status": "up", - "alias": "fortyGigE0/8", - "description": "Servers1:eth0", - "index": 2, - "lanes": "33,34,35,36", - "mtu": 9100, - "pfc_asym": "off", - "speed": 40000 - }, - { - "name": "Ethernet3", - "alias": "Eth1/4", - "lanes": "68", - "description": "", - "speed": 10000 - }, - { - "name": "Ethernet1", - "alias": "Eth1/2", - "lanes": "66", - "description": "", - "speed": 10000 - }, - { - "name": "Ethernet2", - "alias": "Eth1/3", - "lanes": "67", - "description": "", - "speed": 10000 - } - ] - } - } -} diff --git 
a/tests/generic_config_updater/files/sonic_yang_as_json.json b/tests/generic_config_updater/files/sonic_yang_as_json.json deleted file mode 100644 index 37f0fe6ba7..0000000000 --- a/tests/generic_config_updater/files/sonic_yang_as_json.json +++ /dev/null @@ -1,114 +0,0 @@ -{ - "sonic-vlan:sonic-vlan": { - "sonic-vlan:VLAN_MEMBER": { - "VLAN_MEMBER_LIST": [ - { - "name": "Vlan1000", - "port": "Ethernet0", - "tagging_mode": "untagged" - }, - { - "name": "Vlan1000", - "port": "Ethernet4", - "tagging_mode": "untagged" - }, - { - "name": "Vlan1000", - "port": "Ethernet8", - "tagging_mode": "untagged" - } - ] - }, - "sonic-vlan:VLAN": { - "VLAN_LIST": [ - { - "name": "Vlan1000", - "vlanid": 1000, - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - ] - } - }, - "sonic-acl:sonic-acl": { - "sonic-acl:ACL_TABLE": { - "ACL_TABLE_LIST": [ - { - "ACL_TABLE_NAME": "NO-NSW-PACL-V4", - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0" - ] - }, - { - "ACL_TABLE_NAME": "DATAACL", - "policy_desc": "DATAACL", - "ports": [ - "Ethernet4" - ], - "stage": "ingress", - "type": "L3" - }, - { - "ACL_TABLE_NAME": "EVERFLOW", - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }, - { - "ACL_TABLE_NAME": "EVERFLOWV6", - "policy_desc": "EVERFLOWV6", - "ports": [ - "Ethernet4", - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRRORV6" - } - ] - } - }, - "sonic-port:sonic-port": { - "sonic-port:PORT": { - "PORT_LIST": [ - { - "name": "Ethernet0", - "alias": "Eth1", - "lanes": "65, 66, 67, 68", - "description": "Ethernet0 100G link", - "speed": 100000 - }, - { - "name": "Ethernet4", - "admin_status": "up", - "alias": "fortyGigE0/4", - "description": "Servers0:eth0", - "index": 1, - "lanes": "29,30,31,32", - "mtu": 9100, - "pfc_asym": "off", - "speed": 40000 - }, - { - "name": "Ethernet8", - "admin_status": "up", - "alias": "fortyGigE0/8", - "description": "Servers1:eth0", - "index": 2, - "lanes": "33,34,35,36", - "mtu": 9100, - "pfc_asym": "off", - "speed": 40000 - } - ] - } - } -} diff --git a/tests/generic_config_updater/files/sonic_yang_as_json_invalid.json b/tests/generic_config_updater/files/sonic_yang_as_json_invalid.json deleted file mode 100644 index 4f67d7e6a6..0000000000 --- a/tests/generic_config_updater/files/sonic_yang_as_json_invalid.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "sonic-vlan:sonic-vlan": { - "sonic-vlan:VLAN_MEMBER": { - "VLAN_MEMBER_LIST": [ - { - "name": "Vlan1000", - "port": "Ethernet4", - "tagging_mode": "untagged" - } - ] - } - } -} diff --git a/tests/generic_config_updater/files/sonic_yang_as_json_with_unexpected_colons.json b/tests/generic_config_updater/files/sonic_yang_as_json_with_unexpected_colons.json deleted file mode 100644 index aac97da42b..0000000000 --- a/tests/generic_config_updater/files/sonic_yang_as_json_with_unexpected_colons.json +++ /dev/null @@ -1,114 +0,0 @@ -{ - "sonic-vlan:sonic-vlan": { - "sonic-vlan::VLAN_MEMBER": { - "VLAN_MEMBER_LIST": [ - { - "name": "Vlan1000", - "port": "Ethernet0", - "tagging_mode": "untagged" - }, - { - "name": "Vlan1000", - "port": "Ethernet4", - "tagging_mode": "untagged" - }, - { - "name": "Vlan1000", - "port": "Ethernet8", - "tagging_mode": "untagged" - } - ] - }, - "sonic-vlan::VLAN": { - "VLAN_LIST": [ - { - "name": "Vlan1000", - "vlanid": 1000, - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - ] - } - }, - "sonic-acl:sonic-acl": { - "sonic-vlan::ACL_TABLE": { - "ACL_TABLE_LIST": [ - 
{ - "ACL_TABLE_NAME": "NO-NSW-PACL-V4", - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0" - ] - }, - { - "ACL_TABLE_NAME": "DATAACL", - "policy_desc": "DATAACL", - "ports": [ - "Ethernet4" - ], - "stage": "ingress", - "type": "L3" - }, - { - "ACL_TABLE_NAME": "EVERFLOW", - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }, - { - "ACL_TABLE_NAME": "EVERFLOWV6", - "policy_desc": "EVERFLOWV6", - "ports": [ - "Ethernet4", - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRRORV6" - } - ] - } - }, - "sonic-port:sonic-port": { - "sonic-vlan::PORT": { - "PORT_LIST": [ - { - "name": "Ethernet0", - "alias": "Eth1", - "lanes": "65, 66, 67, 68", - "description": "Ethernet0 100G link", - "speed": 100000 - }, - { - "name": "Ethernet4", - "admin_status": "up", - "alias": "fortyGigE0/4", - "description": "Servers0:eth0", - "index": 1, - "lanes": "29,30,31,32", - "mtu": 9100, - "pfc_asym": "off", - "speed": 40000 - }, - { - "name": "Ethernet8", - "admin_status": "up", - "alias": "fortyGigE0/8", - "description": "Servers1:eth0", - "index": 2, - "lanes": "33,34,35,36", - "mtu": 9100, - "pfc_asym": "off", - "speed": 40000 - } - ] - } - } -} diff --git a/tests/generic_config_updater/files/sonic_yang_as_json_without_colons.json b/tests/generic_config_updater/files/sonic_yang_as_json_without_colons.json deleted file mode 100644 index ad4ab15f4a..0000000000 --- a/tests/generic_config_updater/files/sonic_yang_as_json_without_colons.json +++ /dev/null @@ -1,114 +0,0 @@ -{ - "sonic-vlan:sonic-vlan": { - "VLAN_MEMBER": { - "VLAN_MEMBER_LIST": [ - { - "name": "Vlan1000", - "port": "Ethernet0", - "tagging_mode": "untagged" - }, - { - "name": "Vlan1000", - "port": "Ethernet4", - "tagging_mode": "untagged" - }, - { - "name": "Vlan1000", - "port": "Ethernet8", - "tagging_mode": "untagged" - } - ] - }, - "VLAN": { - "VLAN_LIST": [ - { - "name": "Vlan1000", - "vlanid": 1000, - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - ] - } - }, - "sonic-acl:sonic-acl": { - "ACL_TABLE": { - "ACL_TABLE_LIST": [ - { - "ACL_TABLE_NAME": "NO-NSW-PACL-V4", - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0" - ] - }, - { - "ACL_TABLE_NAME": "DATAACL", - "policy_desc": "DATAACL", - "ports": [ - "Ethernet4" - ], - "stage": "ingress", - "type": "L3" - }, - { - "ACL_TABLE_NAME": "EVERFLOW", - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }, - { - "ACL_TABLE_NAME": "EVERFLOWV6", - "policy_desc": "EVERFLOWV6", - "ports": [ - "Ethernet4", - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRRORV6" - } - ] - } - }, - "sonic-port:sonic-port": { - "PORT": { - "PORT_LIST": [ - { - "name": "Ethernet0", - "alias": "Eth1", - "lanes": "65, 66, 67, 68", - "description": "Ethernet0 100G link", - "speed": 100000 - }, - { - "name": "Ethernet4", - "admin_status": "up", - "alias": "fortyGigE0/4", - "description": "Servers0:eth0", - "index": 1, - "lanes": "29,30,31,32", - "mtu": 9100, - "pfc_asym": "off", - "speed": 40000 - }, - { - "name": "Ethernet8", - "admin_status": "up", - "alias": "fortyGigE0/8", - "description": "Servers1:eth0", - "index": 2, - "lanes": "33,34,35,36", - "mtu": 9100, - "pfc_asym": "off", - "speed": 40000 - } - ] - } - } -} diff --git a/tests/generic_config_updater/generic_updater_test.py b/tests/generic_config_updater/generic_updater_test.py deleted file mode 100644 index f201280062..0000000000 --- 
a/tests/generic_config_updater/generic_updater_test.py +++ /dev/null @@ -1,766 +0,0 @@ -import json -import os -import shutil -import unittest -from unittest.mock import MagicMock, Mock, call -from .gutest_helpers import create_side_effect_dict, Files - -import generic_config_updater.generic_updater as gu - -# import sys -# sys.path.insert(0,'../../generic_config_updater') -# import generic_updater as gu - -class TestPatchApplier(unittest.TestCase): - def test_apply__invalid_patch_updating_tables_without_yang_models__failure(self): - # Arrange - patch_applier = self.__create_patch_applier(valid_patch_only_tables_with_yang_models=False) - - # Act and assert - self.assertRaises(ValueError, patch_applier.apply, Files.MULTI_OPERATION_CONFIG_DB_PATCH) - - def test_apply__invalid_config_db__failure(self): - # Arrange - patch_applier = self.__create_patch_applier(valid_config_db=False) - - # Act and assert - self.assertRaises(ValueError, patch_applier.apply, Files.MULTI_OPERATION_CONFIG_DB_PATCH) - - def test_apply__json_not_fully_updated__failure(self): - # Arrange - patch_applier = self.__create_patch_applier(verified_same_config=False) - - # Act and assert - self.assertRaises(gu.GenericConfigUpdaterError, patch_applier.apply, Files.MULTI_OPERATION_CONFIG_DB_PATCH) - - def test_apply__no_errors__update_successful(self): - # Arrange - changes = [Mock(), Mock()] - patch_applier = self.__create_patch_applier(changes) - - # Act - patch_applier.apply(Files.MULTI_OPERATION_CONFIG_DB_PATCH) - - # Assert - patch_applier.patch_wrapper.validate_config_db_patch_has_yang_models.assert_has_calls( - [call(Files.MULTI_OPERATION_CONFIG_DB_PATCH)]) - patch_applier.config_wrapper.get_config_db_as_json.assert_has_calls([call(), call()]) - patch_applier.patch_wrapper.simulate_patch.assert_has_calls( - [call(Files.MULTI_OPERATION_CONFIG_DB_PATCH, Files.CONFIG_DB_AS_JSON)]) - patch_applier.config_wrapper.validate_config_db_config.assert_has_calls( - [call(Files.CONFIG_DB_AFTER_MULTI_PATCH)]) - patch_applier.patchsorter.sort.assert_has_calls([call(Files.MULTI_OPERATION_CONFIG_DB_PATCH)]) - patch_applier.changeapplier.apply.assert_has_calls([call(changes[0]), call(changes[1])]) - patch_applier.patch_wrapper.verify_same_json.assert_has_calls( - [call(Files.CONFIG_DB_AFTER_MULTI_PATCH, Files.CONFIG_DB_AFTER_MULTI_PATCH)]) - - def __create_patch_applier(self, - changes=None, - valid_patch_only_tables_with_yang_models=True, - valid_config_db=True, - verified_same_config=True): - config_wrapper = Mock() - config_wrapper.get_config_db_as_json.side_effect = \ - [Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AFTER_MULTI_PATCH] - config_wrapper.validate_config_db_config.side_effect = \ - create_side_effect_dict({(str(Files.CONFIG_DB_AFTER_MULTI_PATCH),): valid_config_db}) - - patch_wrapper = Mock() - patch_wrapper.validate_config_db_patch_has_yang_models.side_effect = \ - create_side_effect_dict( - {(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH),): valid_patch_only_tables_with_yang_models}) - patch_wrapper.simulate_patch.side_effect = \ - create_side_effect_dict( - {(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH), str(Files.CONFIG_DB_AS_JSON)): - Files.CONFIG_DB_AFTER_MULTI_PATCH}) - patch_wrapper.verify_same_json.side_effect = \ - create_side_effect_dict( - {(str(Files.CONFIG_DB_AFTER_MULTI_PATCH), str(Files.CONFIG_DB_AFTER_MULTI_PATCH)): - verified_same_config}) - - changes = [Mock(), Mock()] if not changes else changes - patchsorter = Mock() - patchsorter.sort.side_effect = \ - 
create_side_effect_dict({(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH),): changes}) - - changeapplier = Mock() - changeapplier.apply.side_effect = create_side_effect_dict({(str(changes[0]),): 0, (str(changes[1]),): 0}) - - return gu.PatchApplier(patchsorter, changeapplier, config_wrapper, patch_wrapper) - -class TestConfigReplacer(unittest.TestCase): - def test_replace__invalid_config_db__failure(self): - # Arrange - config_replacer = self.__create_config_replacer(valid_config_db=False) - - # Act and assert - self.assertRaises(ValueError, config_replacer.replace, Files.CONFIG_DB_AFTER_MULTI_PATCH) - - def test_replace__json_not_fully_updated__failure(self): - # Arrange - config_replacer = self.__create_config_replacer(verified_same_config=False) - - # Act and assert - self.assertRaises(gu.GenericConfigUpdaterError, config_replacer.replace, Files.CONFIG_DB_AFTER_MULTI_PATCH) - - def test_replace__no_errors__update_successful(self): - # Arrange - config_replacer = self.__create_config_replacer() - - # Act - config_replacer.replace(Files.CONFIG_DB_AFTER_MULTI_PATCH) - - # Assert - config_replacer.config_wrapper.validate_config_db_config.assert_has_calls( - [call(Files.CONFIG_DB_AFTER_MULTI_PATCH)]) - config_replacer.config_wrapper.get_config_db_as_json.assert_has_calls([call(), call()]) - config_replacer.patch_wrapper.generate_patch.assert_has_calls( - [call(Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AFTER_MULTI_PATCH)]) - config_replacer.patch_applier.apply.assert_has_calls([call(Files.MULTI_OPERATION_CONFIG_DB_PATCH)]) - config_replacer.patch_wrapper.verify_same_json.assert_has_calls( - [call(Files.CONFIG_DB_AFTER_MULTI_PATCH, Files.CONFIG_DB_AFTER_MULTI_PATCH)]) - - def __create_config_replacer(self, changes=None, valid_config_db=True, verified_same_config=True): - config_wrapper = Mock() - config_wrapper.validate_config_db_config.side_effect = \ - create_side_effect_dict({(str(Files.CONFIG_DB_AFTER_MULTI_PATCH),): valid_config_db}) - config_wrapper.get_config_db_as_json.side_effect = \ - [Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AFTER_MULTI_PATCH] - - patch_wrapper = Mock() - patch_wrapper.generate_patch.side_effect = \ - create_side_effect_dict( - {(str(Files.CONFIG_DB_AS_JSON), str(Files.CONFIG_DB_AFTER_MULTI_PATCH)): - Files.MULTI_OPERATION_CONFIG_DB_PATCH}) - patch_wrapper.verify_same_json.side_effect = \ - create_side_effect_dict( - {(str(Files.CONFIG_DB_AFTER_MULTI_PATCH), str(Files.CONFIG_DB_AFTER_MULTI_PATCH)): \ - verified_same_config}) - - changes = [Mock(), Mock()] if not changes else changes - patchsorter = Mock() - patchsorter.sort.side_effect = create_side_effect_dict({(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH),): \ - changes}) - - patch_applier = Mock() - patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH),): 0}) - - return gu.ConfigReplacer(patch_applier, config_wrapper, patch_wrapper) - -class TestFileSystemConfigRollbacker(unittest.TestCase): - def setUp(self): - self.checkpoints_dir = os.path.join(os.getcwd(),"checkpoints") - self.checkpoint_ext = ".cp.json" - self.any_checkpoint_name = "anycheckpoint" - self.any_other_checkpoint_name = "anyothercheckpoint" - self.any_config = {} - self.clean_up() - - def tearDown(self): - self.clean_up() - - def test_rollback__checkpoint_does_not_exist__failure(self): - # Arrange - rollbacker = self.create_rollbacker() - - # Act and assert - self.assertRaises(ValueError, rollbacker.rollback, "NonExistingCheckpoint") - - def test_rollback__no_errors__success(self): - # Arrange - 
self.create_checkpoints_dir() - self.add_checkpoint(self.any_checkpoint_name, self.any_config) - rollbacker = self.create_rollbacker() - - # Act - rollbacker.rollback(self.any_checkpoint_name) - - # Assert - rollbacker.config_replacer.replace.assert_has_calls([call(self.any_config)]) - - def test_checkpoint__checkpoints_dir_does_not_exist__checkpoint_created(self): - # Arrange - rollbacker = self.create_rollbacker() - self.assertFalse(os.path.isdir(self.checkpoints_dir)) - - # Act - rollbacker.checkpoint(self.any_checkpoint_name) - - # Assert - self.assertTrue(os.path.isdir(self.checkpoints_dir)) - self.assertEqual(self.any_config, self.get_checkpoint(self.any_checkpoint_name)) - - def test_checkpoint__config_not_valid__failure(self): - # Arrange - rollbacker = self.create_rollbacker(valid_config=False) - - # Act and assert - self.assertRaises(ValueError, rollbacker.checkpoint, self.any_checkpoint_name) - - def test_checkpoint__checkpoints_dir_exists__checkpoint_created(self): - # Arrange - self.create_checkpoints_dir() - rollbacker = self.create_rollbacker() - - # Act - rollbacker.checkpoint(self.any_checkpoint_name) - - # Assert - self.assertEqual(self.any_config, self.get_checkpoint(self.any_checkpoint_name)) - - def test_list_checkpoints__checkpoints_dir_does_not_exist__empty_list(self): - # Arrange - rollbacker = self.create_rollbacker() - self.assertFalse(os.path.isdir(self.checkpoints_dir)) - expected = [] - - # Act - actual = rollbacker.list_checkpoints() - - # Assert - # 'assertCountEqual' checks that both sequences have the same elements and counts, ignoring order - self.assertCountEqual(expected, actual) - - def test_list_checkpoints__checkpoints_dir_exist_but_no_files__empty_list(self): - # Arrange - self.create_checkpoints_dir() - rollbacker = self.create_rollbacker() - expected = [] - - # Act - actual = rollbacker.list_checkpoints() - - # Assert - # 'assertCountEqual' checks that both sequences have the same elements and counts, ignoring order - self.assertCountEqual(expected, actual) - - def test_list_checkpoints__checkpoints_dir_has_multiple_files__multiple_files(self): - # Arrange - self.create_checkpoints_dir() - self.add_checkpoint(self.any_checkpoint_name, self.any_config) - self.add_checkpoint(self.any_other_checkpoint_name, self.any_config) - rollbacker = self.create_rollbacker() - expected = [self.any_checkpoint_name, self.any_other_checkpoint_name] - - # Act - actual = rollbacker.list_checkpoints() - - # Assert - # 'assertCountEqual' checks that both sequences have the same elements and counts, ignoring order - self.assertCountEqual(expected, actual) - - def test_list_checkpoints__checkpoints_names_have_special_characters__multiple_files(self): - # Arrange - self.create_checkpoints_dir() - self.add_checkpoint("check.point1", self.any_config) - self.add_checkpoint(".checkpoint2", self.any_config) - self.add_checkpoint("checkpoint3.", self.any_config) - rollbacker = self.create_rollbacker() - expected = ["check.point1", ".checkpoint2", "checkpoint3."] - - # Act - actual = rollbacker.list_checkpoints() - - # Assert - # 'assertCountEqual' checks that both sequences have the same elements and counts, ignoring order - self.assertCountEqual(expected, actual) - - def test_delete_checkpoint__checkpoint_does_not_exist__failure(self): - # Arrange - rollbacker = self.create_rollbacker() - - # Act and assert - self.assertRaises(ValueError, rollbacker.delete_checkpoint, self.any_checkpoint_name) - - def test_delete_checkpoint__checkpoint_exist__success(self): - # Arrange - self.create_checkpoints_dir() - self.add_checkpoint(self.any_checkpoint_name, self.any_config) - rollbacker = self.create_rollbacker() - - # Act - rollbacker.delete_checkpoint(self.any_checkpoint_name) - - # Assert - self.assertFalse(self.check_checkpoint_exists(self.any_checkpoint_name)) - - def test_multiple_operations(self): - rollbacker = self.create_rollbacker() - - # 'assertCountEqual' checks that both sequences have the same elements and counts, ignoring order - self.assertCountEqual([], rollbacker.list_checkpoints()) - - rollbacker.checkpoint(self.any_checkpoint_name) - self.assertCountEqual([self.any_checkpoint_name], rollbacker.list_checkpoints()) - self.assertEqual(self.any_config, self.get_checkpoint(self.any_checkpoint_name)) - - rollbacker.rollback(self.any_checkpoint_name) - rollbacker.config_replacer.replace.assert_has_calls([call(self.any_config)]) - - rollbacker.checkpoint(self.any_other_checkpoint_name) - self.assertCountEqual([self.any_checkpoint_name, self.any_other_checkpoint_name], rollbacker.list_checkpoints()) - self.assertEqual(self.any_config, self.get_checkpoint(self.any_other_checkpoint_name)) - - rollbacker.delete_checkpoint(self.any_checkpoint_name) - self.assertCountEqual([self.any_other_checkpoint_name], rollbacker.list_checkpoints()) - - rollbacker.delete_checkpoint(self.any_other_checkpoint_name) - self.assertCountEqual([], rollbacker.list_checkpoints()) - - def clean_up(self): - if os.path.isdir(self.checkpoints_dir): - shutil.rmtree(self.checkpoints_dir) - - def create_checkpoints_dir(self): - os.makedirs(self.checkpoints_dir) - - def add_checkpoint(self, name, json_content): - path=os.path.join(self.checkpoints_dir, f"{name}{self.checkpoint_ext}") - with open(path, "w") as fh: - fh.write(json.dumps(json_content)) - - def get_checkpoint(self, name): - path=os.path.join(self.checkpoints_dir, f"{name}{self.checkpoint_ext}") - with open(path) as fh: - text = fh.read() - return json.loads(text) - - def check_checkpoint_exists(self, name): - path=os.path.join(self.checkpoints_dir, f"{name}{self.checkpoint_ext}") - return os.path.isfile(path) - - def create_rollbacker(self, valid_config=True): - replacer = Mock() - replacer.replace.side_effect = create_side_effect_dict({(str(self.any_config),): 0}) - - config_wrapper = Mock() - config_wrapper.get_config_db_as_json.return_value = self.any_config - config_wrapper.validate_config_db_config.return_value = valid_config - - return gu.FileSystemConfigRollbacker(checkpoints_dir=self.checkpoints_dir, - config_replacer=replacer, - config_wrapper=config_wrapper) - -class TestGenericUpdateFactory(unittest.TestCase): - def setUp(self): - self.any_verbose=True - self.any_dry_run=True - - def test_create_patch_applier__invalid_config_format__failure(self): - # Arrange - factory = gu.GenericUpdateFactory() - - # Act and assert - self.assertRaises( - ValueError, factory.create_patch_applier, "INVALID_FORMAT", self.any_verbose, self.any_dry_run) - - def test_create_patch_applier__different_options(self): - # Arrange - options = [ - {"verbose": {True: None, False: None}}, - {"dry_run": {True: None, False: gu.ConfigLockDecorator}}, - { - "config_format": { - gu.ConfigFormat.SONICYANG: gu.SonicYangDecorator, - gu.ConfigFormat.CONFIGDB: None, - } - }, - ] - - # Act and assert - self.recursively_test_create_func(options, 0, {}, [], self.validate_create_patch_applier) - - def test_create_config_replacer__invalid_config_format__failure(self): - # Arrange - factory = gu.GenericUpdateFactory() - - # Act and assert - self.assertRaises( - ValueError, factory.create_config_replacer, "INVALID_FORMAT", self.any_verbose, self.any_dry_run) - - def
test_create_config_replacer__different_options(self): - # Arrange - options = [ - {"verbose": {True: None, False: None}}, - {"dry_run": {True: None, False: gu.ConfigLockDecorator}}, - { - "config_format": { - gu.ConfigFormat.SONICYANG: gu.SonicYangDecorator, - gu.ConfigFormat.CONFIGDB: None, - } - }, - ] - - # Act and assert - self.recursively_test_create_func(options, 0, {}, [], self.validate_create_config_replacer) - - def test_create_config_rollbacker__different_options(self): - # Arrange - options = [ - {"verbose": {True: None, False: None}}, - {"dry_run": {True: None, False: gu.ConfigLockDecorator}} - ] - - # Act and assert - self.recursively_test_create_func(options, 0, {}, [], self.validate_create_config_rollbacker) - - def recursively_test_create_func(self, options, cur_option, params, expected_decorators, create_func): - if cur_option == len(options): - create_func(params, expected_decorators) - return - - param = list(options[cur_option].keys())[0] - for key in options[cur_option][param]: - params[param] = key - decorator = options[cur_option][param][key] - if decorator != None: - expected_decorators.append(decorator) - self.recursively_test_create_func(options, cur_option+1, params, expected_decorators, create_func) - if decorator != None: - expected_decorators.pop() - - def validate_create_patch_applier(self, params, expected_decorators): - factory = gu.GenericUpdateFactory() - patch_applier = factory.create_patch_applier(params["config_format"], params["verbose"], params["dry_run"]) - for decorator_type in expected_decorators: - self.assertIsInstance(patch_applier, decorator_type) - - patch_applier = patch_applier.decorated_patch_applier - - self.assertIsInstance(patch_applier, gu.PatchApplier) - if params["dry_run"]: - self.assertIsInstance(patch_applier.config_wrapper, gu.DryRunConfigWrapper) - else: - self.assertIsInstance(patch_applier.config_wrapper, gu.ConfigWrapper) - - def validate_create_config_replacer(self, params, expected_decorators): - factory = gu.GenericUpdateFactory() - config_replacer = factory.create_config_replacer(params["config_format"], params["verbose"], params["dry_run"]) - for decorator_type in expected_decorators: - self.assertIsInstance(config_replacer, decorator_type) - - config_replacer = config_replacer.decorated_config_replacer - - self.assertIsInstance(config_replacer, gu.ConfigReplacer) - if params["dry_run"]: - self.assertIsInstance(config_replacer.config_wrapper, gu.DryRunConfigWrapper) - self.assertIsInstance(config_replacer.patch_applier.config_wrapper, gu.DryRunConfigWrapper) - else: - self.assertIsInstance(config_replacer.config_wrapper, gu.ConfigWrapper) - self.assertIsInstance(config_replacer.patch_applier.config_wrapper, gu.ConfigWrapper) - - def validate_create_config_rollbacker(self, params, expected_decorators): - factory = gu.GenericUpdateFactory() - config_rollbacker = factory.create_config_rollbacker(params["verbose"], params["dry_run"]) - for decorator_type in expected_decorators: - self.assertIsInstance(config_rollbacker, decorator_type) - - config_rollbacker = config_rollbacker.decorated_config_rollbacker - - self.assertIsInstance(config_rollbacker, gu.FileSystemConfigRollbacker) - if params["dry_run"]: - self.assertIsInstance(config_rollbacker.config_wrapper, gu.DryRunConfigWrapper) - self.assertIsInstance(config_rollbacker.config_replacer.config_wrapper, gu.DryRunConfigWrapper) - self.assertIsInstance( - config_rollbacker.config_replacer.patch_applier.config_wrapper, gu.DryRunConfigWrapper) - else: - 
self.assertIsInstance(config_rollbacker.config_wrapper, gu.ConfigWrapper) - self.assertIsInstance(config_rollbacker.config_replacer.config_wrapper, gu.ConfigWrapper) - self.assertIsInstance( - config_rollbacker.config_replacer.patch_applier.config_wrapper, gu.ConfigWrapper) - -class TestGenericUpdater(unittest.TestCase): - def setUp(self): - self.any_checkpoint_name = "anycheckpoint" - self.any_other_checkpoint_name = "anyothercheckpoint" - self.any_checkpoints_list = [self.any_checkpoint_name, self.any_other_checkpoint_name] - self.any_config_format = gu.ConfigFormat.SONICYANG - self.any_verbose = True - self.any_dry_run = True - - def test_apply_patch__creates_applier_and_apply(self): - # Arrange - patch_applier = Mock() - patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.SINGLE_OPERATION_SONIC_YANG_PATCH),): 0}) - - factory = Mock() - factory.create_patch_applier.side_effect = \ - create_side_effect_dict( - {(str(self.any_config_format), str(self.any_verbose), str(self.any_dry_run),): patch_applier}) - - generic_updater = gu.GenericUpdater(factory) - - # Act - generic_updater.apply_patch( - Files.SINGLE_OPERATION_SONIC_YANG_PATCH, self.any_config_format, self.any_verbose, self.any_dry_run) - - # Assert - patch_applier.apply.assert_has_calls([call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH)]) - - def test_replace__creates_replacer_and_replace(self): - # Arrange - config_replacer = Mock() - config_replacer.replace.side_effect = create_side_effect_dict({(str(Files.SONIC_YANG_AS_JSON),): 0}) - - factory = Mock() - factory.create_config_replacer.side_effect = \ - create_side_effect_dict( - {(str(self.any_config_format), str(self.any_verbose), str(self.any_dry_run),): config_replacer}) - - generic_updater = gu.GenericUpdater(factory) - - # Act - generic_updater.replace(Files.SONIC_YANG_AS_JSON, self.any_config_format, self.any_verbose, self.any_dry_run) - - # Assert - config_replacer.replace.assert_has_calls([call(Files.SONIC_YANG_AS_JSON)]) - - def test_rollback__creates_rollbacker_and_rollback(self): - # Arrange - config_rollbacker = Mock() - config_rollbacker.rollback.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) - - factory = Mock() - factory.create_config_rollbacker.side_effect = \ - create_side_effect_dict({(str(self.any_verbose), str(self.any_dry_run),): config_rollbacker}) - - generic_updater = gu.GenericUpdater(factory) - - # Act - generic_updater.rollback(self.any_checkpoint_name, self.any_verbose, self.any_dry_run) - - # Assert - config_rollbacker.rollback.assert_has_calls([call(self.any_checkpoint_name)]) - - def test_checkpoint__creates_rollbacker_and_checkpoint(self): - # Arrange - config_rollbacker = Mock() - config_rollbacker.checkpoint.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) - - factory = Mock() - factory.create_config_rollbacker.side_effect = \ - create_side_effect_dict({(str(self.any_verbose),): config_rollbacker}) - - generic_updater = gu.GenericUpdater(factory) - - # Act - generic_updater.checkpoint(self.any_checkpoint_name, self.any_verbose) - - # Assert - config_rollbacker.checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) - - def test_delete_checkpoint__creates_rollbacker_and_deletes_checkpoint(self): - # Arrange - config_rollbacker = Mock() - config_rollbacker.delete_checkpoint.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) - - factory = Mock() - factory.create_config_rollbacker.side_effect = \ - create_side_effect_dict({(str(self.any_verbose),): 
config_rollbacker}) - - generic_updater = gu.GenericUpdater(factory) - - # Act - generic_updater.delete_checkpoint(self.any_checkpoint_name, self.any_verbose) - - # Assert - config_rollbacker.delete_checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) - - def test_list_checkpoints__creates_rollbacker_and_list_checkpoints(self): - # Arrange - config_rollbacker = Mock() - config_rollbacker.list_checkpoints.return_value = self.any_checkpoints_list - - factory = Mock() - factory.create_config_rollbacker.side_effect = \ - create_side_effect_dict({(str(self.any_verbose),): config_rollbacker}) - - generic_updater = gu.GenericUpdater(factory) - - expected = self.any_checkpoints_list - - # Act - actual = generic_updater.list_checkpoints(self.any_verbose) - - # Assert - self.assertCountEqual(expected, actual) - -class TestDecorator(unittest.TestCase): - def setUp(self): - self.decorated_patch_applier = Mock() - self.decorated_config_replacer = Mock() - self.decorated_config_rollbacker = Mock() - - self.any_checkpoint_name = "anycheckpoint" - self.any_other_checkpoint_name = "anyothercheckpoint" - self.any_checkpoints_list = [self.any_checkpoint_name, self.any_other_checkpoint_name] - self.decorated_config_rollbacker.list_checkpoints.return_value = self.any_checkpoints_list - - self.decorator = gu.Decorator( - self.decorated_patch_applier, self.decorated_config_replacer, self.decorated_config_rollbacker) - - def test_apply__calls_decorated_applier(self): - # Act - self.decorator.apply(Files.SINGLE_OPERATION_SONIC_YANG_PATCH) - - # Assert - self.decorated_patch_applier.apply.assert_has_calls([call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH)]) - - def test_replace__calls_decorated_replacer(self): - # Act - self.decorator.replace(Files.SONIC_YANG_AS_JSON) - - # Assert - self.decorated_config_replacer.replace.assert_has_calls([call(Files.SONIC_YANG_AS_JSON)]) - - def test_rollback__calls_decorated_rollbacker(self): - # Act - self.decorator.rollback(self.any_checkpoint_name) - - # Assert - self.decorated_config_rollbacker.rollback.assert_has_calls([call(self.any_checkpoint_name)]) - - def test_checkpoint__calls_decorated_rollbacker(self): - # Act - self.decorator.checkpoint(self.any_checkpoint_name) - - # Assert - self.decorated_config_rollbacker.checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) - - def test_delete_checkpoint__calls_decorated_rollbacker(self): - # Act - self.decorator.delete_checkpoint(self.any_checkpoint_name) - - # Assert - self.decorated_config_rollbacker.delete_checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) - - def test_list_checkpoints__calls_decorated_rollbacker(self): - # Arrange - expected = self.any_checkpoints_list - - # Act - actual = self.decorator.list_checkpoints() - - # Assert - self.decorated_config_rollbacker.list_checkpoints.assert_called_once() - self.assertListEqual(expected, actual) - -class TestSonicYangDecorator(unittest.TestCase): - def test_apply__converts_to_config_db_and_calls_decorated_class(self): - # Arrange - sonic_yang_decorator = self.__create_sonic_yang_decorator() - - # Act - sonic_yang_decorator.apply(Files.SINGLE_OPERATION_SONIC_YANG_PATCH) - - # Assert - sonic_yang_decorator.patch_wrapper.convert_sonic_yang_patch_to_config_db_patch.assert_has_calls( - [call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH)]) - sonic_yang_decorator.decorated_patch_applier.apply.assert_has_calls( - [call(Files.SINGLE_OPERATION_CONFIG_DB_PATCH)]) - - def test_replace__converts_to_config_db_and_calls_decorated_class(self): - # Arrange - 
sonic_yang_decorator = self.__create_sonic_yang_decorator() - - # Act - sonic_yang_decorator.replace(Files.SONIC_YANG_AS_JSON) - - # Assert - sonic_yang_decorator.config_wrapper.convert_sonic_yang_to_config_db.assert_has_calls( - [call(Files.SONIC_YANG_AS_JSON)]) - sonic_yang_decorator.decorated_config_replacer.replace.assert_has_calls([call(Files.CONFIG_DB_AS_JSON)]) - - def __create_sonic_yang_decorator(self): - patch_applier = Mock() - patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.SINGLE_OPERATION_CONFIG_DB_PATCH),): 0}) - - patch_wrapper = Mock() - patch_wrapper.convert_sonic_yang_patch_to_config_db_patch.side_effect = \ - create_side_effect_dict({(str(Files.SINGLE_OPERATION_SONIC_YANG_PATCH),): \ - Files.SINGLE_OPERATION_CONFIG_DB_PATCH}) - - config_replacer = Mock() - config_replacer.replace.side_effect = create_side_effect_dict({(str(Files.CONFIG_DB_AS_JSON),): 0}) - - config_wrapper = Mock() - config_wrapper.convert_sonic_yang_to_config_db.side_effect = \ - create_side_effect_dict({(str(Files.SONIC_YANG_AS_JSON),): Files.CONFIG_DB_AS_JSON}) - - return gu.SonicYangDecorator(decorated_patch_applier=patch_applier, - decorated_config_replacer=config_replacer, - patch_wrapper=patch_wrapper, - config_wrapper=config_wrapper) - -class TestConfigLockDecorator(unittest.TestCase): - def setUp(self): - self.any_checkpoint_name = "anycheckpoint" - - def test_apply__lock_config(self): - # Arrange - config_lock_decorator = self.__create_config_lock_decorator() - - # Act - config_lock_decorator.apply(Files.SINGLE_OPERATION_SONIC_YANG_PATCH) - - # Assert - config_lock_decorator.config_lock.acquire_lock.assert_called_once() - config_lock_decorator.decorated_patch_applier.apply.assert_has_calls( - [call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH)]) - config_lock_decorator.config_lock.release_lock.assert_called_once() - - def test_replace__lock_config(self): - # Arrange - config_lock_decorator = self.__create_config_lock_decorator() - - # Act - config_lock_decorator.replace(Files.SONIC_YANG_AS_JSON) - - # Assert - config_lock_decorator.config_lock.acquire_lock.assert_called_once() - config_lock_decorator.decorated_config_replacer.replace.assert_has_calls([call(Files.SONIC_YANG_AS_JSON)]) - config_lock_decorator.config_lock.release_lock.assert_called_once() - - def test_rollback__lock_config(self): - # Arrange - config_lock_decorator = self.__create_config_lock_decorator() - - # Act - config_lock_decorator.rollback(self.any_checkpoint_name) - - # Assert - config_lock_decorator.config_lock.acquire_lock.assert_called_once() - config_lock_decorator.decorated_config_rollbacker.rollback.assert_has_calls([call(self.any_checkpoint_name)]) - config_lock_decorator.config_lock.release_lock.assert_called_once() - - def test_checkpoint__lock_config(self): - # Arrange - config_lock_decorator = self.__create_config_lock_decorator() - - # Act - config_lock_decorator.checkpoint(self.any_checkpoint_name) - - # Assert - config_lock_decorator.config_lock.acquire_lock.assert_called_once() - config_lock_decorator.decorated_config_rollbacker.checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) - config_lock_decorator.config_lock.release_lock.assert_called_once() - - def __create_config_lock_decorator(self): - config_lock = Mock() - - patch_applier = Mock() - patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.SINGLE_OPERATION_SONIC_YANG_PATCH),): 0}) - - config_replacer = Mock() - config_replacer.replace.side_effect = create_side_effect_dict({(str(Files.SONIC_YANG_AS_JSON),): 
0}) - - config_rollbacker = Mock() - config_rollbacker.rollback.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) - config_rollbacker.checkpoint.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) - - config_rollbacker.delete_checkpoint.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) - - return gu.ConfigLockDecorator(config_lock=config_lock, - decorated_patch_applier=patch_applier, - decorated_config_replacer=config_replacer, - decorated_config_rollbacker=config_rollbacker) diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py deleted file mode 100644 index f69ec08030..0000000000 --- a/tests/generic_config_updater/gu_common_test.py +++ /dev/null @@ -1,635 +0,0 @@ -import json -import jsonpatch -import sonic_yang -import unittest -from unittest.mock import MagicMock, Mock - -from .gutest_helpers import create_side_effect_dict, Files -import generic_config_updater.gu_common as gu_common - -class TestConfigWrapper(unittest.TestCase): - def setUp(self): - self.config_wrapper_mock = gu_common.ConfigWrapper() - self.config_wrapper_mock.get_config_db_as_json=MagicMock(return_value=Files.CONFIG_DB_AS_JSON) - - def test_ctor__default_values_set(self): - config_wrapper = gu_common.ConfigWrapper() - - self.assertEqual("/usr/local/yang-models", gu_common.YANG_DIR) - - def test_get_sonic_yang_as_json__returns_sonic_yang_as_json(self): - # Arrange - config_wrapper = self.config_wrapper_mock - expected = Files.SONIC_YANG_AS_JSON - - # Act - actual = config_wrapper.get_sonic_yang_as_json() - - # Assert - self.assertDictEqual(expected, actual) - - def test_convert_config_db_to_sonic_yang__empty_config_db__returns_empty_sonic_yang(self): - # Arrange - config_wrapper = gu_common.ConfigWrapper() - expected = {} - - # Act - actual = config_wrapper.convert_config_db_to_sonic_yang({}) - - # Assert - self.assertDictEqual(expected, actual) - - def test_convert_config_db_to_sonic_yang__non_empty_config_db__returns_sonic_yang_as_json(self): - # Arrange - config_wrapper = gu_common.ConfigWrapper() - expected = Files.SONIC_YANG_AS_JSON - - # Act - actual = config_wrapper.convert_config_db_to_sonic_yang(Files.CONFIG_DB_AS_JSON) - - # Assert - self.assertDictEqual(expected, actual) - - def test_convert_sonic_yang_to_config_db__empty_sonic_yang__returns_empty_config_db(self): - # Arrange - config_wrapper = gu_common.ConfigWrapper() - expected = {} - - # Act - actual = config_wrapper.convert_sonic_yang_to_config_db({}) - - # Assert - self.assertDictEqual(expected, actual) - - def test_convert_sonic_yang_to_config_db__non_empty_sonic_yang__returns_config_db_as_json(self): - # Arrange - config_wrapper = gu_common.ConfigWrapper() - expected = Files.CROPPED_CONFIG_DB_AS_JSON - - # Act - actual = config_wrapper.convert_sonic_yang_to_config_db(Files.SONIC_YANG_AS_JSON) - - # Assert - self.assertDictEqual(expected, actual) - - def test_convert_sonic_yang_to_config_db__table_name_without_colons__returns_config_db_as_json(self): - # Arrange - config_wrapper = gu_common.ConfigWrapper() - expected = Files.CROPPED_CONFIG_DB_AS_JSON - - # Act - actual = config_wrapper.convert_sonic_yang_to_config_db(Files.SONIC_YANG_AS_JSON_WITHOUT_COLONS) - - # Assert - self.assertDictEqual(expected, actual) - - def test_convert_sonic_yang_to_config_db__table_name_with_unexpected_colons__returns_config_db_as_json(self): - # Arrange - config_wrapper = gu_common.ConfigWrapper() - expected = Files.CROPPED_CONFIG_DB_AS_JSON - - # Act 
and assert
-        self.assertRaises(ValueError,
-                          config_wrapper.convert_sonic_yang_to_config_db,
-                          Files.SONIC_YANG_AS_JSON_WITH_UNEXPECTED_COLONS)
-
-    def test_validate_sonic_yang_config__valid_config__returns_true(self):
-        # Arrange
-        config_wrapper = gu_common.ConfigWrapper()
-        expected = True
-
-        # Act
-        actual = config_wrapper.validate_sonic_yang_config(Files.SONIC_YANG_AS_JSON)
-
-        # Assert
-        self.assertEqual(expected, actual)
-
-    def test_validate_sonic_yang_config__invalid_config__returns_false(self):
-        # Arrange
-        config_wrapper = gu_common.ConfigWrapper()
-        expected = False
-
-        # Act
-        actual = config_wrapper.validate_sonic_yang_config(Files.SONIC_YANG_AS_JSON_INVALID)
-
-        # Assert
-        self.assertEqual(expected, actual)
-
-    def test_validate_config_db_config__valid_config__returns_true(self):
-        # Arrange
-        config_wrapper = gu_common.ConfigWrapper()
-        expected = True
-
-        # Act
-        actual = config_wrapper.validate_config_db_config(Files.CONFIG_DB_AS_JSON)
-
-        # Assert
-        self.assertEqual(expected, actual)
-
-    def test_validate_config_db_config__invalid_config__returns_false(self):
-        # Arrange
-        config_wrapper = gu_common.ConfigWrapper()
-        expected = False
-
-        # Act
-        actual = config_wrapper.validate_config_db_config(Files.CONFIG_DB_AS_JSON_INVALID)
-
-        # Assert
-        self.assertEqual(expected, actual)
-
-    def test_crop_tables_without_yang__returns_cropped_config_db_as_json(self):
-        # Arrange
-        config_wrapper = gu_common.ConfigWrapper()
-        expected = Files.CROPPED_CONFIG_DB_AS_JSON
-
-        # Act
-        actual = config_wrapper.crop_tables_without_yang(Files.CONFIG_DB_AS_JSON)
-
-        # Assert
-        self.assertDictEqual(expected, actual)
-
-class TestPatchWrapper(unittest.TestCase):
-    def setUp(self):
-        self.config_wrapper_mock = gu_common.ConfigWrapper()
-        self.config_wrapper_mock.get_config_db_as_json=MagicMock(return_value=Files.CONFIG_DB_AS_JSON)
-
-    def test_validate_config_db_patch_has_yang_models__table_without_yang_model__returns_false(self):
-        # Arrange
-        patch_wrapper = gu_common.PatchWrapper()
-        patch = [ { 'op': 'remove', 'path': '/TABLE_WITHOUT_YANG' } ]
-        expected = False
-
-        # Act
-        actual = patch_wrapper.validate_config_db_patch_has_yang_models(patch)
-
-        # Assert
-        self.assertEqual(expected, actual)
-
-    def test_validate_config_db_patch_has_yang_models__table_with_yang_model__returns_true(self):
-        # Arrange
-        patch_wrapper = gu_common.PatchWrapper()
-        patch = [ { 'op': 'remove', 'path': '/ACL_TABLE' } ]
-        expected = True
-
-        # Act
-        actual = patch_wrapper.validate_config_db_patch_has_yang_models(patch)
-
-        # Assert
-        self.assertEqual(expected, actual)
-
-    def test_convert_config_db_patch_to_sonic_yang_patch__invalid_config_db_patch__failure(self):
-        # Arrange
-        patch_wrapper = gu_common.PatchWrapper()
-        patch = [ { 'op': 'remove', 'path': '/TABLE_WITHOUT_YANG' } ]
-
-        # Act and Assert
-        self.assertRaises(ValueError, patch_wrapper.convert_config_db_patch_to_sonic_yang_patch, patch)
-
-    def test_same_patch__no_diff__returns_true(self):
-        # Arrange
-        patch_wrapper = gu_common.PatchWrapper()
-
-        # Act and Assert
-        self.assertTrue(patch_wrapper.verify_same_json(Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AS_JSON))
-
-    def test_same_patch__diff__returns_false(self):
-        # Arrange
-        patch_wrapper = gu_common.PatchWrapper()
-
-        # Act and Assert
-        self.assertFalse(patch_wrapper.verify_same_json(Files.CONFIG_DB_AS_JSON, Files.CROPPED_CONFIG_DB_AS_JSON))
-
-    def test_generate_patch__no_diff__empty_patch(self):
-        # Arrange
-        patch_wrapper = gu_common.PatchWrapper()
-
-        # Act
-        patch =
patch_wrapper.generate_patch(Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AS_JSON) - - # Assert - self.assertFalse(patch) - - def test_simulate_patch__empty_patch__no_changes(self): - # Arrange - patch_wrapper = gu_common.PatchWrapper() - patch = jsonpatch.JsonPatch([]) - expected = Files.CONFIG_DB_AS_JSON - - # Act - actual = patch_wrapper.simulate_patch(patch, Files.CONFIG_DB_AS_JSON) - - # Assert - self.assertDictEqual(expected, actual) - - def test_simulate_patch__non_empty_patch__changes_applied(self): - # Arrange - patch_wrapper = gu_common.PatchWrapper() - patch = Files.SINGLE_OPERATION_CONFIG_DB_PATCH - expected = Files.SINGLE_OPERATION_CONFIG_DB_PATCH.apply(Files.CONFIG_DB_AS_JSON) - - # Act - actual = patch_wrapper.simulate_patch(patch, Files.CONFIG_DB_AS_JSON) - - # Assert - self.assertDictEqual(expected, actual) - - def test_generate_patch__diff__non_empty_patch(self): - # Arrange - patch_wrapper = gu_common.PatchWrapper() - after_update_json = Files.SINGLE_OPERATION_CONFIG_DB_PATCH.apply(Files.CONFIG_DB_AS_JSON) - expected = Files.SINGLE_OPERATION_CONFIG_DB_PATCH - - # Act - actual = patch_wrapper.generate_patch(Files.CONFIG_DB_AS_JSON, after_update_json) - - # Assert - self.assertTrue(actual) - self.assertEqual(expected, actual) - - def test_convert_config_db_patch_to_sonic_yang_patch__empty_patch__returns_empty_patch(self): - # Arrange - patch_wrapper = gu_common.PatchWrapper(config_wrapper = self.config_wrapper_mock) - patch = jsonpatch.JsonPatch([]) - expected = jsonpatch.JsonPatch([]) - - # Act - actual = patch_wrapper.convert_config_db_patch_to_sonic_yang_patch(patch) - - # Assert - self.assertEqual(expected, actual) - - def test_convert_config_db_patch_to_sonic_yang_patch__single_operation_patch__returns_sonic_yang_patch(self): - # Arrange - patch_wrapper = gu_common.PatchWrapper(config_wrapper = self.config_wrapper_mock) - patch = Files.SINGLE_OPERATION_CONFIG_DB_PATCH - expected = Files.SINGLE_OPERATION_SONIC_YANG_PATCH - - # Act - actual = patch_wrapper.convert_config_db_patch_to_sonic_yang_patch(patch) - - # Assert - self.assertEqual(expected, actual) - - def test_convert_config_db_patch_to_sonic_yang_patch__multiple_operations_patch__returns_sonic_yang_patch(self): - # Arrange - config_wrapper = self.config_wrapper_mock - patch_wrapper = gu_common.PatchWrapper(config_wrapper = config_wrapper) - config_db_patch = Files.MULTI_OPERATION_CONFIG_DB_PATCH - - # Act - sonic_yang_patch = patch_wrapper.convert_config_db_patch_to_sonic_yang_patch(config_db_patch) - - # Assert - self.__assert_same_patch(config_db_patch, sonic_yang_patch, config_wrapper, patch_wrapper) - - def test_convert_sonic_yang_patch_to_config_db_patch__empty_patch__returns_empty_patch(self): - # Arrange - patch_wrapper = gu_common.PatchWrapper(config_wrapper = self.config_wrapper_mock) - patch = jsonpatch.JsonPatch([]) - expected = jsonpatch.JsonPatch([]) - - # Act - actual = patch_wrapper.convert_sonic_yang_patch_to_config_db_patch(patch) - - # Assert - self.assertEqual(expected, actual) - - def test_convert_sonic_yang_patch_to_config_db_patch__single_operation_patch__returns_config_db_patch(self): - # Arrange - patch_wrapper = gu_common.PatchWrapper(config_wrapper = self.config_wrapper_mock) - patch = Files.SINGLE_OPERATION_SONIC_YANG_PATCH - expected = Files.SINGLE_OPERATION_CONFIG_DB_PATCH - - # Act - actual = patch_wrapper.convert_sonic_yang_patch_to_config_db_patch(patch) - - # Assert - self.assertEqual(expected, actual) - - def 
test_convert_sonic_yang_patch_to_config_db_patch__multiple_operations_patch__returns_config_db_patch(self):
-        # Arrange
-        config_wrapper = self.config_wrapper_mock
-        patch_wrapper = gu_common.PatchWrapper(config_wrapper = config_wrapper)
-        sonic_yang_patch = Files.MULTI_OPERATION_SONIC_YANG_PATCH
-
-        # Act
-        config_db_patch = patch_wrapper.convert_sonic_yang_patch_to_config_db_patch(sonic_yang_patch)
-
-        # Assert
-        self.__assert_same_patch(config_db_patch, sonic_yang_patch, config_wrapper, patch_wrapper)
-
-    def __assert_same_patch(self, config_db_patch, sonic_yang_patch, config_wrapper, patch_wrapper):
-        sonic_yang = config_wrapper.get_sonic_yang_as_json()
-        config_db = config_wrapper.get_config_db_as_json()
-
-        after_update_sonic_yang = patch_wrapper.simulate_patch(sonic_yang_patch, sonic_yang)
-        after_update_config_db = patch_wrapper.simulate_patch(config_db_patch, config_db)
-        after_update_config_db_cropped = config_wrapper.crop_tables_without_yang(after_update_config_db)
-
-        after_update_sonic_yang_as_config_db = \
-            config_wrapper.convert_sonic_yang_to_config_db(after_update_sonic_yang)
-
-        self.assertTrue(patch_wrapper.verify_same_json(after_update_config_db_cropped, after_update_sonic_yang_as_config_db))
-
-class TestPathAddressing(unittest.TestCase):
-    def setUp(self):
-        self.path_addressing = gu_common.PathAddressing()
-        self.sy_only_models = sonic_yang.SonicYang(gu_common.YANG_DIR)
-        self.sy_only_models.loadYangModel()
-
-    def test_get_path_tokens(self):
-        def check(path, tokens):
-            expected=tokens
-            actual=self.path_addressing.get_path_tokens(path)
-            self.assertEqual(expected, actual)
-
-        check("", [])
-        check("/", [""])
-        check("/token", ["token"])
-        check("/more/than/one/token", ["more", "than", "one", "token"])
-        check("/has/numbers/0/and/symbols/^", ["has", "numbers", "0", "and", "symbols", "^"])
-        check("/~0/this/is/tilde", ["~", "this", "is", "tilde"])
-        check("/~1/this/is/forward-slash", ["/", "this", "is", "forward-slash"])
-        check("/\\\\/no-escaping", ["\\\\", "no-escaping"])
-        check("////empty/tokens/are/ok", ["", "", "", "empty", "tokens", "are", "ok"])
-
-    def test_create_path(self):
-        def check(tokens, path):
-            expected=path
-            actual=self.path_addressing.create_path(tokens)
-            self.assertEqual(expected, actual)
-
-        check([], "",)
-        check([""], "/",)
-        check(["token"], "/token")
-        check(["more", "than", "one", "token"], "/more/than/one/token")
-        check(["has", "numbers", "0", "and", "symbols", "^"], "/has/numbers/0/and/symbols/^")
-        check(["~", "this", "is", "tilde"], "/~0/this/is/tilde")
-        check(["/", "this", "is", "forward-slash"], "/~1/this/is/forward-slash")
-        check(["\\\\", "no-escaping"], "/\\\\/no-escaping")
-        check(["", "", "", "empty", "tokens", "are", "ok"], "////empty/tokens/are/ok")
-        check(["~token", "tilde-not-followed-by-0-or-1"], "/~0token/tilde-not-followed-by-0-or-1")
-
-    def test_get_xpath_tokens(self):
-        def check(path, tokens):
-            expected=tokens
-            actual=self.path_addressing.get_xpath_tokens(path)
-            self.assertEqual(expected, actual)
-
-        self.assertRaises(ValueError, check, "", [])
-        check("/", [])
-        check("/token", ["token"])
-        check("/more/than/one/token", ["more", "than", "one", "token"])
-        check("/multi/tokens/with/empty/last/token/", ["multi", "tokens", "with", "empty", "last", "token", ""])
-        check("/has/numbers/0/and/symbols/^", ["has", "numbers", "0", "and", "symbols", "^"])
-        check("/has[a='predicate']/in/the/beginning", ["has[a='predicate']", "in", "the", "beginning"])
-        check("/ha/s[a='predicate']/in/the/middle", ["ha",
"s[a='predicate']", "in", "the", "middle"]) - check("/ha/s[a='predicate-in-the-end']", ["ha", "s[a='predicate-in-the-end']"]) - check("/it/has[more='than'][one='predicate']/somewhere", ["it", "has[more='than'][one='predicate']", "somewhere"]) - check("/ha/s[a='predicate\"with']/double-quotes/inside", ["ha", "s[a='predicate\"with']", "double-quotes", "inside"]) - check('/a/predicate[with="double"]/quotes', ["a", 'predicate[with="double"]', "quotes"]) - check('/multiple["predicate"][with="double"]/quotes', ['multiple["predicate"][with="double"]', "quotes"]) - check('/multiple["predicate"][with="double"]/quotes', ['multiple["predicate"][with="double"]', "quotes"]) - check('/ha/s[a="predicate\'with"]/single-quote/inside', ["ha", 's[a="predicate\'with"]', "single-quote", "inside"]) - # XPATH 1.0 does not support single-quote within single-quoted string. str literal can be '[^']*' - # Not validating no single-quote within single-quoted string - check("/a/mix['of''quotes\"does']/not/work/well", ["a", "mix['of''quotes\"does']", "not", "work", "well"]) - # XPATH 1.0 does not support double-quotes within double-quoted string. str literal can be "[^"]*" - # Not validating no double-quotes within double-quoted string - check('/a/mix["of""quotes\'does"]/not/work/well', ["a", 'mix["of""quotes\'does"]', "not", "work", "well"]) - - def test_create_xpath(self): - def check(tokens, xpath): - expected=xpath - actual=self.path_addressing.create_xpath(tokens) - self.assertEqual(expected, actual) - - check([], "/") - check(["token"], "/token") - check(["more", "than", "one", "token"], "/more/than/one/token") - check(["multi", "tokens", "with", "empty", "last", "token", ""], "/multi/tokens/with/empty/last/token/") - check(["has", "numbers", "0", "and", "symbols", "^"], "/has/numbers/0/and/symbols/^") - check(["has[a='predicate']", "in", "the", "beginning"], "/has[a='predicate']/in/the/beginning") - check(["ha", "s[a='predicate']", "in", "the", "middle"], "/ha/s[a='predicate']/in/the/middle") - check(["ha", "s[a='predicate-in-the-end']"], "/ha/s[a='predicate-in-the-end']") - check(["it", "has[more='than'][one='predicate']", "somewhere"], "/it/has[more='than'][one='predicate']/somewhere") - check(["ha", "s[a='predicate\"with']", "double-quotes", "inside"], "/ha/s[a='predicate\"with']/double-quotes/inside") - check(["a", 'predicate[with="double"]', "quotes"], '/a/predicate[with="double"]/quotes') - check(['multiple["predicate"][with="double"]', "quotes"], '/multiple["predicate"][with="double"]/quotes') - check(['multiple["predicate"][with="double"]', "quotes"], '/multiple["predicate"][with="double"]/quotes') - check(["ha", 's[a="predicate\'with"]', "single-quote", "inside"], '/ha/s[a="predicate\'with"]/single-quote/inside') - # XPATH 1.0 does not support single-quote within single-quoted string. str literal can be '[^']*' - # Not validating no single-quote within single-quoted string - check(["a", "mix['of''quotes\"does']", "not", "work", "well"], "/a/mix['of''quotes\"does']/not/work/well", ) - # XPATH 1.0 does not support double-quotes within double-quoted string. 
str literal can be "[^"]*" - # Not validating no double-quotes within double-quoted string - check(["a", 'mix["of""quotes\'does"]', "not", "work", "well"], '/a/mix["of""quotes\'does"]/not/work/well') - - def test_find_ref_paths__ref_is_the_whole_key__returns_ref_paths(self): - # Arrange - path = "/PORT/Ethernet0" - expected = [ - "/ACL_TABLE/NO-NSW-PACL-V4/ports/0", - "/VLAN_MEMBER/Vlan1000|Ethernet0", - ] - - # Act - actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) - - # Assert - self.assertCountEqual(expected, actual) - - def test_find_ref_paths__ref_is_a_part_of_key__returns_ref_paths(self): - # Arrange - path = "/VLAN/Vlan1000" - expected = [ - "/VLAN_MEMBER/Vlan1000|Ethernet0", - "/VLAN_MEMBER/Vlan1000|Ethernet4", - "/VLAN_MEMBER/Vlan1000|Ethernet8", - ] - - # Act - actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) - - # Assert - self.assertCountEqual(expected, actual) - - def test_find_ref_paths__ref_is_in_multilist__returns_ref_paths(self): - # Arrange - path = "/PORT/Ethernet8" - expected = [ - "/INTERFACE/Ethernet8", - "/INTERFACE/Ethernet8|10.0.0.1~130", - ] - - # Act - actual = self.path_addressing.find_ref_paths(path, Files.CONFIG_DB_WITH_INTERFACE) - - # Assert - self.assertCountEqual(expected, actual) - - def test_find_ref_paths__ref_is_in_leafref_union__returns_ref_paths(self): - # Arrange - path = "/PORTCHANNEL/PortChannel0001" - expected = [ - "/ACL_TABLE/NO-NSW-PACL-V4/ports/1", - ] - - # Act - actual = self.path_addressing.find_ref_paths(path, Files.CONFIG_DB_WITH_PORTCHANNEL_AND_ACL) - - # Assert - self.assertCountEqual(expected, actual) - - def test_find_ref_paths__path_is_table__returns_ref_paths(self): - # Arrange - path = "/PORT" - expected = [ - "/ACL_TABLE/DATAACL/ports/0", - "/ACL_TABLE/EVERFLOW/ports/0", - "/ACL_TABLE/EVERFLOWV6/ports/0", - "/ACL_TABLE/EVERFLOWV6/ports/1", - "/ACL_TABLE/NO-NSW-PACL-V4/ports/0", - "/VLAN_MEMBER/Vlan1000|Ethernet0", - "/VLAN_MEMBER/Vlan1000|Ethernet4", - "/VLAN_MEMBER/Vlan1000|Ethernet8", - ] - - # Act - actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) - - # Assert - self.assertCountEqual(expected, actual) - - def test_find_ref_paths__whole_config_path__returns_all_refs(self): - # Arrange - path = "" - expected = [ - "/ACL_TABLE/DATAACL/ports/0", - "/ACL_TABLE/EVERFLOW/ports/0", - "/ACL_TABLE/EVERFLOWV6/ports/0", - "/ACL_TABLE/EVERFLOWV6/ports/1", - "/ACL_TABLE/NO-NSW-PACL-V4/ports/0", - "/VLAN_MEMBER/Vlan1000|Ethernet0", - "/VLAN_MEMBER/Vlan1000|Ethernet4", - "/VLAN_MEMBER/Vlan1000|Ethernet8", - ] - - # Act - actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) - - # Assert - self.assertCountEqual(expected, actual) - - def test_convert_path_to_xpath(self): - def check(path, xpath, config=None): - if not config: - config = Files.CROPPED_CONFIG_DB_AS_JSON - - expected=xpath - actual=self.path_addressing.convert_path_to_xpath(path, config, self.sy_only_models) - self.assertEqual(expected, actual) - - check(path="", xpath="/") - check(path="/VLAN_MEMBER", xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER") - check(path="/VLAN/Vlan1000/dhcp_servers", - xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers") - check(path="/VLAN/Vlan1000/dhcp_servers/0", - xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers[.='192.0.0.1']") - check(path="/PORT/Ethernet0/lanes", xpath="/sonic-port:sonic-port/PORT/PORT_LIST[name='Ethernet0']/lanes") - 
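For reference, a minimal standalone sketch (not part of this patch and not the PathAddressing implementation) of the RFC 6901 token escaping that the get_path_tokens/create_path cases above exercise, where "~" is encoded as "~0" and "/" as "~1":

    def create_path(tokens):
        # Escape "~" before "/" so an inserted "~1" is never re-escaped.
        return "".join("/" + token.replace("~", "~0").replace("/", "~1") for token in tokens)

    def get_path_tokens(path):
        # "" has no tokens and "/" has one empty token, matching the cases above.
        # Per RFC 6901, unescape "~1" before "~0".
        return [part.replace("~1", "/").replace("~0", "~") for part in path.split("/")[1:]]

    assert create_path(["~", "this", "is", "tilde"]) == "/~0/this/is/tilde"
    assert get_path_tokens("/~1/this/is/forward-slash") == ["/", "this", "is", "forward-slash"]
    assert get_path_tokens("") == [] and get_path_tokens("/") == [""]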
check(path="/ACL_TABLE/NO-NSW-PACL-V4/ports/0", - xpath="/sonic-acl:sonic-acl/ACL_TABLE/ACL_TABLE_LIST[ACL_TABLE_NAME='NO-NSW-PACL-V4']/ports[.='Ethernet0']") - check(path="/ACL_TABLE/NO-NSW-PACL-V4/ports/0", - xpath="/sonic-acl:sonic-acl/ACL_TABLE/ACL_TABLE_LIST[ACL_TABLE_NAME='NO-NSW-PACL-V4']/ports[.='Ethernet0']") - check(path="/VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode", - xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode") - check(path="/VLAN_MEMBER/Vlan1000|Ethernet8", - xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']") - check(path="/DEVICE_METADATA/localhost/hwsku", - xpath="/sonic-device_metadata:sonic-device_metadata/DEVICE_METADATA/localhost/hwsku", - config=Files.CONFIG_DB_WITH_DEVICE_METADATA) - check(path="/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", - xpath="/sonic-flex_counter:sonic-flex_counter/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", - config=Files.CONTRAINER_WITH_CONTAINER_CONFIG_DB) - check(path="/ACL_RULE/SSH_ONLY|RULE1/L4_SRC_PORT", - xpath="/sonic-acl:sonic-acl/ACL_RULE/ACL_RULE_LIST[ACL_TABLE_NAME='SSH_ONLY'][RULE_NAME='RULE1']/L4_SRC_PORT", - config=Files.CONFIG_DB_CHOICE) - check(path="/INTERFACE/Ethernet8", - xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_LIST[name='Ethernet8']", - config=Files.CONFIG_DB_WITH_INTERFACE) - check(path="/INTERFACE/Ethernet8|10.0.0.1~130", - xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']", - config=Files.CONFIG_DB_WITH_INTERFACE) - check(path="/INTERFACE/Ethernet8|10.0.0.1~130/scope", - xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']/scope", - config=Files.CONFIG_DB_WITH_INTERFACE) - check(path="/PORTCHANNEL_INTERFACE", - xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE", - config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) - check(path="/PORTCHANNEL_INTERFACE/PortChannel0001|1.1.1.1~124", - xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE/PORTCHANNEL_INTERFACE_IPPREFIX_LIST[name='PortChannel0001'][ip_prefix='1.1.1.1/24']", - config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) - - def test_convert_xpath_to_path(self): - def check(xpath, path, config=None): - if not config: - config = Files.CROPPED_CONFIG_DB_AS_JSON - - expected=path - actual=self.path_addressing.convert_xpath_to_path(xpath, config, self.sy_only_models) - self.assertEqual(expected, actual) - - check(xpath="/",path="") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER", path="/VLAN_MEMBER") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST",path="/VLAN_MEMBER") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']", - path="/VLAN_MEMBER/Vlan1000|Ethernet8") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/name", - path="/VLAN_MEMBER/Vlan1000|Ethernet8") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/port", - path="/VLAN_MEMBER/Vlan1000|Ethernet8") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode", - path="/VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode") - check(xpath="/sonic-vlan:sonic-acl/ACL_RULE", path="/ACL_RULE") - 
check(xpath="/sonic-vlan:sonic-acl/ACL_RULE/ACL_RULE_LIST[ACL_TABLE_NAME='SSH_ONLY'][RULE_NAME='RULE1']", - path="/ACL_RULE/SSH_ONLY|RULE1", - config=Files.CONFIG_DB_CHOICE) - check(xpath="/sonic-acl:sonic-acl/ACL_RULE/ACL_RULE_LIST[ACL_TABLE_NAME='SSH_ONLY'][RULE_NAME='RULE1']/L4_SRC_PORT", - path="/ACL_RULE/SSH_ONLY|RULE1/L4_SRC_PORT", - config=Files.CONFIG_DB_CHOICE) - check(xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers", - path="/VLAN/Vlan1000/dhcp_servers") - check(xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers[.='192.0.0.1']", - path="/VLAN/Vlan1000/dhcp_servers/0") - check(xpath="/sonic-port:sonic-port/PORT/PORT_LIST[name='Ethernet0']/lanes", path="/PORT/Ethernet0/lanes") - check(xpath="/sonic-acl:sonic-acl/ACL_TABLE/ACL_TABLE_LIST[ACL_TABLE_NAME='NO-NSW-PACL-V4']/ports[.='Ethernet0']", - path="/ACL_TABLE/NO-NSW-PACL-V4/ports/0") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode", - path="/VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']", - path="/VLAN_MEMBER/Vlan1000|Ethernet8") - check(xpath="/sonic-device_metadata:sonic-device_metadata/DEVICE_METADATA/localhost/hwsku", - path="/DEVICE_METADATA/localhost/hwsku", - config=Files.CONFIG_DB_WITH_DEVICE_METADATA) - check(xpath="/sonic-flex_counter:sonic-flex_counter/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK", - path="/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK", - config=Files.CONTRAINER_WITH_CONTAINER_CONFIG_DB) - check(xpath="/sonic-flex_counter:sonic-flex_counter/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", - path="/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", - config=Files.CONTRAINER_WITH_CONTAINER_CONFIG_DB) - check(xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_LIST[name='Ethernet8']", - path="/INTERFACE/Ethernet8", - config=Files.CONFIG_DB_WITH_INTERFACE) - check(xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']", - path="/INTERFACE/Ethernet8|10.0.0.1~130", - config=Files.CONFIG_DB_WITH_INTERFACE) - check(xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']/scope", - path="/INTERFACE/Ethernet8|10.0.0.1~130/scope", - config=Files.CONFIG_DB_WITH_INTERFACE) - check(xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE", - path="/PORTCHANNEL_INTERFACE", - config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) - check(xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE/PORTCHANNEL_INTERFACE_IPPREFIX_LIST[name='PortChannel0001'][ip_prefix='1.1.1.1/24']", - path="/PORTCHANNEL_INTERFACE/PortChannel0001|1.1.1.1~124", - config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) - diff --git a/tests/generic_config_updater/gutest_helpers.py b/tests/generic_config_updater/gutest_helpers.py deleted file mode 100644 index 2e8984ad68..0000000000 --- a/tests/generic_config_updater/gutest_helpers.py +++ /dev/null @@ -1,53 +0,0 @@ -import json -import jsonpatch -import os -import shutil -import sys -import unittest -from unittest.mock import MagicMock, Mock, call - -class MockSideEffectDict: - def __init__(self, map): - self.map = map - - def side_effect_func(self, *args): - l = [str(arg) for arg in args] - key = tuple(l) - value = self.map.get(key) - if value is None: - raise ValueError(f"Given arguments were not found in arguments map.\n 
Arguments: {key}\n Map: {self.map}") - - return value - -def create_side_effect_dict(map): - return MockSideEffectDict(map).side_effect_func - -class FilesLoader: - def __init__(self): - self.files_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files") - self.cache = {} - - def __getattr__(self, attr): - return self._load(attr) - - def _load(self, file_name): - normalized_file_name = file_name.lower() - - # Try load json file - json_file_path = os.path.join(self.files_path, f"{normalized_file_name}.json") - if os.path.isfile(json_file_path): - with open(json_file_path) as fh: - text = fh.read() - return json.loads(text) - - # Try load json-patch file - jsonpatch_file_path = os.path.join(self.files_path, f"{normalized_file_name}.json-patch") - if os.path.isfile(jsonpatch_file_path): - with open(jsonpatch_file_path) as fh: - text = fh.read() - return jsonpatch.JsonPatch(json.loads(text)) - - raise ValueError(f"There is no file called '{file_name}' in 'files/' directory") - -# Files.File_Name will look for a file called "file_name" in the "files/" directory -Files = FilesLoader() diff --git a/tests/generic_config_updater/patch_sorter_test.py b/tests/generic_config_updater/patch_sorter_test.py deleted file mode 100644 index 4da9fb901b..0000000000 --- a/tests/generic_config_updater/patch_sorter_test.py +++ /dev/null @@ -1,1730 +0,0 @@ -import jsonpatch -import unittest -from unittest.mock import MagicMock, Mock - -import generic_config_updater.patch_sorter as ps -from .gutest_helpers import Files, create_side_effect_dict -from generic_config_updater.gu_common import ConfigWrapper, PatchWrapper, OperationWrapper, \ - GenericConfigUpdaterError, OperationType, JsonChange, PathAddressing - -class TestDiff(unittest.TestCase): - def test_apply_move__updates_current_config(self): - # Arrange - diff = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, target_config=Files.ANY_CONFIG_DB) - move = ps.JsonMove.from_patch(Files.SINGLE_OPERATION_CONFIG_DB_PATCH) - - expected = ps.Diff(current_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION, target_config=Files.ANY_CONFIG_DB) - - # Act - actual = diff.apply_move(move) - - # Assert - self.assertEqual(expected.current_config, actual.current_config) - self.assertEqual(expected.target_config, actual.target_config) - - def test_has_no_diff__diff_exists__returns_false(self): - # Arrange - diff = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, - target_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION) - - # Act and Assert - self.assertFalse(diff.has_no_diff()) - - def test_has_no_diff__no_diff__returns_true(self): - # Arrange - diff = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, - target_config=Files.CROPPED_CONFIG_DB_AS_JSON) - - # Act and Assert - self.assertTrue(diff.has_no_diff()) - - def test_hash__different_current_config__different_hashes(self): - # Arrange - diff1 = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, target_config=Files.ANY_CONFIG_DB) - diff2 = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, target_config=Files.ANY_CONFIG_DB) - diff3 = ps.Diff(current_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION, target_config=Files.ANY_CONFIG_DB) - - # Act - hash1 = hash(diff1) - hash2 = hash(diff2) - hash3 = hash(diff3) - - # Assert - self.assertEqual(hash1, hash2) # same current config - self.assertNotEqual(hash1, hash3) - - def test_hash__different_target_config__different_hashes(self): - # Arrange - diff1 = ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.CROPPED_CONFIG_DB_AS_JSON) - diff2 
= ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.CROPPED_CONFIG_DB_AS_JSON)
-        diff3 = ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION)
-
-        # Act
-        hash1 = hash(diff1)
-        hash2 = hash(diff2)
-        hash3 = hash(diff3)
-
-        # Assert
-        self.assertEqual(hash1, hash2) # same target config
-        self.assertNotEqual(hash1, hash3)
-
-    def test_hash__swapped_current_and_target_configs__different_hashes(self):
-        # Arrange
-        diff1 = ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.ANY_OTHER_CONFIG_DB)
-        diff2 = ps.Diff(current_config=Files.ANY_OTHER_CONFIG_DB, target_config=Files.ANY_CONFIG_DB)
-
-        # Act
-        hash1 = hash(diff1)
-        hash2 = hash(diff2)
-
-        # Assert
-        self.assertNotEqual(hash1, hash2)
-
-    def test_eq__different_current_config__returns_false(self):
-        # Arrange
-        diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB)
-        other_diff = ps.Diff(Files.ANY_OTHER_CONFIG_DB, Files.ANY_CONFIG_DB)
-
-        # Act and assert
-        self.assertNotEqual(diff, other_diff)
-        self.assertFalse(diff == other_diff)
-
-    def test_eq__different_target_config__returns_false(self):
-        # Arrange
-        diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB)
-        other_diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_OTHER_CONFIG_DB)
-
-        # Act and assert
-        self.assertNotEqual(diff, other_diff)
-        self.assertFalse(diff == other_diff)
-
-    def test_eq__same_current_and_target_config__returns_true(self):
-        # Arrange
-        diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB)
-        other_diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB)
-
-        # Act and assert
-        self.assertEqual(diff, other_diff)
-        self.assertTrue(diff == other_diff)
-
-class TestJsonMove(unittest.TestCase):
-    def setUp(self):
-        self.operation_wrapper = OperationWrapper()
-        self.any_op_type = OperationType.REPLACE
-        self.any_tokens = ["table1", "key11"]
-        self.any_path = "/table1/key11"
-        self.any_config = {
-            "table1": {
-                "key11": "value11"
-            }
-        }
-        self.any_value = "value11"
-        self.any_operation = self.operation_wrapper.create(self.any_op_type, self.any_path, self.any_value)
-        self.any_diff = ps.Diff(self.any_config, self.any_config)
-
-    def test_ctor__delete_op_whole_config__none_value_and_empty_path(self):
-        # Arrange
-        path = ""
-        diff = ps.Diff(current_config={}, target_config=self.any_config)
-
-        # Act
-        jsonmove = ps.JsonMove(diff, OperationType.REMOVE, [])
-
-        # Assert
-        self.verify_jsonmove(self.operation_wrapper.create(OperationType.REMOVE, path),
-                             OperationType.REMOVE,
-                             [],
-                             None,
-                             jsonmove)
-
-    def test_ctor__remove_op__operation_created_directly(self):
-        # Arrange and Act
-        jsonmove = ps.JsonMove(self.any_diff, OperationType.REMOVE, self.any_tokens)
-
-        # Assert
-        self.verify_jsonmove(self.operation_wrapper.create(OperationType.REMOVE, self.any_path),
-                             OperationType.REMOVE,
-                             self.any_tokens,
-                             None,
-                             jsonmove)
-
-    def test_ctor__replace_op_whole_config__whole_config_value_and_empty_path(self):
-        # Arrange
-        path = ""
-        diff = ps.Diff(current_config={}, target_config=self.any_config)
-
-        # Act
-        jsonmove = ps.JsonMove(diff, OperationType.REPLACE, [], [])
-
-        # Assert
-        self.verify_jsonmove(self.operation_wrapper.create(OperationType.REPLACE, path, self.any_config),
-                             OperationType.REPLACE,
-                             [],
-                             [],
-                             jsonmove)
-
-    def test_ctor__replace_op__operation_created_directly(self):
-        # Arrange and Act
-        jsonmove = ps.JsonMove(self.any_diff, OperationType.REPLACE, self.any_tokens, self.any_tokens)
-
-        # Assert
-        self.verify_jsonmove(self.operation_wrapper.create(OperationType.REPLACE, self.any_path, self.any_value),
-                             OperationType.REPLACE,
-                             self.any_tokens,
-                             self.any_tokens,
-                             jsonmove)
-
-    def test_ctor__add_op_whole_config__whole_config_value_and_empty_path(self):
-        # Arrange
-        path = ""
-        diff = ps.Diff(current_config={}, target_config=self.any_config)
-
-        # Act
-        jsonmove = ps.JsonMove(diff, OperationType.ADD, [], [])
-
-        # Assert
-        self.verify_jsonmove(self.operation_wrapper.create(OperationType.ADD, path, self.any_config),
-                             OperationType.ADD,
-                             [],
-                             [],
-                             jsonmove)
-
-    def test_ctor__add_op_path_exist__same_value_and_path(self):
-        # Arrange and Act
-        jsonmove = ps.JsonMove(self.any_diff, OperationType.ADD, self.any_tokens, self.any_tokens)
-
-        # Assert
-        self.verify_jsonmove(self.operation_wrapper.create(OperationType.ADD, self.any_path, self.any_value),
-                             OperationType.ADD,
-                             self.any_tokens,
-                             self.any_tokens,
-                             jsonmove)
-
-    def test_ctor__add_op_path_exist_include_list__same_value_and_path(self):
-        # Arrange
-        current_config = {
-            "table1": {
-                "list1": ["value11", "value13"]
-            }
-        }
-        target_config = {
-            "table1": {
-                "list1": ["value11", "value12", "value13", "value14"]
-            }
-        }
-        diff = ps.Diff(current_config, target_config)
-        op_type = OperationType.ADD
-        current_config_tokens = ["table1", "list1", 1] # Index 1 exists in both current and target lists
-        target_config_tokens = ["table1", "list1", 1]
-        expected_jsonpatch_path = "/table1/list1/1"
-        expected_jsonpatch_value = "value12"
-        # NOTE: the target config can contain more diff than the given move.
-
-        # Act
-        jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens)
-
-        # Assert
-        self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value),
-                             op_type,
-                             current_config_tokens,
-                             target_config_tokens,
-                             jsonmove)
-
-    def test_ctor__add_op_path_exist_list_index_doesnot_exist_in_target___same_value_and_path(self):
-        # Arrange
-        current_config = {
-            "table1": {
-                "list1": ["value11"]
-            }
-        }
-        target_config = {
-            "table1": {
-                "list1": ["value12"]
-            }
-        }
-        diff = ps.Diff(current_config, target_config)
-        op_type = OperationType.ADD
-        current_config_tokens = ["table1", "list1", 1] # Index is 1, which does not exist in the target list
-        target_config_tokens = ["table1", "list1", 0]
-        expected_jsonpatch_path = "/table1/list1/1"
-        expected_jsonpatch_value = "value12"
-        # NOTE: the target config can contain more diff than the given move.
-
-        # Act
-        jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens)
-
-        # Assert
-        self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value),
-                             op_type,
-                             current_config_tokens,
-                             target_config_tokens,
-                             jsonmove)
-
-    def test_ctor__add_op_path_doesnot_exist__value_and_path_of_parent(self):
-        # Arrange
-        current_config = {
-        }
-        target_config = {
-            "table1": {
-                "key11": {
-                    "key111": "value111"
-                }
-            }
-        }
-        diff = ps.Diff(current_config, target_config)
-        op_type = OperationType.ADD
-        current_config_tokens = ["table1", "key11", "key111"]
-        target_config_tokens = ["table1", "key11", "key111"]
-        expected_jsonpatch_path = "/table1"
-        expected_jsonpatch_value = {
-            "key11": {
-                "key111": "value111"
-            }
-        }
-        # NOTE: the target config can contain more diff than the given move.
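The "/table1" parent path expected above reflects standard JSON Patch behavior; a hedged illustration with the jsonpatch package (already imported by this module): an "add" cannot create missing ancestors, so the move has to target the nearest ancestor that can be added in one operation.

    import jsonpatch

    # Adding at "/table1/key11/key111" fails on an empty config because the
    # ancestors "/table1" and "/table1/key11" do not exist yet, so the move
    # is widened to add the whole missing subtree at "/table1" instead.
    parent_add = jsonpatch.JsonPatch([
        {"op": "add", "path": "/table1", "value": {"key11": {"key111": "value111"}}},
    ])
    assert parent_add.apply({}) == {"table1": {"key11": {"key111": "value111"}}}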
- - # Act - jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens) - - # Assert - self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value), - op_type, - current_config_tokens, - target_config_tokens, - jsonmove) - - def test_ctor__add_op_path_doesnot_exist_include_list__value_and_path_of_parent(self): - # Arrange - current_config = { - } - target_config = { - "table1": { - "list1": ["value11", "value12", "value13", "value14"] - } - } - diff = ps.Diff(current_config, target_config) - op_type = OperationType.ADD - current_config_tokens = ["table1", "list1", 0] - target_config_tokens = ["table1", "list1", 1] - expected_jsonpatch_path = "/table1" - expected_jsonpatch_value = { - "list1": ["value12"] - } - # NOTE: the target config can contain more diff than the given move. - - # Act - jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens) - - # Assert - self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value), - op_type, - current_config_tokens, - target_config_tokens, - jsonmove) - - def test_from_patch__more_than_1_op__failure(self): - # Arrange - patch = jsonpatch.JsonPatch([self.any_operation, self.any_operation]) - - # Act and Assert - self.assertRaises(GenericConfigUpdaterError, ps.JsonMove.from_patch, patch) - - def test_from_patch__delete_op__delete_jsonmove(self): - # Arrange - operation = self.operation_wrapper.create(OperationType.REMOVE, self.any_path) - patch = jsonpatch.JsonPatch([operation]) - - # Act - jsonmove = ps.JsonMove.from_patch(patch) - - # Assert - self.verify_jsonmove(operation, - OperationType.REMOVE, - self.any_tokens, - None, - jsonmove) - - def test_from_patch__replace_op__replace_jsonmove(self): - # Arrange - operation = self.operation_wrapper.create(OperationType.REPLACE, self.any_path, self.any_value) - patch = jsonpatch.JsonPatch([operation]) - - # Act - jsonmove = ps.JsonMove.from_patch(patch) - - # Assert - self.verify_jsonmove(operation, - OperationType.REPLACE, - self.any_tokens, - self.any_tokens, - jsonmove) - - def test_from_patch__add_op__add_jsonmove(self): - # Arrange - operation = self.operation_wrapper.create(OperationType.ADD, self.any_path, self.any_value) - patch = jsonpatch.JsonPatch([operation]) - - # Act - jsonmove = ps.JsonMove.from_patch(patch) - - # Assert - self.verify_jsonmove(operation, - OperationType.ADD, - self.any_tokens, - self.any_tokens, - jsonmove) - - def test_from_patch__add_op_with_list_indexes__add_jsonmove(self): - # Arrange - path = "/table1/key11/list1111/3" - value = "value11111" - # From a JsonPatch it is not possible to figure out if the '3' is an item in a list or a dictionary, - # will assume by default a dictionary for simplicity. 
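A hedged illustration of the ambiguity described in the comment above, again with the jsonpatch package and a made-up "/key/3" path: a numeric token is an index only when the parent happens to be a list.

    import jsonpatch

    op = [{"op": "add", "path": "/key/3", "value": "x"}]
    # Dict parent: "3" is an ordinary string key.
    assert jsonpatch.apply_patch({"key": {}}, op) == {"key": {"3": "x"}}
    # List parent: "3" is a list index, here appending at position 3.
    assert jsonpatch.apply_patch({"key": [0, 1, 2]}, op) == {"key": [0, 1, 2, "x"]}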
- tokens = ["table1", "key11", "list1111", "3"] - operation = self.operation_wrapper.create(OperationType.ADD, path, value) - patch = jsonpatch.JsonPatch([operation]) - - # Act - jsonmove = ps.JsonMove.from_patch(patch) - - # Assert - self.verify_jsonmove(operation, - OperationType.ADD, - tokens, - tokens, - jsonmove) - - def test_from_patch__replace_whole_config__whole_config_jsonmove(self): - # Arrange - tokens = [] - path = "" - value = {"table1": {"key1": "value1"} } - operation = self.operation_wrapper.create(OperationType.REPLACE, path, value) - patch = jsonpatch.JsonPatch([operation]) - - # Act - jsonmove = ps.JsonMove.from_patch(patch) - - # Assert - self.verify_jsonmove(operation, - OperationType.REPLACE, - tokens, - tokens, - jsonmove) - - def verify_jsonmove(self, - expected_operation, - expected_op_type, - expected_current_config_tokens, - expected_target_config_tokens, - jsonmove): - expected_patch = jsonpatch.JsonPatch([expected_operation]) - self.assertEqual(expected_patch, jsonmove.patch) - self.assertEqual(expected_op_type, jsonmove.op_type) - self.assertListEqual(expected_current_config_tokens, jsonmove.current_config_tokens) - self.assertEqual(expected_target_config_tokens, jsonmove.target_config_tokens) - -class TestMoveWrapper(unittest.TestCase): - def setUp(self): - self.any_current_config = {} - self.any_target_config = {} - self.any_diff = ps.Diff(self.any_current_config, self.any_target_config) - self.any_move = Mock() - self.any_other_move1 = Mock() - self.any_other_move2 = Mock() - self.any_extended_move = Mock() - self.any_other_extended_move1 = Mock() - self.any_other_extended_move2 = Mock() - - self.single_move_generator = Mock() - self.single_move_generator.generate.side_effect = \ - create_side_effect_dict({(str(self.any_diff),): [self.any_move]}) - - self.another_single_move_generator = Mock() - self.another_single_move_generator.generate.side_effect = \ - create_side_effect_dict({(str(self.any_diff),): [self.any_other_move1]}) - - self.multiple_move_generator = Mock() - self.multiple_move_generator.generate.side_effect = create_side_effect_dict( - {(str(self.any_diff),): [self.any_move, self.any_other_move1, self.any_other_move2]}) - - self.single_move_extender = Mock() - self.single_move_extender.extend.side_effect = create_side_effect_dict( - { - (str(self.any_move), str(self.any_diff)): [self.any_extended_move], - (str(self.any_extended_move), str(self.any_diff)): [], # As first extended move will be extended - (str(self.any_other_extended_move1), str(self.any_diff)): [] # Needed when mixed with other extenders - }) - - self.another_single_move_extender = Mock() - self.another_single_move_extender.extend.side_effect = create_side_effect_dict( - { - (str(self.any_move), str(self.any_diff)): [self.any_other_extended_move1], - (str(self.any_other_extended_move1), str(self.any_diff)): [], # As first extended move will be extended - (str(self.any_extended_move), str(self.any_diff)): [] # Needed when mixed with other extenders - }) - - self.multiple_move_extender = Mock() - self.multiple_move_extender.extend.side_effect = create_side_effect_dict( - { - (str(self.any_move), str(self.any_diff)): \ - [self.any_extended_move, self.any_other_extended_move1, self.any_other_extended_move2], - # All extended moves will be extended - (str(self.any_extended_move), str(self.any_diff)): [], - (str(self.any_other_extended_move1), str(self.any_diff)): [], - (str(self.any_other_extended_move2), str(self.any_diff)): [], - }) - - self.mixed_move_extender = Mock() - 
self.mixed_move_extender.extend.side_effect = create_side_effect_dict( - { - (str(self.any_move), str(self.any_diff)): [self.any_extended_move], - (str(self.any_other_move1), str(self.any_diff)): [self.any_other_extended_move1], - (str(self.any_extended_move), str(self.any_diff)): \ - [self.any_other_extended_move1, self.any_other_extended_move2], - # All extended moves will be extended - (str(self.any_other_extended_move1), str(self.any_diff)): [], - (str(self.any_other_extended_move2), str(self.any_diff)): [], - }) - - self.fail_move_validator = Mock() - self.fail_move_validator.validate.side_effect = create_side_effect_dict( - {(str(self.any_move), str(self.any_diff)): False}) - - self.success_move_validator = Mock() - self.success_move_validator.validate.side_effect = create_side_effect_dict( - {(str(self.any_move), str(self.any_diff)): True}) - - def test_ctor__assigns_values_correctly(self): - # Arrange - move_generators = Mock() - move_extenders = Mock() - move_validators = Mock() - - # Act - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, move_validators) - - # Assert - self.assertIs(move_generators, move_wrapper.move_generators) - self.assertIs(move_extenders, move_wrapper.move_extenders) - self.assertIs(move_validators, move_wrapper.move_validators) - - def test_generate__single_move_generator__single_move_returned(self): - # Arrange - move_generators = [self.single_move_generator] - move_wrapper = ps.MoveWrapper(move_generators, [], []) - expected = [self.any_move] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__multiple_move_generator__multiple_move_returned(self): - # Arrange - move_generators = [self.multiple_move_generator] - move_wrapper = ps.MoveWrapper(move_generators, [], []) - expected = [self.any_move, self.any_other_move1, self.any_other_move2] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__different_move_generators__different_moves_returned(self): - # Arrange - move_generators = [self.single_move_generator, self.another_single_move_generator] - move_wrapper = ps.MoveWrapper(move_generators, [], []) - expected = [self.any_move, self.any_other_move1] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__duplicate_generated_moves__unique_moves_returned(self): - # Arrange - move_generators = [self.single_move_generator, self.single_move_generator] - move_wrapper = ps.MoveWrapper(move_generators, [], []) - expected = [self.any_move] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__single_move_extender__one_extended_move_returned(self): - # Arrange - move_generators = [self.single_move_generator] - move_extenders = [self.single_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) - expected = [self.any_move, self.any_extended_move] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__multiple_move_extender__multiple_extended_move_returned(self): - # Arrange - move_generators = [self.single_move_generator] - move_extenders = [self.multiple_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) - expected = [self.any_move, self.any_extended_move, 
self.any_other_extended_move1, self.any_other_extended_move2] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__different_move_extenders__different_extended_moves_returned(self): - # Arrange - move_generators = [self.single_move_generator] - move_extenders = [self.single_move_extender, self.another_single_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) - expected = [self.any_move, self.any_extended_move, self.any_other_extended_move1] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__duplicate_extended_moves__unique_moves_returned(self): - # Arrange - move_generators = [self.single_move_generator] - move_extenders = [self.single_move_extender, self.single_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) - expected = [self.any_move, self.any_extended_move] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__mixed_extended_moves__unique_moves_returned(self): - # Arrange - move_generators = [self.single_move_generator, self.another_single_move_generator] - move_extenders = [self.mixed_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) - expected = [self.any_move, - self.any_other_move1, - self.any_extended_move, - self.any_other_extended_move1, - self.any_other_extended_move2] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_validate__validation_fail__false_returned(self): - # Arrange - move_validators = [self.fail_move_validator] - move_wrapper = ps.MoveWrapper([], [], move_validators) - - # Act and assert - self.assertFalse(move_wrapper.validate(self.any_move, self.any_diff)) - - def test_validate__validation_succeed__true_returned(self): - # Arrange - move_validators = [self.success_move_validator] - move_wrapper = ps.MoveWrapper([], [], move_validators) - - # Act and assert - self.assertTrue(move_wrapper.validate(self.any_move, self.any_diff)) - - def test_validate__multiple_validators_last_fail___false_returned(self): - # Arrange - move_validators = [self.success_move_validator, self.success_move_validator, self.fail_move_validator] - move_wrapper = ps.MoveWrapper([], [], move_validators) - - # Act and assert - self.assertFalse(move_wrapper.validate(self.any_move, self.any_diff)) - - def test_validate__multiple_validators_succeed___true_returned(self): - # Arrange - move_validators = [self.success_move_validator, self.success_move_validator, self.success_move_validator] - move_wrapper = ps.MoveWrapper([], [], move_validators) - - # Act and assert - self.assertTrue(move_wrapper.validate(self.any_move, self.any_diff)) - - def test_simulate__applies_move(self): - # Arrange - diff = Mock() - diff.apply_move.side_effect = create_side_effect_dict({(str(self.any_move), ): self.any_diff}) - move_wrapper = ps.MoveWrapper(None, None, None) - - # Act - actual = move_wrapper.simulate(self.any_move, diff) - - # Assert - self.assertIs(self.any_diff, actual) - -class TestDeleteWholeConfigMoveValidator(unittest.TestCase): - def setUp(self): - self.operation_wrapper = OperationWrapper() - self.validator = ps.DeleteWholeConfigMoveValidator() - self.any_diff = Mock() - self.any_non_whole_config_path = "/table1" - self.whole_config_path = "" - - def 
test_validate__non_remove_op_non_whole_config__success(self): - self.verify(OperationType.REPLACE, self.any_non_whole_config_path, True) - self.verify(OperationType.ADD, self.any_non_whole_config_path, True) - - def test_validate__remove_op_non_whole_config__success(self): - self.verify(OperationType.REMOVE, self.any_non_whole_config_path, True) - - def test_validate__non_remove_op_whole_config__success(self): - self.verify(OperationType.REPLACE, self.whole_config_path, True) - self.verify(OperationType.ADD, self.whole_config_path, True) - - def test_validate__remove_op_whole_config__failure(self): - self.verify(OperationType.REMOVE, self.whole_config_path, False) - - def verify(self, operation_type, path, expected): - # Arrange - value = None - if operation_type in [OperationType.ADD, OperationType.REPLACE]: - value = Mock() - - operation = self.operation_wrapper.create(operation_type, path, value) - move = ps.JsonMove.from_operation(operation) - - # Act - actual = self.validator.validate(move, self.any_diff) - - # Assert - self.assertEqual(expected, actual) - -class TestUniqueLanesMoveValidator(unittest.TestCase): - def setUp(self): - self.validator = ps.UniqueLanesMoveValidator() - - def test_validate__no_port_table__success(self): - config = {"ACL_TABLE": {}} - self.validate_target_config(config) - - def test_validate__empty_port_table__success(self): - config = {"PORT": {}} - self.validate_target_config(config) - - def test_validate__single_lane__success(self): - config = {"PORT": {"Ethernet0": {"lanes": "66", "speed":"10000"}}} - self.validate_target_config(config) - - def test_validate__different_lanes_single_port___success(self): - config = {"PORT": {"Ethernet0": {"lanes": "66, 67, 68", "speed":"10000"}}} - self.validate_target_config(config) - - def test_validate__different_lanes_multi_ports___success(self): - config = {"PORT": { - "Ethernet0": {"lanes": "64, 65", "speed":"10000"}, - "Ethernet1": {"lanes": "66, 67, 68", "speed":"10000"}, - }} - self.validate_target_config(config) - - def test_validate__same_lanes_single_port___success(self): - config = {"PORT": {"Ethernet0": {"lanes": "65, 65", "speed":"10000"}}} - self.validate_target_config(config, False) - - def validate_target_config(self, target_config, expected=True): - # Arrange - current_config = {} - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) - - # Act - actual = self.validator.validate(move, diff) - - # Assert - self.assertEqual(expected, actual) - -class TestFullConfigMoveValidator(unittest.TestCase): - def setUp(self): - self.any_current_config = Mock() - self.any_target_config = Mock() - self.any_simulated_config = Mock() - self.any_diff = ps.Diff(self.any_current_config, self.any_target_config) - self.any_move = Mock() - self.any_move.apply.side_effect = \ - create_side_effect_dict({(str(self.any_current_config),): self.any_simulated_config}) - - def test_validate__invalid_config_db_after_applying_move__failure(self): - # Arrange - config_wrapper = Mock() - config_wrapper.validate_config_db_config.side_effect = \ - create_side_effect_dict({(str(self.any_simulated_config),): False}) - validator = ps.FullConfigMoveValidator(config_wrapper) - - # Act and assert - self.assertFalse(validator.validate(self.any_move, self.any_diff)) - - def test_validate__valid_config_db_after_applying_move__success(self): - # Arrange - config_wrapper = Mock() - config_wrapper.validate_config_db_config.side_effect = \ - create_side_effect_dict({(str(self.any_simulated_config),): 
True}) - validator = ps.FullConfigMoveValidator(config_wrapper) - - # Act and assert - self.assertTrue(validator.validate(self.any_move, self.any_diff)) - -class TestCreateOnlyMoveValidator(unittest.TestCase): - def setUp(self): - self.validator = ps.CreateOnlyMoveValidator(ps.PathAddressing()) - self.any_diff = ps.Diff({}, {}) - - def test_validate__non_replace_operation__success(self): - # Assert - self.assertTrue(self.validator.validate( \ - ps.JsonMove(self.any_diff, OperationType.ADD, [], []), self.any_diff)) - self.assertTrue(self.validator.validate( \ - ps.JsonMove(self.any_diff, OperationType.REMOVE, [], []), self.any_diff)) - - def test_validate__no_create_only_field__success(self): - current_config = {"PORT": {}} - target_config = {"PORT": {}, "ACL_TABLE": {}} - self.verify_diff(current_config, target_config) - - def test_validate__same_create_only_field__success(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, target_config) - - def test_validate__different_create_only_field__failure(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, target_config, expected=False) - - def test_validate__different_create_only_field_directly_updated__failure(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, - target_config, - ["PORT", "Ethernet0", "lanes"], - ["PORT", "Ethernet0", "lanes"], - False) - - def test_validate__different_create_only_field_updating_parent__failure(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, - target_config, - ["PORT", "Ethernet0"], - ["PORT", "Ethernet0"], - False) - - def test_validate__different_create_only_field_updating_grandparent__failure(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, - target_config, - ["PORT"], - ["PORT"], - False) - - def test_validate__same_create_only_field_directly_updated__failure(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, - target_config, - ["PORT", "Ethernet0", "lanes"], - ["PORT", "Ethernet0", "lanes"]) - - def test_validate__same_create_only_field_updating_parent__failure(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, - target_config, - ["PORT", "Ethernet0"], - ["PORT", "Ethernet0"]) - - def test_validate__same_create_only_field_updating_grandparent__failure(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, - target_config, - ["PORT"], - ["PORT"]) - - def verify_diff(self, current_config, target_config, current_config_tokens=None, target_config_tokens=None, expected=True): - # Arrange - current_config_tokens = current_config_tokens if current_config_tokens else [] - target_config_tokens = target_config_tokens if target_config_tokens else [] - 
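# Empty token lists address the whole config; build a REPLACE move over the given token paths and compare the validator's verdict against 'expected'. -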
diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, current_config_tokens, target_config_tokens) - - # Act - actual = self.validator.validate(move, diff) - - # Assert - self.assertEqual(expected, actual) - -class TestNoDependencyMoveValidator(unittest.TestCase): - def setUp(self): - path_addressing = ps.PathAddressing() - config_wrapper = ConfigWrapper() - self.validator = ps.NoDependencyMoveValidator(path_addressing, config_wrapper) - - def test_validate__add_full_config_has_dependencies__failure(self): - # Arrange - # CROPPED_CONFIG_DB_AS_JSON has dependencies between PORT and ACL_TABLE - diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CROPPED_CONFIG_DB_AS_JSON) - move = ps.JsonMove(diff, OperationType.ADD, [], []) - - # Act and assert - self.assertFalse(self.validator.validate(move, diff)) - - def test_validate__add_full_config_no_dependencies__success(self): - # Arrange - diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CONFIG_DB_NO_DEPENDENCIES) - move = ps.JsonMove(diff, OperationType.ADD, [], []) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def test_validate__add_table_has_no_dependencies__success(self): - # Arrange - target_config = Files.CROPPED_CONFIG_DB_AS_JSON - # prepare current config by removing ACL_TABLE from current config - current_config = self.prepare_config(target_config, jsonpatch.JsonPatch([ - {"op": "remove", "path":"/ACL_TABLE"} - ])) - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.ADD, ["ACL_TABLE"], ["ACL_TABLE"]) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def test_validate__remove_full_config_has_dependencies__failure(self): - # Arrange - # CROPPED_CONFIG_DB_AS_JSON has dependencies between PORT and ACL_TABLE - diff = ps.Diff(Files.CROPPED_CONFIG_DB_AS_JSON, Files.EMPTY_CONFIG_DB) - move = ps.JsonMove(diff, OperationType.REMOVE, [], []) - - # Act and assert - self.assertFalse(self.validator.validate(move, diff)) - - def test_validate__remove_full_config_no_dependencies__success(self): - # Arrange - diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CONFIG_DB_NO_DEPENDENCIES) - move = ps.JsonMove(diff, OperationType.REMOVE, [], []) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def test_validate__remove_table_has_no_dependencies__success(self): - # Arrange - current_config = Files.CROPPED_CONFIG_DB_AS_JSON - target_config = self.prepare_config(current_config, jsonpatch.JsonPatch([ - {"op": "remove", "path":"/ACL_TABLE"} - ])) - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REMOVE, ["ACL_TABLE"]) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def test_validate__replace_whole_config_item_added_ref_added__failure(self): - # Arrange - target_config = Files.SIMPLE_CONFIG_DB_INC_DEPS - # prepare current config by removing an item and its ref from target config - current_config = self.prepare_config(target_config, jsonpatch.JsonPatch([ - {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""}, - {"op": "remove", "path":"/PORT/Ethernet0"} - ])) - - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) - - # Act and assert - self.assertFalse(self.validator.validate(move, diff)) - - def test_validate__replace_whole_config_item_removed_ref_removed__false(self): - # Arrange - current_config = Files.SIMPLE_CONFIG_DB_INC_DEPS - # prepare target config by removing an item 
and its ref from current config - target_config = self.prepare_config(current_config, jsonpatch.JsonPatch([ - {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""}, - {"op": "remove", "path":"/PORT/Ethernet0"} - ])) - - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) - - # Act and assert - self.assertFalse(self.validator.validate(move, diff)) - - def test_validate__replace_whole_config_item_same_ref_added__true(self): - # Arrange - target_config = Files.SIMPLE_CONFIG_DB_INC_DEPS - # prepare current config by removing ref from target config - current_config = self.prepare_config(target_config, jsonpatch.JsonPatch([ - {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""} - ])) - - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def test_validate__replace_whole_config_item_same_ref_removed__true(self): - # Arrange - current_config= Files.SIMPLE_CONFIG_DB_INC_DEPS - # prepare target config by removing ref from current config - target_config = self.prepare_config(current_config, jsonpatch.JsonPatch([ - {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""} - ])) - - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def test_validate__replace_whole_config_item_same_ref_same__true(self): - # Arrange - current_config= Files.SIMPLE_CONFIG_DB_INC_DEPS - # prepare target config by removing ref from current config - target_config = current_config - - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def prepare_config(self, config, patch): - return patch.apply(config) - -class TestLowLevelMoveGenerator(unittest.TestCase): - def setUp(self): - path_addressing = PathAddressing() - self.generator = ps.LowLevelMoveGenerator(path_addressing) - - def test_generate__no_diff__no_moves(self): - self.verify() - - def test_generate__replace_key__replace_move(self): - self.verify(tc_ops=[{"op": "replace", 'path': '/PORT/Ethernet0/description', 'value':'any-desc'}]) - - def test_generate__leaf_key_missing__add_move(self): - self.verify( - cc_ops=[{"op": "remove", 'path': '/ACL_TABLE/EVERFLOW/policy_desc'}], - ex_ops=[{"op": "add", 'path': '/ACL_TABLE/EVERFLOW/policy_desc', 'value':'EVERFLOW'}] - ) - - def test_generate__leaf_key_additional__remove_move(self): - self.verify( - tc_ops=[{"op": "remove", 'path': '/ACL_TABLE/EVERFLOW/policy_desc'}] - ) - - def test_generate__table_missing__add_leafs_moves(self): - self.verify( - cc_ops=[{"op": "remove", 'path': '/VLAN'}], - ex_ops=[{'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'vlanid': '1000'}}}, - {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.1']}}}, - {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.2']}}}, - {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.3']}}}, - {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.4']}}}] - ) - - def test_generate__table_additional__remove_leafs_moves(self): - self.verify( - tc_ops=[{"op": "remove", 'path': '/VLAN'}], - ex_ops=[{'op': 'remove', 'path': '/VLAN/Vlan1000/vlanid'}, - {'op': 'remove', 'path': 
'/VLAN/Vlan1000/dhcp_servers/0'}, - {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/1'}, - {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/2'}, - {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/3'}] - ) - - def test_generate__leaf_table_missing__add_table(self): - self.verify( - tc_ops=[{"op": "add", 'path': '/NEW_TABLE', 'value':{}}] - ) - - def test_generate__leaf_table_additional__remove_table(self): - self.verify( - cc_ops=[{"op": "add", 'path': '/NEW_TABLE', 'value':{}}], - ex_ops=[{"op": "remove", 'path': '/NEW_TABLE'}] - ) - - def test_generate__replace_list_item__remove_add_replace_moves(self): - self.verify( - tc_ops=[{"op": "replace", 'path': '/ACL_TABLE/EVERFLOW/ports/0', 'value':'Ethernet0'}], - ex_ops=[ - {"op": "remove", 'path': '/ACL_TABLE/EVERFLOW/ports/0'}, - {"op": "add", 'path': '/ACL_TABLE/EVERFLOW/ports/0', 'value':'Ethernet0'}, - {"op": "replace", 'path': '/ACL_TABLE/EVERFLOW/ports/0', 'value':'Ethernet0'}, - ]) - - def test_generate__remove_list_item__remove_move(self): - self.verify( - tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}]) - - def test_generate__remove_multiple_list_items__multiple_remove_moves(self): - self.verify( - tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, - {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}], - ex_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, - {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/1'}] - ) - - def test_generate__remove_all_list_items__multiple_remove_moves(self): - self.verify( - tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], - ex_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, - {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/2'}, - {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/3'}, - {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/1'}] - ) - - def test_generate__add_list_items__add_move(self): - self.verify( - tc_ops=[{"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}] - ) - - def test_generate__add_multiple_list_items__multiple_add_moves(self): - self.verify( - tc_ops=[{"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, - {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.6'}] - ) - - def test_generate__add_all_list_items__multiple_add_moves(self): - self.verify( - cc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], - ex_ops=[{"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.1'}, - {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.2'}, - {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.3'}, - {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.4'}] - ) - - def test_generate__replace_multiple_list_items__multiple_remove_add_replace_moves(self): - self.verify( - tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, - {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.6'}], - ex_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, - {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/3'}, - {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, - {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.6'}, - {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, - {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 
'value':'192.168.1.6'}, - {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.5'}, - {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.6'}] - ) - - def test_generate__different_order_list_items__whole_list_replace_move(self): - self.verify( - tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[ - "192.0.0.4", - "192.0.0.3", - "192.0.0.2", - "192.0.0.1" - ]}]) - - def test_generate__whole_list_missing__add_items_moves(self): - self.verify( - cc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], - ex_ops=[{'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.1']}, - {'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.2']}, - {'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.3']}, - {'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.4']}]) - - def test_generate__whole_list_additional__remove_items_moves(self): - self.verify( - tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], - ex_ops=[{'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, - {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/1'}, - {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/2'}, - {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/3'}]) - - def test_generate__empty_list_missing__add_whole_list(self): - self.verify( - tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], - cc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], - ex_ops=[{'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}]) - - def test_generate__empty_list_additional__remove_whole_list(self): - self.verify( - tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], - cc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], - ex_ops=[{'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers'}]) - - def test_generate__dpb_1_to_4_example(self): - # Arrange - diff = ps.Diff(Files.DPB_1_SPLIT_FULL_CONFIG, Files.DPB_4_SPLITS_FULL_CONFIG) - - # Act - moves = list(self.generator.generate(diff)) - - # Assert - self.verify_moves([{'op': 'replace', 'path': '/PORT/Ethernet0/alias', 'value': 'Eth1/1'}, - {'op': 'replace', 'path': '/PORT/Ethernet0/lanes', 'value': '65'}, - {'op': 'replace', 'path': '/PORT/Ethernet0/description', 'value': ''}, - {'op': 'replace', 'path': '/PORT/Ethernet0/speed', 'value': '10000'}, - {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'alias': 'Eth1/2'}}, - {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'lanes': '66'}}, - {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'description': ''}}, - {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'speed': '10000'}}, - {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'alias': 'Eth1/3'}}, - {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'lanes': '67'}}, - {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'description': ''}}, - {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'speed': '10000'}}, - {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'alias': 'Eth1/4'}}, - {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'lanes': '68'}}, - {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'description': ''}}, - {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'speed': '10000'}}, - {'op': 'add', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1', 'value': 'Ethernet1'}, - {'op': 'add', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1', 'value': 'Ethernet2'}, - {'op': 'add', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1', 
'value': 'Ethernet3'}, - {'op': 'add', 'path': '/VLAN_MEMBER/Vlan100|Ethernet1', 'value': {'tagging_mode': 'untagged'}}, - {'op': 'add', 'path': '/VLAN_MEMBER/Vlan100|Ethernet2', 'value': {'tagging_mode': 'untagged'}}, - {'op': 'add', 'path': '/VLAN_MEMBER/Vlan100|Ethernet3', 'value': {'tagging_mode': 'untagged'}}], - moves) - - def test_generate__dpb_4_to_1_example(self): - # Arrange - diff = ps.Diff(Files.DPB_4_SPLITS_FULL_CONFIG, Files.DPB_1_SPLIT_FULL_CONFIG) - - # Act - moves = list(self.generator.generate(diff)) - - # Assert - self.verify_moves([{'op': 'replace', 'path': '/PORT/Ethernet0/alias', 'value': 'Eth1'}, - {'op': 'replace', 'path': '/PORT/Ethernet0/lanes', 'value': '65, 66, 67, 68'}, - {'op': 'replace', 'path': '/PORT/Ethernet0/description', 'value': 'Ethernet0 100G link'}, - {'op': 'replace', 'path': '/PORT/Ethernet0/speed', 'value': '100000'}, - {'op': 'remove', 'path': '/PORT/Ethernet1/alias'}, - {'op': 'remove', 'path': '/PORT/Ethernet1/lanes'}, - {'op': 'remove', 'path': '/PORT/Ethernet1/description'}, - {'op': 'remove', 'path': '/PORT/Ethernet1/speed'}, - {'op': 'remove', 'path': '/PORT/Ethernet2/alias'}, - {'op': 'remove', 'path': '/PORT/Ethernet2/lanes'}, - {'op': 'remove', 'path': '/PORT/Ethernet2/description'}, - {'op': 'remove', 'path': '/PORT/Ethernet2/speed'}, - {'op': 'remove', 'path': '/PORT/Ethernet3/alias'}, - {'op': 'remove', 'path': '/PORT/Ethernet3/lanes'}, - {'op': 'remove', 'path': '/PORT/Ethernet3/description'}, - {'op': 'remove', 'path': '/PORT/Ethernet3/speed'}, - {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1'}, - {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/2'}, - {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/3'}, - {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan100|Ethernet1/tagging_mode'}, - {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan100|Ethernet2/tagging_mode'}, - {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan100|Ethernet3/tagging_mode'}], - moves) - - def verify(self, tc_ops=None, cc_ops=None, ex_ops=None): - """ - Generate a diff where the target config is modified using the given tc_ops and the current config using the given cc_ops.
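- For example (an illustrative sketch reusing test_generate__replace_key__replace_move above), verify(tc_ops=[{"op": "replace", 'path': '/PORT/Ethernet0/description', 'value':'any-desc'}]) expects that same replace operation back as the only generated low level move.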
- The expected low level moves should match ex_ops if it is not None, otherwise tc_ops - """ - # Arrange - diff = self.get_diff(target_config_ops=tc_ops, current_config_ops=cc_ops) - expected = ex_ops if ex_ops is not None else \ - tc_ops if tc_ops is not None else \ - [] - - # Act - actual = self.generator.generate(diff) - - # Assert - self.verify_moves(expected, actual) - - def verify_moves(self, ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] - self.assertCountEqual(ops, moves_ops) - - def get_diff(self, target_config_ops = None, current_config_ops = None): - current_config = Files.CROPPED_CONFIG_DB_AS_JSON - if current_config_ops: - cc_patch = jsonpatch.JsonPatch(current_config_ops) - current_config = cc_patch.apply(current_config) - - target_config = Files.CROPPED_CONFIG_DB_AS_JSON - if target_config_ops: - tc_patch = jsonpatch.JsonPatch(target_config_ops) - target_config = tc_patch.apply(target_config) - - return ps.Diff(current_config, target_config) - -class TestUpperLevelMoveExtender(unittest.TestCase): - def setUp(self): - self.extender = ps.UpperLevelMoveExtender() - self.any_diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) - - def test_extend__root_level_move__no_extended_moves(self): - self.verify(OperationType.REMOVE, []) - self.verify(OperationType.ADD, [], []) - self.verify(OperationType.REPLACE, [], []) - - def test_extend__remove_key_upper_level_does_not_exist__remove_upper_level(self): - self.verify(OperationType.REMOVE, - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - tc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}], - ex_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}]) - - def test_extend__remove_key_upper_level_does_exist__replace_upper_level(self): - self.verify(OperationType.REMOVE, - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - tc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW/policy_desc'}], - ex_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW', 'value':{ - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }}]) - - def test_extend__remove_list_item_upper_level_does_not_exist__remove_upper_level(self): - self.verify(OperationType.REMOVE, - ["VLAN", "Vlan1000", "dhcp_servers", 1], - tc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers'}], - ex_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers'}]) - - def test_extend__remove_list_item_upper_level_does_exist__replace_upper_level(self): - self.verify(OperationType.REMOVE, - ["VLAN", "Vlan1000", "dhcp_servers", 1], - tc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers/1'}], - ex_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ - "192.0.0.1", - "192.0.0.3", - "192.0.0.4" - ]}]) - - def test_extend__add_key_upper_level_missing__add_upper_level(self): - self.verify(OperationType.ADD, - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - cc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}], - ex_ops=[{'op':'add', 'path':'/ACL_TABLE/EVERFLOW', 'value':{ - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }}]) - - def test_extend__add_key_upper_level_exist__replace_upper_level(self): - self.verify(OperationType.ADD, - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - cc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW/policy_desc'}], - ex_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW', 'value':{ - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" -
}}]) - - def test_extend__add_list_item_upper_level_missing__add_upper_level(self): - self.verify(OperationType.ADD, - ["VLAN", "Vlan1000", "dhcp_servers", 1], - ["VLAN", "Vlan1000", "dhcp_servers", 1], - cc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers'}], - ex_ops=[{'op':'add', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ]}]) - - def test_extend__add_list_item_upper_level_exist__replace_upper_level(self): - self.verify(OperationType.ADD, - ["VLAN", "Vlan1000", "dhcp_servers", 1], - ["VLAN", "Vlan1000", "dhcp_servers", 1], - cc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers/1'}], - ex_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ]}]) - - def test_extend__add_table__replace_whole_config(self): - self.verify(OperationType.ADD, - ["ACL_TABLE"], - ["ACL_TABLE"], - cc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], - ex_ops=[{'op':'replace', 'path':'', 'value':Files.CROPPED_CONFIG_DB_AS_JSON}]) - - def test_extend__replace_key__replace_upper_level(self): - self.verify(OperationType.REPLACE, - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], - ex_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW', 'value':{ - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }}]) - - def test_extend__replace_list_item__replace_upper_level(self): - self.verify(OperationType.REPLACE, - ["VLAN", "Vlan1000", "dhcp_servers", 1], - ["VLAN", "Vlan1000", "dhcp_servers", 1], - cc_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers/1', 'value':'192.0.0.7'}], - ex_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ]}]) - - def test_extend__replace_table__replace_whole_config(self): - self.verify(OperationType.REPLACE, - ["VLAN"], - ["VLAN"], - cc_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers/1', 'value':'192.0.0.7'}], - ex_ops=[{'op':'replace', 'path':'', 'value':Files.CROPPED_CONFIG_DB_AS_JSON}]) - - def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[]): - """ - cc_ops, tc_ops are used to build the diff object. - diff, op_type, ctokens, ttokens are used to build the move. - move is extended and the result should match ex_ops. 
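- E.g. (a sketch with values copied from test_extend__remove_key_upper_level_does_exist__replace_upper_level above): a REMOVE move for ["ACL_TABLE", "EVERFLOW", "policy_desc"] whose upper level still exists extends to a single replace of the whole /ACL_TABLE/EVERFLOW object.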
- """ - # Arrange - current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, op_type, ctokens, ttokens) - - # Act - moves = self.extender.extend(move, diff) - - # Assert - self.verify_moves(ex_ops, moves) - - def verify_moves(self, ex_ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] - self.assertCountEqual(ex_ops, moves_ops) - -class TestDeleteInsteadOfReplaceMoveExtender(unittest.TestCase): - def setUp(self): - self.extender = ps.DeleteInsteadOfReplaceMoveExtender() - - def test_extend__non_replace__no_extended_moves(self): - self.verify(OperationType.REMOVE, - ["ACL_TABLE"], - tc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], - ex_ops=[]) - self.verify(OperationType.ADD, - ["ACL_TABLE"], - ["ACL_TABLE"], - cc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], - ex_ops=[]) - - def test_extend__replace_key__delete_key(self): - self.verify(OperationType.REPLACE, - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], - ex_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW/policy_desc'}]) - - def test_extend__replace_list_item__delete_list_item(self): - self.verify(OperationType.REPLACE, - ["VLAN", "Vlan1000", "dhcp_servers", 1], - ["VLAN", "Vlan1000", "dhcp_servers", 1], - cc_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers/1', 'value':'192.0.0.7'}], - ex_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers/1'}]) - - def test_extend__replace_table__delete_table(self): - self.verify(OperationType.REPLACE, - ["ACL_TABLE"], - ["ACL_TABLE"], - cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], - ex_ops=[{'op':'remove', 'path':'/ACL_TABLE'}]) - - def test_extend__replace_whole_config__delete_whole_config(self): - self.verify(OperationType.REPLACE, - [], - [], - cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], - ex_ops=[{'op':'remove', 'path':''}]) - - def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[]): - """ - cc_ops, tc_ops are used to build the diff object. - diff, op_type, ctokens, ttokens are used to build the move. - move is extended and the result should match ex_ops. 
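- E.g. (a sketch with values copied from test_extend__replace_key__delete_key above): a REPLACE move for /ACL_TABLE/EVERFLOW/policy_desc extends to {'op':'remove', 'path':'/ACL_TABLE/EVERFLOW/policy_desc'}.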
- """ - # Arrange - current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, op_type, ctokens, ttokens) - - # Act - moves = self.extender.extend(move, diff) - - # Assert - self.verify_moves(ex_ops, moves) - - def verify_moves(self, ex_ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] - self.assertCountEqual(ex_ops, moves_ops) - -class DeleteRefsMoveExtender(unittest.TestCase): - def setUp(self): - self.extender = ps.DeleteRefsMoveExtender(PathAddressing()) - - def test_extend__non_delete_ops__no_extended_moves(self): - self.verify(OperationType.ADD, - ["ACL_TABLE"], - ["ACL_TABLE"], - cc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], - ex_ops=[]) - self.verify(OperationType.REPLACE, - ["ACL_TABLE"], - ["ACL_TABLE"], - cc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}], - ex_ops=[]) - - def test_extend__path_with_no_refs__no_extended_moves(self): - self.verify(OperationType.REMOVE, - ["ACL_TABLE"], - tc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], - ex_ops=[]) - - def test_extend__path_with_direct_refs__extended_moves(self): - self.verify(OperationType.REMOVE, - ["PORT", "Ethernet0"], - tc_ops=[{'op':'remove', 'path':'/PORT/Ethernet0'}], - ex_ops=[{'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet0'}, - {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/0'}]) - - def test_extend__path_with_refs_to_children__extended_moves(self): - self.verify(OperationType.REMOVE, - ["PORT"], - tc_ops=[{'op':'remove', 'path':'/PORT/Ethernet0'}], - ex_ops=[{'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet0'}, - {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/0'}, - {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet4'}, - {'op': 'remove', 'path': '/ACL_TABLE/DATAACL/ports/0'}, - {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet8'}, - {'op': 'remove', 'path': '/ACL_TABLE/EVERFLOWV6/ports/0'}, - {'op': 'remove', 'path': '/ACL_TABLE/EVERFLOW/ports/0'}, - {'op': 'remove', 'path': '/ACL_TABLE/EVERFLOWV6/ports/1'}]) - - def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[]): - """ - cc_ops, tc_ops are used to build the diff object. - diff, op_type, ctokens, ttokens are used to build the move. - move is extended and the result should match ex_ops. 
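- E.g. (a sketch with values copied from test_extend__path_with_direct_refs__extended_moves above): removing /PORT/Ethernet0 extends to removes of its referrers, such as /VLAN_MEMBER/Vlan1000|Ethernet0 and /ACL_TABLE/NO-NSW-PACL-V4/ports/0.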
- """ - # Arrange - current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, op_type, ctokens, ttokens) - - # Act - moves = self.extender.extend(move, diff) - - # Assert - self.verify_moves(ex_ops, moves) - - def verify_moves(self, ex_ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] - self.assertCountEqual(ex_ops, moves_ops) - -class TestSortAlgorithmFactory(unittest.TestCase): - def test_dfs_sorter(self): - self.verify(ps.Algorithm.DFS, ps.DfsSorter) - - def test_bfs_sorter(self): - self.verify(ps.Algorithm.BFS, ps.BfsSorter) - - def test_memoization_sorter(self): - self.verify(ps.Algorithm.MEMOIZATION, ps.MemoizationSorter) - - def verify(self, algo, algo_class): - # Arrange - factory = ps.SortAlgorithmFactory(OperationWrapper(), ConfigWrapper(), PathAddressing()) - expected_generators = [ps.LowLevelMoveGenerator] - expected_extenders = [ps.UpperLevelMoveExtender, ps.DeleteInsteadOfReplaceMoveExtender, ps.DeleteRefsMoveExtender] - expected_validator = [ps.DeleteWholeConfigMoveValidator, - ps.FullConfigMoveValidator, - ps.NoDependencyMoveValidator, - ps.UniqueLanesMoveValidator, - ps.CreateOnlyMoveValidator] - - # Act - sorter = factory.create(algo) - actual_generators = [type(item) for item in sorter.move_wrapper.move_generators] - actual_extenders = [type(item) for item in sorter.move_wrapper.move_extenders] - actual_validators = [type(item) for item in sorter.move_wrapper.move_validators] - - # Assert - self.assertIsInstance(sorter, algo_class) - self.assertCountEqual(expected_generators, actual_generators) - self.assertCountEqual(expected_extenders, actual_extenders) - self.assertCountEqual(expected_validator, actual_validators) - -class TestPatchSorter(unittest.TestCase): - def create_patch_sorter(self, config=None): - if config is None: - config=Files.CROPPED_CONFIG_DB_AS_JSON - config_wrapper = ConfigWrapper() - config_wrapper.get_config_db_as_json = MagicMock(return_value=config) - patch_wrapper = PatchWrapper(config_wrapper) - operation_wrapper = OperationWrapper() - path_addressing= ps.PathAddressing() - sort_algorithm_factory = ps.SortAlgorithmFactory(operation_wrapper, config_wrapper, path_addressing) - - return ps.PatchSorter(config_wrapper, patch_wrapper, sort_algorithm_factory) - - def test_sort__empty_patch__returns_empty_changes_list(self): - # Arrange - patch = jsonpatch.JsonPatch([]) - expected = [] - - # Act - actual = self.create_patch_sorter().sort(patch) - - # Assert - self.assertCountEqual(expected, actual) - - def test_sort__patch_with_single_simple_operation__returns_one_change(self): - # Arrange - patch = jsonpatch.JsonPatch([{"op":"remove", "path":"/VLAN/Vlan1000/dhcp_servers/0"}]) - expected = [JsonChange(patch)] - - # Act - actual = self.create_patch_sorter().sort(patch) - - # Assert - self.assertCountEqual(expected, actual) - - def test_sort__replacing_create_only_field__success(self): - # Arrange - patch = jsonpatch.JsonPatch([{"op":"replace", "path": "/PORT/Ethernet0/lanes", "value":"67"}]) - - # Act - actual = self.create_patch_sorter(Files.DPB_1_SPLIT_FULL_CONFIG).sort(patch) - - # Assert - self.assertNotEqual(None, actual) - - def test_sort__inter_dependency_within_same_table__success(self): - # Arrange - patch = jsonpatch.JsonPatch([{"op":"add", "path":"/VLAN_INTERFACE", "value": { - "Vlan1000|fc02:1000::1/64": {}, - "Vlan1000|192.168.0.1/21": {}, - 
"Vlan1000": {} - }}]) - expected = [ - JsonChange(jsonpatch.JsonPatch([{"op": "add", "path": "/VLAN_INTERFACE", "value": {"Vlan1000": {}}}])), - JsonChange(jsonpatch.JsonPatch([{"op": "add", "path": "/VLAN_INTERFACE/Vlan1000|fc02:1000::1~164", "value": {}}])), - JsonChange(jsonpatch.JsonPatch([{"op": "add", "path": "/VLAN_INTERFACE/Vlan1000|192.168.0.1~121", "value": {}}])) - ] - - # Act - actual = self.create_patch_sorter().sort(patch) - - # Assert - self.assertListEqual(expected, actual) - - def test_sort__add_table__success(self): - self.verify(cc_ops=[{"op":"remove", "path":"/ACL_TABLE"}]) - - def test_sort__remove_table__success(self): - self.verify(tc_ops=[{"op":"remove", "path":"/ACL_TABLE"}]) - - def test_sort__modify_value_in_existing_table__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"/ACL_TABLE/EVERFLOW/stage", "value":"egress"}]) - - def test_sort__modify_value_in_existing_array__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"/ACL_TABLE/EVERFLOWV6/ports/0", "value":"Ethernet0"}]) - - def test_sort__add_value_to_existing_array__success(self): - self.verify(tc_ops=[{"op":"add", "path":"/ACL_TABLE/EVERFLOWV6/ports/0", "value":"Ethernet0"}]) - - def test_sort__add_new_key_to_existing_table__success(self): - self.verify(cc_ops=[{"op":"remove", "path":"/ACL_TABLE/EVERFLOWV6"}]) - - def test_sort__remove_2_items_with_dependency_from_different_tables__success(self): - self.verify(tc_ops=[{"op":"remove", "path":"/PORT/Ethernet0"}, - {"op":"remove", "path":"/VLAN_MEMBER/Vlan1000|Ethernet0"}, - {"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}], # removing ACL from current and target - cc_ops=[{"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}]) - - def test_sort__add_2_items_with_dependency_from_different_tables__success(self): - self.verify(tc_ops=[{"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}], # removing ACL from current and target - cc_ops=[{"op":"remove", "path":"/PORT/Ethernet0"}, - {"op":"remove", "path":"/VLAN_MEMBER/Vlan1000|Ethernet0"}, - {"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}]) - - def test_sort__remove_2_items_with_dependency_from_same_table__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}, - {"op":"remove", "path":"/INTERFACE/Ethernet8"}, - {"op":"remove", "path":"/INTERFACE/Ethernet8|10.0.0.1~130"}], - cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}]) - - def test_sort__add_2_items_with_dependency_from_same_table__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}], - cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}, - {"op":"remove", "path":"/INTERFACE/Ethernet8"}, - {"op":"remove", "path":"/INTERFACE/Ethernet8|10.0.0.1~130"}]) - - def test_sort__replace_mandatory_item__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"/ACL_TABLE/EVERFLOWV6/type", "value":"L2"}]) - - def test_sort__dpb_1_to_4__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.DPB_4_SPLITS_FULL_CONFIG}], - cc_ops=[{"op":"replace", "path":"", "value":Files.DPB_1_SPLIT_FULL_CONFIG}]) - - def test_sort__dpb_4_to_1__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.DPB_1_SPLIT_FULL_CONFIG}], - cc_ops=[{"op":"replace", "path":"", "value":Files.DPB_4_SPLITS_FULL_CONFIG}]) - - def test_sort__remove_an_item_with_default_value__success(self): - self.verify(tc_ops=[{"op":"remove", "path":"/ACL_TABLE/EVERFLOW/stage"}]) - - def 
test_sort__modify_items_with_dependencies_using_must__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}, - {"op":"replace", "path":"/CRM/Config/acl_counter_high_threshold", "value":"60"}, - {"op":"replace", "path":"/CRM/Config/acl_counter_low_threshold", "value":"50"}], - cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}]) - - # in the following example, it is possible to start with acl_counter_high_threshold - self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}, - {"op":"replace", "path":"/CRM/Config/acl_counter_high_threshold", "value":"80"}, - {"op":"replace", "path":"/CRM/Config/acl_counter_low_threshold", "value":"60"}], - cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}]) - - def verify(self, cc_ops=[], tc_ops=[]): - # Arrange - config_wrapper=ConfigWrapper() - target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - patch=jsonpatch.make_patch(current_config, target_config) - - # Act - actual = self.create_patch_sorter(current_config).sort(patch) - - # Assert - simulated_config = current_config - for move in actual: - simulated_config = move.apply(simulated_config) - self.assertTrue(config_wrapper.validate_config_db_config(simulated_config)) - self.assertEqual(target_config, simulated_config) From 6bfb465e1d55a1e112850063fd473bcd34ee7a9e Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Wed, 11 Aug 2021 19:02:03 +0000 Subject: [PATCH 11/60] Added setup.py Signed-off-by: Vivek Reddy Karri --- setup.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 963c9af9b1..1b8bda9985 100644 --- a/setup.py +++ b/setup.py @@ -147,7 +147,9 @@ def run_tests(self): 'scripts/watermarkcfg', 'scripts/sonic-kdump-config', 'scripts/centralize_database', - 'scripts/null_route_helper' + 'scripts/null_route_helper', + 'scripts/coredump_gen_handler', + 'scripts/techsupport_cleanup' ], entry_points={ 'console_scripts': [ @@ -235,6 +237,5 @@ def run_tests(self): 'Topic :: Utilities', ], keywords='sonic SONiC utilities command line cli CLI', - cmdclass={"pytest": PyTest}, test_suite='setup.get_test_suite' ) From eba8261bcbf84f65c7320a1cd8a0eb9cbca4e61a Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Wed, 11 Aug 2021 23:57:29 +0000 Subject: [PATCH 12/60] Revert "Removed a few tests" This reverts commit 7def4b78f40496e8b9797087e30698dfea8c1eca. 
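For reference, the restored sorter suite can be exercised locally with a plain pytest invocation (assuming a standard test environment for this repo): pytest tests/generic_config_updater/patch_sorter_test.py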
--- tests/cli_autogen_yang_parser_test.py | 196 ++ tests/generic_config_updater/__init__.py | 0 .../files/any_config_db.json | 2 + .../files/any_other_config_db.json | 4 + .../files/config_db_after_multi_patch.json | 122 ++ .../config_db_after_single_operation.json | 83 + .../files/config_db_as_json.json | 92 + .../files/config_db_as_json_invalid.json | 7 + .../files/config_db_choice.json | 17 + .../files/config_db_no_dependencies.json | 39 + .../files/config_db_with_crm.json | 9 + .../files/config_db_with_device_metadata.json | 16 + .../files/config_db_with_interface.json | 20 + .../config_db_with_portchannel_and_acl.json | 25 + .../config_db_with_portchannel_interface.json | 10 + .../contrainer_with_container_config_db.json | 7 + .../files/cropped_config_db_as_json.json | 86 + .../files/dpb_1_split_full_config.json | 35 + .../files/dpb_1_to_4.json-patch | 88 + .../files/dpb_4_splits_full_config.json | 65 + .../files/dpb_4_to_1.json-patch | 58 + .../files/empty_config_db.json | 2 + ...multi_operation_config_db_patch.json-patch | 88 + ...ulti_operation_sonic_yang_patch.json-patch | 97 + .../files/simple_config_db_inc_deps.json | 20 + ...ingle_operation_config_db_patch.json-patch | 6 + ...ngle_operation_sonic_yang_patch.json-patch | 6 + .../files/sonic_yang_after_multi_patch.json | 153 ++ .../files/sonic_yang_as_json.json | 114 ++ .../files/sonic_yang_as_json_invalid.json | 13 + ...c_yang_as_json_with_unexpected_colons.json | 114 ++ .../sonic_yang_as_json_without_colons.json | 114 ++ .../generic_updater_test.py | 766 ++++++++ .../generic_config_updater/gu_common_test.py | 635 ++++++ .../generic_config_updater/gutest_helpers.py | 53 + .../patch_sorter_test.py | 1730 +++++++++++++++++ 36 files changed, 4892 insertions(+) create mode 100644 tests/cli_autogen_yang_parser_test.py create mode 100644 tests/generic_config_updater/__init__.py create mode 100644 tests/generic_config_updater/files/any_config_db.json create mode 100644 tests/generic_config_updater/files/any_other_config_db.json create mode 100644 tests/generic_config_updater/files/config_db_after_multi_patch.json create mode 100644 tests/generic_config_updater/files/config_db_after_single_operation.json create mode 100644 tests/generic_config_updater/files/config_db_as_json.json create mode 100644 tests/generic_config_updater/files/config_db_as_json_invalid.json create mode 100644 tests/generic_config_updater/files/config_db_choice.json create mode 100644 tests/generic_config_updater/files/config_db_no_dependencies.json create mode 100644 tests/generic_config_updater/files/config_db_with_crm.json create mode 100644 tests/generic_config_updater/files/config_db_with_device_metadata.json create mode 100644 tests/generic_config_updater/files/config_db_with_interface.json create mode 100644 tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json create mode 100644 tests/generic_config_updater/files/config_db_with_portchannel_interface.json create mode 100644 tests/generic_config_updater/files/contrainer_with_container_config_db.json create mode 100644 tests/generic_config_updater/files/cropped_config_db_as_json.json create mode 100644 tests/generic_config_updater/files/dpb_1_split_full_config.json create mode 100644 tests/generic_config_updater/files/dpb_1_to_4.json-patch create mode 100644 tests/generic_config_updater/files/dpb_4_splits_full_config.json create mode 100644 tests/generic_config_updater/files/dpb_4_to_1.json-patch create mode 100644 tests/generic_config_updater/files/empty_config_db.json create mode 100644 
tests/generic_config_updater/files/multi_operation_config_db_patch.json-patch create mode 100644 tests/generic_config_updater/files/multi_operation_sonic_yang_patch.json-patch create mode 100644 tests/generic_config_updater/files/simple_config_db_inc_deps.json create mode 100644 tests/generic_config_updater/files/single_operation_config_db_patch.json-patch create mode 100644 tests/generic_config_updater/files/single_operation_sonic_yang_patch.json-patch create mode 100644 tests/generic_config_updater/files/sonic_yang_after_multi_patch.json create mode 100644 tests/generic_config_updater/files/sonic_yang_as_json.json create mode 100644 tests/generic_config_updater/files/sonic_yang_as_json_invalid.json create mode 100644 tests/generic_config_updater/files/sonic_yang_as_json_with_unexpected_colons.json create mode 100644 tests/generic_config_updater/files/sonic_yang_as_json_without_colons.json create mode 100644 tests/generic_config_updater/generic_updater_test.py create mode 100644 tests/generic_config_updater/gu_common_test.py create mode 100644 tests/generic_config_updater/gutest_helpers.py create mode 100644 tests/generic_config_updater/patch_sorter_test.py diff --git a/tests/cli_autogen_yang_parser_test.py b/tests/cli_autogen_yang_parser_test.py new file mode 100644 index 0000000000..9ed915c69b --- /dev/null +++ b/tests/cli_autogen_yang_parser_test.py @@ -0,0 +1,196 @@ +import os +import logging +import pprint + +from sonic_cli_gen.yang_parser import YangParser +from .cli_autogen_input import assert_dictionaries + +logger = logging.getLogger(__name__) + +test_path = os.path.dirname(os.path.abspath(__file__)) +yang_models_path = '/usr/local/yang-models' +test_yang_models = [ + 'sonic-1-table-container', + 'sonic-2-table-containers', + 'sonic-1-object-container', + 'sonic-2-object-containers', + 'sonic-1-list', + 'sonic-2-lists', + 'sonic-static-object-complex-1', + 'sonic-static-object-complex-2', + 'sonic-dynamic-object-complex-1', + 'sonic-dynamic-object-complex-2', + 'sonic-choice-complex', + 'sonic-grouping-complex', + 'sonic-grouping-1', + 'sonic-grouping-2', +] + + +class TestYangParser: + @classmethod + def setup_class(cls): + logger.info("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "1" + move_yang_models() + + @classmethod + def teardown_class(cls): + logger.info("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + remove_yang_models() + + def test_1_table_container(self): + """ Test for 1 'table' container + A 'table' container represents a TABLE in the Config DB schema: + { + "TABLE": { + "OBJECT": { + "attr": "value" + ... + } + } + } + """ + + base_test('sonic-1-table-container', + assert_dictionaries.one_table_container) + + def test_2_table_containers(self): + """ Test for 2 'table' containers """ + + base_test('sonic-2-table-containers', + assert_dictionaries.two_table_containers) + + def test_1_object_container(self): + """ Test for 1 'object' container + An 'object' container represents an OBJECT in the Config DB schema: + { + "TABLE": { + "OBJECT": { + "attr": "value" + ...
+ } + } + } + """ + + base_test('sonic-1-object-container', + assert_dictionaries.one_object_container) + + def test_2_object_containers(self): + """ Test for 2 'object' containers """ + + base_test('sonic-2-object-containers', + assert_dictionaries.two_object_containers) + + def test_1_list(self): + """ Test for 1 container that contains + the YANG 'list' entity + """ + + base_test('sonic-1-list', assert_dictionaries.one_list) + + def test_2_lists(self): + """ Test for 2 containers that contain + the YANG 'list' entity + """ + + base_test('sonic-2-lists', assert_dictionaries.two_lists) + + def test_static_object_complex_1(self): + """ Test for the object container with: + 1 leaf, 1 leaf-list, 1 choice. + """ + + base_test('sonic-static-object-complex-1', + assert_dictionaries.static_object_complex_1) + + def test_static_object_complex_2(self): + """ Test for object container with: + 2 leafs, 2 leaf-lists, 2 choices. + """ + + base_test('sonic-static-object-complex-2', + assert_dictionaries.static_object_complex_2) + + def test_dynamic_object_complex_1(self): + """ Test for object container with: + 1 key, 1 leaf, 1 leaf-list, 1 choice. + """ + + base_test('sonic-dynamic-object-complex-1', + assert_dictionaries.dynamic_object_complex_1) + + def test_dynamic_object_complex_2(self): + """ Test for object container with: + 2 keys, 2 leafs, 2 leaf-lists, 2 choices. + """ + + base_test('sonic-dynamic-object-complex-2', + assert_dictionaries.dynamic_object_complex_2) + + def test_choice_complex(self): + """ Test for object container with the 'choice' + that has a complex structure: + leafs, leaf-lists, multiple 'uses' from different files + """ + + base_test('sonic-choice-complex', + assert_dictionaries.choice_complex) + + def test_grouping_complex(self): + """ Test for object container with multiple 'uses' that use 'grouping' + from different files.
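(for illustration, 'sonic-grouping-1' and 'sonic-grouping-2' from the test_yang_models list above are assumed to supply the shared groupings.)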
The used 'grouping' statements have a complex structure: + leafs, leaf-lists, choices + """ + + base_test('sonic-grouping-complex', + assert_dictionaries.grouping_complex) + + +def base_test(yang_model_name, correct_dict): + """ General logic for each test case """ + + config_db_path = os.path.join(test_path, + 'cli_autogen_input/config_db.json') + parser = YangParser(yang_model_name=yang_model_name, + config_db_path=config_db_path, + allow_tbl_without_yang=True, + debug=False) + yang_dict = parser.parse_yang_model() + pretty_log_debug(yang_dict) + assert yang_dict == correct_dict + + +def move_yang_models(): + """ Move the test YANG models to a known location + so they can be parsed by the YangParser class + """ + + for yang_model in test_yang_models: + src_path = os.path.join(test_path, + 'cli_autogen_input', + yang_model + '.yang') + cmd = 'sudo cp {} {}'.format(src_path, yang_models_path) + os.system(cmd) + + +def remove_yang_models(): + """ Remove the test YANG models from the known location + used by the YangParser class + """ + + for yang_model in test_yang_models: + yang_model_path = os.path.join(yang_models_path, + yang_model + '.yang') + cmd = 'sudo rm {}'.format(yang_model_path) + os.system(cmd) + + +def pretty_log_debug(dictionary): + """ Pretty print of parsed dictionary """ + + for line in pprint.pformat(dictionary).split('\n'): + logging.debug(line) + diff --git a/tests/generic_config_updater/__init__.py b/tests/generic_config_updater/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/generic_config_updater/files/any_config_db.json b/tests/generic_config_updater/files/any_config_db.json new file mode 100644 index 0000000000..2c63c08510 --- /dev/null +++ b/tests/generic_config_updater/files/any_config_db.json @@ -0,0 +1,2 @@ +{ +} diff --git a/tests/generic_config_updater/files/any_other_config_db.json b/tests/generic_config_updater/files/any_other_config_db.json new file mode 100644 index 0000000000..c258f768cf --- /dev/null +++ b/tests/generic_config_updater/files/any_other_config_db.json @@ -0,0 +1,4 @@ +{ + "VLAN": { + } +} diff --git a/tests/generic_config_updater/files/config_db_after_multi_patch.json b/tests/generic_config_updater/files/config_db_after_multi_patch.json new file mode 100644 index 0000000000..39dff7d688 --- /dev/null +++ b/tests/generic_config_updater/files/config_db_after_multi_patch.json @@ -0,0 +1,122 @@ +{ + "VLAN_MEMBER": { + "Vlan1000|Ethernet0": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet4": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet8": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet2": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet3": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet1": { + "tagging_mode": "untagged" + } + }, + "VLAN": { + "Vlan1000": { + "vlanid": "1000", + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + }, + "ACL_TABLE": { + "NO-NSW-PACL-V4": { + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0", + "Ethernet1", + "Ethernet2", + "Ethernet3" + ] + }, + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + "EVERFLOWV6": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + }, + "PORT": { + "Ethernet0": { + "alias": "Eth1/1", + "lanes": "65", +
"description": "", + "speed": "10000" + }, + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet3": { + "alias": "Eth1/4", + "lanes": "68", + "description": "", + "speed": "10000" + }, + "Ethernet1": { + "alias": "Eth1/2", + "lanes": "66", + "description": "", + "speed": "10000" + }, + "Ethernet2": { + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": "10000" + } + }, + "TABLE_WITHOUT_YANG": { + "Item1": { + "key11": "value11", + "key12": "value12" + } + } +} diff --git a/tests/generic_config_updater/files/config_db_after_single_operation.json b/tests/generic_config_updater/files/config_db_after_single_operation.json new file mode 100644 index 0000000000..0f2f447537 --- /dev/null +++ b/tests/generic_config_updater/files/config_db_after_single_operation.json @@ -0,0 +1,83 @@ +{ + "VLAN_MEMBER": { + "Vlan1000|Ethernet0": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet4": { + "tagging_mode": "untagged" + } + }, + "VLAN": { + "Vlan1000": { + "vlanid": "1000", + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + }, + "ACL_TABLE": { + "NO-NSW-PACL-V4": { + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] + }, + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + "EVERFLOWV6": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + }, + "PORT": { + "Ethernet0": { + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": "100000" + }, + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + } +} diff --git a/tests/generic_config_updater/files/config_db_as_json.json b/tests/generic_config_updater/files/config_db_as_json.json new file mode 100644 index 0000000000..02fb7c7e6a --- /dev/null +++ b/tests/generic_config_updater/files/config_db_as_json.json @@ -0,0 +1,92 @@ +{ + "VLAN_MEMBER": { + "Vlan1000|Ethernet0": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet4": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet8": { + "tagging_mode": "untagged" + } + }, + "VLAN": { + "Vlan1000": { + "vlanid": "1000", + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + }, + "ACL_TABLE": { + "NO-NSW-PACL-V4": { + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] + }, + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + "EVERFLOWV6": { + 
"policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + }, + "PORT": { + "Ethernet0": { + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": "100000" + }, + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + }, + "TABLE_WITHOUT_YANG": { + "Item1": { + "key11": "value11", + "key12": "value12" + } + } +} diff --git a/tests/generic_config_updater/files/config_db_as_json_invalid.json b/tests/generic_config_updater/files/config_db_as_json_invalid.json new file mode 100644 index 0000000000..a2cfdc91df --- /dev/null +++ b/tests/generic_config_updater/files/config_db_as_json_invalid.json @@ -0,0 +1,7 @@ +{ + "VLAN_MEMBER": { + "Vlan1000|Ethernet8": { + "tagging_mode": "untagged" + } + } +} diff --git a/tests/generic_config_updater/files/config_db_choice.json b/tests/generic_config_updater/files/config_db_choice.json new file mode 100644 index 0000000000..eaece3248f --- /dev/null +++ b/tests/generic_config_updater/files/config_db_choice.json @@ -0,0 +1,17 @@ +{ + "ACL_RULE": { + "SSH_ONLY|RULE1": { + "L4_SRC_PORT":"65174-6530" + } + }, + "ACL_TABLE": { + "SSH_ONLY": { + "policy_desc": "SSH_ONLY", + "type": "CTRLPLANE", + "stage": "ingress", + "services": [ + "SSH" + ] + } + } +} diff --git a/tests/generic_config_updater/files/config_db_no_dependencies.json b/tests/generic_config_updater/files/config_db_no_dependencies.json new file mode 100644 index 0000000000..12bdd464a5 --- /dev/null +++ b/tests/generic_config_updater/files/config_db_no_dependencies.json @@ -0,0 +1,39 @@ +{ + "VLAN": { + "Vlan1000": { + "vlanid": "1000", + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + }, + "ACL_TABLE": { + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "" + ], + "stage": "ingress", + "type": "MIRROR" + }, + "EVERFLOWV6": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + }, + "PORT": { + "Ethernet0": { + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": "100000" + } + } +} diff --git a/tests/generic_config_updater/files/config_db_with_crm.json b/tests/generic_config_updater/files/config_db_with_crm.json new file mode 100644 index 0000000000..5fd324d988 --- /dev/null +++ b/tests/generic_config_updater/files/config_db_with_crm.json @@ -0,0 +1,9 @@ +{ + "CRM": { + "Config": { + "acl_counter_high_threshold": "90", + "acl_counter_low_threshold": "70", + "acl_counter_threshold_type": "free" + } + } +} \ No newline at end of file diff --git a/tests/generic_config_updater/files/config_db_with_device_metadata.json b/tests/generic_config_updater/files/config_db_with_device_metadata.json new file mode 100644 index 0000000000..34def579f6 --- /dev/null +++ b/tests/generic_config_updater/files/config_db_with_device_metadata.json @@ -0,0 +1,16 @@ +{ + "DEVICE_METADATA": { + "localhost": { + "default_bgp_status": "up", + "default_pfcwd_status": "disable", + "bgp_asn": "65100", + "deployment_id": "1", + "docker_routing_config_mode": "separated", + "hostname": "vlab-01", + "hwsku": "Force10-S6000", + "type": 
"ToRRouter", + "platform": "x86_64-kvm_x86_64-r0", + "mac": "52:54:00:99:7e:85" + } + } +} \ No newline at end of file diff --git a/tests/generic_config_updater/files/config_db_with_interface.json b/tests/generic_config_updater/files/config_db_with_interface.json new file mode 100644 index 0000000000..2e1c488a4a --- /dev/null +++ b/tests/generic_config_updater/files/config_db_with_interface.json @@ -0,0 +1,20 @@ +{ + "INTERFACE": { + "Ethernet8": {}, + "Ethernet8|10.0.0.1/30": { + "family": "IPv4", + "scope": "global" + } + }, + "PORT": { + "Ethernet8": { + "admin_status": "up", + "alias": "eth8", + "description": "Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": "9000", + "speed": "25000" + } + } +} diff --git a/tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json b/tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json new file mode 100644 index 0000000000..23d33890f3 --- /dev/null +++ b/tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json @@ -0,0 +1,25 @@ +{ + "PORT": { + "Ethernet0": { + "alias": "Eth1/1", + "lanes": "65", + "description": "", + "speed": "10000" + } + }, + "PORTCHANNEL": { + "PortChannel0001": { + "admin_status": "up" + } + }, + "ACL_TABLE": { + "NO-NSW-PACL-V4": { + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0", + "PortChannel0001" + ] + } + } +} diff --git a/tests/generic_config_updater/files/config_db_with_portchannel_interface.json b/tests/generic_config_updater/files/config_db_with_portchannel_interface.json new file mode 100644 index 0000000000..4e05639dc5 --- /dev/null +++ b/tests/generic_config_updater/files/config_db_with_portchannel_interface.json @@ -0,0 +1,10 @@ +{ + "PORTCHANNEL": { + "PortChannel0001": { + "admin_status": "up" + } + }, + "PORTCHANNEL_INTERFACE": { + "PortChannel0001|1.1.1.1/24": {} + } +} diff --git a/tests/generic_config_updater/files/contrainer_with_container_config_db.json b/tests/generic_config_updater/files/contrainer_with_container_config_db.json new file mode 100644 index 0000000000..b0680b22b5 --- /dev/null +++ b/tests/generic_config_updater/files/contrainer_with_container_config_db.json @@ -0,0 +1,7 @@ +{ + "FLEX_COUNTER_TABLE": { + "BUFFER_POOL_WATERMARK": { + "FLEX_COUNTER_STATUS": "enable" + } + } +} diff --git a/tests/generic_config_updater/files/cropped_config_db_as_json.json b/tests/generic_config_updater/files/cropped_config_db_as_json.json new file mode 100644 index 0000000000..261e912c71 --- /dev/null +++ b/tests/generic_config_updater/files/cropped_config_db_as_json.json @@ -0,0 +1,86 @@ +{ + "VLAN_MEMBER": { + "Vlan1000|Ethernet0": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet4": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet8": { + "tagging_mode": "untagged" + } + }, + "VLAN": { + "Vlan1000": { + "vlanid": "1000", + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + }, + "ACL_TABLE": { + "NO-NSW-PACL-V4": { + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] + }, + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + "EVERFLOWV6": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + }, + "PORT": { + "Ethernet0": { + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": 
"Ethernet0 100G link", + "speed": "100000" + }, + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + } +} diff --git a/tests/generic_config_updater/files/dpb_1_split_full_config.json b/tests/generic_config_updater/files/dpb_1_split_full_config.json new file mode 100644 index 0000000000..2097289606 --- /dev/null +++ b/tests/generic_config_updater/files/dpb_1_split_full_config.json @@ -0,0 +1,35 @@ +{ + "PORT": { + "Ethernet0": { + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": "100000" + } + }, + "ACL_TABLE": { + "NO-NSW-PACL-V4": { + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] + } + }, + "VLAN_MEMBER": { + "Vlan100|Ethernet0": { + "tagging_mode": "untagged" + } + }, + "VLAN": { + "Vlan100": { + "vlanid": "100", + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + } +} diff --git a/tests/generic_config_updater/files/dpb_1_to_4.json-patch b/tests/generic_config_updater/files/dpb_1_to_4.json-patch new file mode 100644 index 0000000000..8eddd7a19d --- /dev/null +++ b/tests/generic_config_updater/files/dpb_1_to_4.json-patch @@ -0,0 +1,88 @@ +[ + { + "op": "add", + "path": "/PORT/Ethernet3", + "value": { + "alias": "Eth1/4", + "lanes": "68", + "description": "", + "speed": "10000" + } + }, + { + "op": "add", + "path": "/PORT/Ethernet1", + "value": { + "alias": "Eth1/2", + "lanes": "66", + "description": "", + "speed": "10000" + } + }, + { + "op": "add", + "path": "/PORT/Ethernet2", + "value": { + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": "10000" + } + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/lanes", + "value": "65" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/alias", + "value": "Eth1/1" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/description", + "value": "" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/speed", + "value": "10000" + }, + { + "op": "add", + "path": "/VLAN_MEMBER/Vlan100|Ethernet2", + "value": { + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/VLAN_MEMBER/Vlan100|Ethernet3", + "value": { + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/VLAN_MEMBER/Vlan100|Ethernet1", + "value": { + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1", + "value": "Ethernet1" + }, + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/2", + "value": "Ethernet2" + }, + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/3", + "value": "Ethernet3" + } +] diff --git a/tests/generic_config_updater/files/dpb_4_splits_full_config.json b/tests/generic_config_updater/files/dpb_4_splits_full_config.json new file mode 100644 index 0000000000..23d1b9ecfc --- /dev/null +++ b/tests/generic_config_updater/files/dpb_4_splits_full_config.json @@ -0,0 +1,65 @@ +{ + "PORT": { + "Ethernet0": { + "alias": "Eth1/1", + "lanes": "65", + "description": "", + "speed": "10000" + }, + "Ethernet1": { + "alias": "Eth1/2", + "lanes": "66", + "description": "", + "speed": "10000" + }, + "Ethernet2": { + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": "10000" + }, + 
"Ethernet3": { + "alias": "Eth1/4", + "lanes": "68", + "description": "", + "speed": "10000" + } + }, + "ACL_TABLE": { + "NO-NSW-PACL-V4": { + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0", + "Ethernet1", + "Ethernet2", + "Ethernet3" + ] + } + }, + "VLAN_MEMBER": { + "Vlan100|Ethernet0": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet1": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet2": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet3": { + "tagging_mode": "untagged" + } + }, + "VLAN": { + "Vlan100": { + "vlanid": "100", + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + } +} diff --git a/tests/generic_config_updater/files/dpb_4_to_1.json-patch b/tests/generic_config_updater/files/dpb_4_to_1.json-patch new file mode 100644 index 0000000000..33addd290d --- /dev/null +++ b/tests/generic_config_updater/files/dpb_4_to_1.json-patch @@ -0,0 +1,58 @@ +[ + { + "op": "remove", + "path": "/PORT/Ethernet2" + }, + { + "op": "remove", + "path": "/PORT/Ethernet1" + }, + { + "op": "remove", + "path": "/PORT/Ethernet3" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/alias", + "value": "Eth1" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/lanes", + "value": "65, 66, 67, 68" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/description", + "value": "Ethernet0 100G link" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/speed", + "value": "100000" + }, + { + "op": "remove", + "path": "/VLAN_MEMBER/Vlan100|Ethernet1" + }, + { + "op": "remove", + "path": "/VLAN_MEMBER/Vlan100|Ethernet3" + }, + { + "op": "remove", + "path": "/VLAN_MEMBER/Vlan100|Ethernet2" + }, + { + "op": "remove", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" + }, + { + "op": "remove", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" + }, + { + "op": "remove", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" + } +] diff --git a/tests/generic_config_updater/files/empty_config_db.json b/tests/generic_config_updater/files/empty_config_db.json new file mode 100644 index 0000000000..2c63c08510 --- /dev/null +++ b/tests/generic_config_updater/files/empty_config_db.json @@ -0,0 +1,2 @@ +{ +} diff --git a/tests/generic_config_updater/files/multi_operation_config_db_patch.json-patch b/tests/generic_config_updater/files/multi_operation_config_db_patch.json-patch new file mode 100644 index 0000000000..8eddd7a19d --- /dev/null +++ b/tests/generic_config_updater/files/multi_operation_config_db_patch.json-patch @@ -0,0 +1,88 @@ +[ + { + "op": "add", + "path": "/PORT/Ethernet3", + "value": { + "alias": "Eth1/4", + "lanes": "68", + "description": "", + "speed": "10000" + } + }, + { + "op": "add", + "path": "/PORT/Ethernet1", + "value": { + "alias": "Eth1/2", + "lanes": "66", + "description": "", + "speed": "10000" + } + }, + { + "op": "add", + "path": "/PORT/Ethernet2", + "value": { + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": "10000" + } + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/lanes", + "value": "65" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/alias", + "value": "Eth1/1" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/description", + "value": "" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/speed", + "value": "10000" + }, + { + "op": "add", + "path": "/VLAN_MEMBER/Vlan100|Ethernet2", + "value": { + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/VLAN_MEMBER/Vlan100|Ethernet3", + "value": { + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": 
"/VLAN_MEMBER/Vlan100|Ethernet1", + "value": { + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1", + "value": "Ethernet1" + }, + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/2", + "value": "Ethernet2" + }, + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/3", + "value": "Ethernet3" + } +] diff --git a/tests/generic_config_updater/files/multi_operation_sonic_yang_patch.json-patch b/tests/generic_config_updater/files/multi_operation_sonic_yang_patch.json-patch new file mode 100644 index 0000000000..f7005bb4a0 --- /dev/null +++ b/tests/generic_config_updater/files/multi_operation_sonic_yang_patch.json-patch @@ -0,0 +1,97 @@ +[ + { + "op": "add", + "path": "/sonic-vlan:sonic-vlan/sonic-vlan:VLAN_MEMBER/VLAN_MEMBER_LIST/3", + "value": { + "name": "Vlan100", + "port": "Ethernet2", + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/sonic-vlan:sonic-vlan/sonic-vlan:VLAN_MEMBER/VLAN_MEMBER_LIST/4", + "value": { + "name": "Vlan100", + "port": "Ethernet3", + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/sonic-vlan:sonic-vlan/sonic-vlan:VLAN_MEMBER/VLAN_MEMBER_LIST/5", + "value": { + "name": "Vlan100", + "port": "Ethernet1", + "tagging_mode": "untagged" + } + }, + { + "op": "replace", + "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/0/lanes", + "value": "65" + }, + { + "op": "replace", + "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/0/alias", + "value": "Eth1/1" + }, + { + "op": "replace", + "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/0/speed", + "value": 10000 + }, + { + "op": "replace", + "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/0/description", + "value": "" + }, + { + "op": "add", + "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/3", + "value": { + "name": "Ethernet3", + "alias": "Eth1/4", + "lanes": "68", + "description": "", + "speed": 10000 + } + }, + { + "op": "add", + "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/4", + "value": { + "name": "Ethernet1", + "alias": "Eth1/2", + "lanes": "66", + "description": "", + "speed": 10000 + } + }, + { + "op": "add", + "path": "/sonic-port:sonic-port/sonic-port:PORT/PORT_LIST/5", + "value": { + "name": "Ethernet2", + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": 10000 + } + }, + { + "op": "add", + "path": "/sonic-acl:sonic-acl/sonic-acl:ACL_TABLE/ACL_TABLE_LIST/0/ports/1", + "value": "Ethernet1" + }, + { + "op": "add", + "path": "/sonic-acl:sonic-acl/sonic-acl:ACL_TABLE/ACL_TABLE_LIST/0/ports/2", + "value": "Ethernet2" + }, + { + "op": "add", + "path": "/sonic-acl:sonic-acl/sonic-acl:ACL_TABLE/ACL_TABLE_LIST/0/ports/3", + "value": "Ethernet3" + } +] diff --git a/tests/generic_config_updater/files/simple_config_db_inc_deps.json b/tests/generic_config_updater/files/simple_config_db_inc_deps.json new file mode 100644 index 0000000000..4554582103 --- /dev/null +++ b/tests/generic_config_updater/files/simple_config_db_inc_deps.json @@ -0,0 +1,20 @@ +{ + "ACL_TABLE": { + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet0" + ], + "stage": "ingress", + "type": "MIRROR" + } + }, + "PORT": { + "Ethernet0": { + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": "100000" + } + } +} diff --git a/tests/generic_config_updater/files/single_operation_config_db_patch.json-patch b/tests/generic_config_updater/files/single_operation_config_db_patch.json-patch new file mode 100644 index 
0000000000..7cc0967bf0 --- /dev/null +++ b/tests/generic_config_updater/files/single_operation_config_db_patch.json-patch @@ -0,0 +1,6 @@ +[ + { + "op": "remove", + "path": "/VLAN_MEMBER/Vlan1000|Ethernet8" + } +] diff --git a/tests/generic_config_updater/files/single_operation_sonic_yang_patch.json-patch b/tests/generic_config_updater/files/single_operation_sonic_yang_patch.json-patch new file mode 100644 index 0000000000..5a46560496 --- /dev/null +++ b/tests/generic_config_updater/files/single_operation_sonic_yang_patch.json-patch @@ -0,0 +1,6 @@ +[ + { + "op": "remove", + "path": "/sonic-vlan:sonic-vlan/sonic-vlan:VLAN_MEMBER/VLAN_MEMBER_LIST/2" + } +] diff --git a/tests/generic_config_updater/files/sonic_yang_after_multi_patch.json b/tests/generic_config_updater/files/sonic_yang_after_multi_patch.json new file mode 100644 index 0000000000..0c9ddd4546 --- /dev/null +++ b/tests/generic_config_updater/files/sonic_yang_after_multi_patch.json @@ -0,0 +1,153 @@ +{ + "sonic-vlan:sonic-vlan": { + "sonic-vlan:VLAN_MEMBER": { + "VLAN_MEMBER_LIST": [ + { + "name": "Vlan1000", + "port": "Ethernet0", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet4", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet8", + "tagging_mode": "untagged" + }, + { + "name": "Vlan100", + "port": "Ethernet2", + "tagging_mode": "untagged" + }, + { + "name": "Vlan100", + "port": "Ethernet3", + "tagging_mode": "untagged" + }, + { + "name": "Vlan100", + "port": "Ethernet1", + "tagging_mode": "untagged" + } + ] + }, + "sonic-vlan:VLAN": { + "VLAN_LIST": [ + { + "name": "Vlan1000", + "vlanid": 1000, + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + ] + } + }, + "sonic-acl:sonic-acl": { + "sonic-acl:ACL_TABLE": { + "ACL_TABLE_LIST": [ + { + "ACL_TABLE_NAME": "NO-NSW-PACL-V4", + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0", + "Ethernet1", + "Ethernet2", + "Ethernet3" + ] + }, + { + "ACL_TABLE_NAME": "DATAACL", + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + { + "ACL_TABLE_NAME": "EVERFLOW", + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + { + "ACL_TABLE_NAME": "EVERFLOWV6", + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "name": "Ethernet0", + "alias": "Eth1/1", + "lanes": "65", + "description": "", + "speed": 10000 + }, + { + "name": "Ethernet4", + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": 1, + "lanes": "29,30,31,32", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + }, + { + "name": "Ethernet8", + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": 2, + "lanes": "33,34,35,36", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + }, + { + "name": "Ethernet3", + "alias": "Eth1/4", + "lanes": "68", + "description": "", + "speed": 10000 + }, + { + "name": "Ethernet1", + "alias": "Eth1/2", + "lanes": "66", + "description": "", + "speed": 10000 + }, + { + "name": "Ethernet2", + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": 10000 + } + ] + } + } +} diff --git a/tests/generic_config_updater/files/sonic_yang_as_json.json b/tests/generic_config_updater/files/sonic_yang_as_json.json new file mode 100644 index 
0000000000..37f0fe6ba7 --- /dev/null +++ b/tests/generic_config_updater/files/sonic_yang_as_json.json @@ -0,0 +1,114 @@ +{ + "sonic-vlan:sonic-vlan": { + "sonic-vlan:VLAN_MEMBER": { + "VLAN_MEMBER_LIST": [ + { + "name": "Vlan1000", + "port": "Ethernet0", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet4", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet8", + "tagging_mode": "untagged" + } + ] + }, + "sonic-vlan:VLAN": { + "VLAN_LIST": [ + { + "name": "Vlan1000", + "vlanid": 1000, + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + ] + } + }, + "sonic-acl:sonic-acl": { + "sonic-acl:ACL_TABLE": { + "ACL_TABLE_LIST": [ + { + "ACL_TABLE_NAME": "NO-NSW-PACL-V4", + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] + }, + { + "ACL_TABLE_NAME": "DATAACL", + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + { + "ACL_TABLE_NAME": "EVERFLOW", + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + { + "ACL_TABLE_NAME": "EVERFLOWV6", + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-port:PORT": { + "PORT_LIST": [ + { + "name": "Ethernet0", + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": 100000 + }, + { + "name": "Ethernet4", + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": 1, + "lanes": "29,30,31,32", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + }, + { + "name": "Ethernet8", + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": 2, + "lanes": "33,34,35,36", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + } + ] + } + } +} diff --git a/tests/generic_config_updater/files/sonic_yang_as_json_invalid.json b/tests/generic_config_updater/files/sonic_yang_as_json_invalid.json new file mode 100644 index 0000000000..4f67d7e6a6 --- /dev/null +++ b/tests/generic_config_updater/files/sonic_yang_as_json_invalid.json @@ -0,0 +1,13 @@ +{ + "sonic-vlan:sonic-vlan": { + "sonic-vlan:VLAN_MEMBER": { + "VLAN_MEMBER_LIST": [ + { + "name": "Vlan1000", + "port": "Ethernet4", + "tagging_mode": "untagged" + } + ] + } + } +} diff --git a/tests/generic_config_updater/files/sonic_yang_as_json_with_unexpected_colons.json b/tests/generic_config_updater/files/sonic_yang_as_json_with_unexpected_colons.json new file mode 100644 index 0000000000..aac97da42b --- /dev/null +++ b/tests/generic_config_updater/files/sonic_yang_as_json_with_unexpected_colons.json @@ -0,0 +1,114 @@ +{ + "sonic-vlan:sonic-vlan": { + "sonic-vlan::VLAN_MEMBER": { + "VLAN_MEMBER_LIST": [ + { + "name": "Vlan1000", + "port": "Ethernet0", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet4", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet8", + "tagging_mode": "untagged" + } + ] + }, + "sonic-vlan::VLAN": { + "VLAN_LIST": [ + { + "name": "Vlan1000", + "vlanid": 1000, + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + ] + } + }, + "sonic-acl:sonic-acl": { + "sonic-vlan::ACL_TABLE": { + "ACL_TABLE_LIST": [ + { + "ACL_TABLE_NAME": "NO-NSW-PACL-V4", + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] + }, + { + "ACL_TABLE_NAME": "DATAACL", 
+ "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + { + "ACL_TABLE_NAME": "EVERFLOW", + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + { + "ACL_TABLE_NAME": "EVERFLOWV6", + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + ] + } + }, + "sonic-port:sonic-port": { + "sonic-vlan::PORT": { + "PORT_LIST": [ + { + "name": "Ethernet0", + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": 100000 + }, + { + "name": "Ethernet4", + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": 1, + "lanes": "29,30,31,32", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + }, + { + "name": "Ethernet8", + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": 2, + "lanes": "33,34,35,36", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + } + ] + } + } +} diff --git a/tests/generic_config_updater/files/sonic_yang_as_json_without_colons.json b/tests/generic_config_updater/files/sonic_yang_as_json_without_colons.json new file mode 100644 index 0000000000..ad4ab15f4a --- /dev/null +++ b/tests/generic_config_updater/files/sonic_yang_as_json_without_colons.json @@ -0,0 +1,114 @@ +{ + "sonic-vlan:sonic-vlan": { + "VLAN_MEMBER": { + "VLAN_MEMBER_LIST": [ + { + "name": "Vlan1000", + "port": "Ethernet0", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet4", + "tagging_mode": "untagged" + }, + { + "name": "Vlan1000", + "port": "Ethernet8", + "tagging_mode": "untagged" + } + ] + }, + "VLAN": { + "VLAN_LIST": [ + { + "name": "Vlan1000", + "vlanid": 1000, + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + ] + } + }, + "sonic-acl:sonic-acl": { + "ACL_TABLE": { + "ACL_TABLE_LIST": [ + { + "ACL_TABLE_NAME": "NO-NSW-PACL-V4", + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] + }, + { + "ACL_TABLE_NAME": "DATAACL", + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + { + "ACL_TABLE_NAME": "EVERFLOW", + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + { + "ACL_TABLE_NAME": "EVERFLOWV6", + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + ] + } + }, + "sonic-port:sonic-port": { + "PORT": { + "PORT_LIST": [ + { + "name": "Ethernet0", + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": 100000 + }, + { + "name": "Ethernet4", + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": 1, + "lanes": "29,30,31,32", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + }, + { + "name": "Ethernet8", + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": 2, + "lanes": "33,34,35,36", + "mtu": 9100, + "pfc_asym": "off", + "speed": 40000 + } + ] + } + } +} diff --git a/tests/generic_config_updater/generic_updater_test.py b/tests/generic_config_updater/generic_updater_test.py new file mode 100644 index 0000000000..f201280062 --- /dev/null +++ b/tests/generic_config_updater/generic_updater_test.py @@ -0,0 +1,766 @@ +import json +import os +import shutil +import unittest +from unittest.mock import MagicMock, Mock, call +from 
.gutest_helpers import create_side_effect_dict, Files + +import generic_config_updater.generic_updater as gu + +class TestPatchApplier(unittest.TestCase): + def test_apply__invalid_patch_updating_tables_without_yang_models__failure(self): + # Arrange + patch_applier = self.__create_patch_applier(valid_patch_only_tables_with_yang_models=False) + + # Act and assert + self.assertRaises(ValueError, patch_applier.apply, Files.MULTI_OPERATION_CONFIG_DB_PATCH) + + def test_apply__invalid_config_db__failure(self): + # Arrange + patch_applier = self.__create_patch_applier(valid_config_db=False) + + # Act and assert + self.assertRaises(ValueError, patch_applier.apply, Files.MULTI_OPERATION_CONFIG_DB_PATCH) + + def test_apply__json_not_fully_updated__failure(self): + # Arrange + patch_applier = self.__create_patch_applier(verified_same_config=False) + + # Act and assert + self.assertRaises(gu.GenericConfigUpdaterError, patch_applier.apply, Files.MULTI_OPERATION_CONFIG_DB_PATCH) + + def test_apply__no_errors__update_successful(self): + # Arrange + changes = [Mock(), Mock()] + patch_applier = self.__create_patch_applier(changes) + + # Act + patch_applier.apply(Files.MULTI_OPERATION_CONFIG_DB_PATCH) + + # Assert + patch_applier.patch_wrapper.validate_config_db_patch_has_yang_models.assert_has_calls( + [call(Files.MULTI_OPERATION_CONFIG_DB_PATCH)]) + patch_applier.config_wrapper.get_config_db_as_json.assert_has_calls([call(), call()]) + patch_applier.patch_wrapper.simulate_patch.assert_has_calls( + [call(Files.MULTI_OPERATION_CONFIG_DB_PATCH, Files.CONFIG_DB_AS_JSON)]) + patch_applier.config_wrapper.validate_config_db_config.assert_has_calls( + [call(Files.CONFIG_DB_AFTER_MULTI_PATCH)]) + patch_applier.patchsorter.sort.assert_has_calls([call(Files.MULTI_OPERATION_CONFIG_DB_PATCH)]) + patch_applier.changeapplier.apply.assert_has_calls([call(changes[0]), call(changes[1])]) + patch_applier.patch_wrapper.verify_same_json.assert_has_calls( + [call(Files.CONFIG_DB_AFTER_MULTI_PATCH, Files.CONFIG_DB_AFTER_MULTI_PATCH)]) + + def __create_patch_applier(self, + changes=None, + valid_patch_only_tables_with_yang_models=True, + valid_config_db=True, + verified_same_config=True): + config_wrapper = Mock() + config_wrapper.get_config_db_as_json.side_effect = \ + [Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AFTER_MULTI_PATCH] + config_wrapper.validate_config_db_config.side_effect = \ + create_side_effect_dict({(str(Files.CONFIG_DB_AFTER_MULTI_PATCH),): valid_config_db}) + + patch_wrapper = Mock() + patch_wrapper.validate_config_db_patch_has_yang_models.side_effect = \ + create_side_effect_dict( + {(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH),): valid_patch_only_tables_with_yang_models}) + patch_wrapper.simulate_patch.side_effect = \ + create_side_effect_dict( + {(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH), str(Files.CONFIG_DB_AS_JSON)): + Files.CONFIG_DB_AFTER_MULTI_PATCH}) + patch_wrapper.verify_same_json.side_effect = \ + create_side_effect_dict( + {(str(Files.CONFIG_DB_AFTER_MULTI_PATCH), str(Files.CONFIG_DB_AFTER_MULTI_PATCH)): + verified_same_config}) + + changes = [Mock(), Mock()] if not changes else changes + patchsorter = Mock() + patchsorter.sort.side_effect = \ + create_side_effect_dict({(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH),): changes}) + + changeapplier = Mock() + changeapplier.apply.side_effect = create_side_effect_dict({(str(changes[0]),): 0, (str(changes[1]),): 0}) + + return gu.PatchApplier(patchsorter,
changeapplier, config_wrapper, patch_wrapper) + +class TestConfigReplacer(unittest.TestCase): + def test_replace__invalid_config_db__failure(self): + # Arrange + config_replacer = self.__create_config_replacer(valid_config_db=False) + + # Act and assert + self.assertRaises(ValueError, config_replacer.replace, Files.CONFIG_DB_AFTER_MULTI_PATCH) + + def test_replace__json_not_fully_updated__failure(self): + # Arrange + config_replacer = self.__create_config_replacer(verified_same_config=False) + + # Act and assert + self.assertRaises(gu.GenericConfigUpdaterError, config_replacer.replace, Files.CONFIG_DB_AFTER_MULTI_PATCH) + + def test_replace__no_errors__update_successful(self): + # Arrange + config_replacer = self.__create_config_replacer() + + # Act + config_replacer.replace(Files.CONFIG_DB_AFTER_MULTI_PATCH) + + # Assert + config_replacer.config_wrapper.validate_config_db_config.assert_has_calls( + [call(Files.CONFIG_DB_AFTER_MULTI_PATCH)]) + config_replacer.config_wrapper.get_config_db_as_json.assert_has_calls([call(), call()]) + config_replacer.patch_wrapper.generate_patch.assert_has_calls( + [call(Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AFTER_MULTI_PATCH)]) + config_replacer.patch_applier.apply.assert_has_calls([call(Files.MULTI_OPERATION_CONFIG_DB_PATCH)]) + config_replacer.patch_wrapper.verify_same_json.assert_has_calls( + [call(Files.CONFIG_DB_AFTER_MULTI_PATCH, Files.CONFIG_DB_AFTER_MULTI_PATCH)]) + + def __create_config_replacer(self, changes=None, valid_config_db=True, verified_same_config=True): + config_wrapper = Mock() + config_wrapper.validate_config_db_config.side_effect = \ + create_side_effect_dict({(str(Files.CONFIG_DB_AFTER_MULTI_PATCH),): valid_config_db}) + config_wrapper.get_config_db_as_json.side_effect = \ + [Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AFTER_MULTI_PATCH] + + patch_wrapper = Mock() + patch_wrapper.generate_patch.side_effect = \ + create_side_effect_dict( + {(str(Files.CONFIG_DB_AS_JSON), str(Files.CONFIG_DB_AFTER_MULTI_PATCH)): + Files.MULTI_OPERATION_CONFIG_DB_PATCH}) + patch_wrapper.verify_same_json.side_effect = \ + create_side_effect_dict( + {(str(Files.CONFIG_DB_AFTER_MULTI_PATCH), str(Files.CONFIG_DB_AFTER_MULTI_PATCH)): \ + verified_same_config}) + + changes = [Mock(), Mock()] if not changes else changes + patchsorter = Mock() + patchsorter.sort.side_effect = create_side_effect_dict({(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH),): \ + changes}) + + patch_applier = Mock() + patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.MULTI_OPERATION_CONFIG_DB_PATCH),): 0}) + + return gu.ConfigReplacer(patch_applier, config_wrapper, patch_wrapper) + +class TestFileSystemConfigRollbacker(unittest.TestCase): + def setUp(self): + self.checkpoints_dir = os.path.join(os.getcwd(),"checkpoints") + self.checkpoint_ext = ".cp.json" + self.any_checkpoint_name = "anycheckpoint" + self.any_other_checkpoint_name = "anyothercheckpoint" + self.any_config = {} + self.clean_up() + + def tearDown(self): + self.clean_up() + + def test_rollback__checkpoint_does_not_exist__failure(self): + # Arrange + rollbacker = self.create_rollbacker() + + # Act and assert + self.assertRaises(ValueError, rollbacker.rollback, "NonExistingCheckpoint") + + def test_rollback__no_errors__success(self): + # Arrange + self.create_checkpoints_dir() + self.add_checkpoint(self.any_checkpoint_name, self.any_config) + rollbacker = self.create_rollbacker() + + # Act + rollbacker.rollback(self.any_checkpoint_name) + + # Assert + 
rollbacker.config_replacer.replace.assert_has_calls([call(self.any_config)]) + + def test_checkpoint__checkpoints_dir_does_not_exist__checkpoint_created(self): + # Arrange + rollbacker = self.create_rollbacker() + self.assertFalse(os.path.isdir(self.checkpoints_dir)) + + # Act + rollbacker.checkpoint(self.any_checkpoint_name) + + # Assert + self.assertTrue(os.path.isdir(self.checkpoints_dir)) + self.assertEqual(self.any_config, self.get_checkpoint(self.any_checkpoint_name)) + + def test_checkpoint__config_not_valid__failure(self): + # Arrange + rollbacker = self.create_rollbacker(valid_config=False) + + # Act and assert + self.assertRaises(ValueError, rollbacker.checkpoint, self.any_checkpoint_name) + + def test_checkpoint__checkpoints_dir_exists__checkpoint_created(self): + # Arrange + self.create_checkpoints_dir() + rollbacker = self.create_rollbacker() + + # Act + rollbacker.checkpoint(self.any_checkpoint_name) + + # Assert + self.assertEqual(self.any_config, self.get_checkpoint(self.any_checkpoint_name)) + + def test_list_checkpoints__checkpoints_dir_does_not_exist__empty_list(self): + # Arrange + rollbacker = self.create_rollbacker() + self.assertFalse(os.path.isdir(self.checkpoints_dir)) + expected = [] + + # Act + actual = rollbacker.list_checkpoints() + + # Assert + # 'assertCountEqual' does check same count, same elements ignoring order + self.assertCountEqual(expected, actual) + + def test_list_checkpoints__checkpoints_dir_exist_but_no_files__empty_list(self): + # Arrange + self.create_checkpoints_dir() + rollbacker = self.create_rollbacker() + expected = [] + + # Act + actual = rollbacker.list_checkpoints() + + # Assert + # 'assertCountEqual' does check same count, same elements ignoring order + self.assertCountEqual(expected, actual) + + def test_list_checkpoints__checkpoints_dir_has_multiple_files__multiple_files(self): + # Arrange + self.create_checkpoints_dir() + self.add_checkpoint(self.any_checkpoint_name, self.any_config) + self.add_checkpoint(self.any_other_checkpoint_name, self.any_config) + rollbacker = self.create_rollbacker() + expected = [self.any_checkpoint_name, self.any_other_checkpoint_name] + + # Act + actual = rollbacker.list_checkpoints() + + # Assert + # 'assertCountEqual' does check same count, same elements ignoring order + self.assertCountEqual(expected, actual) + + def test_list_checkpoints__checkpoints_names_have_special_characters__multiple_files(self): + # Arrange + self.create_checkpoints_dir() + self.add_checkpoint("check.point1", self.any_config) + self.add_checkpoint(".checkpoint2", self.any_config) + self.add_checkpoint("checkpoint3.", self.any_config) + rollbacker = self.create_rollbacker() + expected = ["check.point1", ".checkpoint2", "checkpoint3."] + + # Act + actual = rollbacker.list_checkpoints() + + # Assert + # 'assertCountEqual' does check same count, same elements ignoring order + self.assertCountEqual(expected, actual) + + def test_delete_checkpoint__checkpoint_does_not_exist__failure(self): + # Arrange + rollbacker = self.create_rollbacker() + + # Act and assert + self.assertRaises(ValueError, rollbacker.delete_checkpoint, self.any_checkpoint_name) + + def test_delete_checkpoint__checkpoint_exist__success(self): + # Arrange + self.create_checkpoints_dir() + self.add_checkpoint(self.any_checkpoint_name, self.any_config) + rollbacker = self.create_rollbacker() + + # Act + rollbacker.delete_checkpoint(self.any_checkpoint_name) + + # Assert + self.assertFalse(self.check_checkpoint_exists(self.any_checkpoint_name)) + + def 
test_multiple_operations(self): + rollbacker = self.create_rollbacker() + + # 'assertCountEqual' does check same count, same elements ignoring order + self.assertCountEqual([], rollbacker.list_checkpoints()) + + rollbacker.checkpoint(self.any_checkpoint_name) + self.assertCountEqual([self.any_checkpoint_name], rollbacker.list_checkpoints()) + self.assertEqual(self.any_config, self.get_checkpoint(self.any_checkpoint_name)) + + rollbacker.rollback(self.any_checkpoint_name) + rollbacker.config_replacer.replace.assert_has_calls([call(self.any_config)]) + + rollbacker.checkpoint(self.any_other_checkpoint_name) + self.assertCountEqual([self.any_checkpoint_name, self.any_other_checkpoint_name], rollbacker.list_checkpoints()) + self.assertEqual(self.any_config, self.get_checkpoint(self.any_other_checkpoint_name)) + + rollbacker.delete_checkpoint(self.any_checkpoint_name) + self.assertCountEqual([self.any_other_checkpoint_name], rollbacker.list_checkpoints()) + + rollbacker.delete_checkpoint(self.any_other_checkpoint_name) + self.assertCountEqual([], rollbacker.list_checkpoints()) + + def clean_up(self): + if os.path.isdir(self.checkpoints_dir): + shutil.rmtree(self.checkpoints_dir) + + def create_checkpoints_dir(self): + os.makedirs(self.checkpoints_dir) + + def add_checkpoint(self, name, json_content): + path=os.path.join(self.checkpoints_dir, f"{name}{self.checkpoint_ext}") + with open(path, "w") as fh: + fh.write(json.dumps(json_content)) + + def get_checkpoint(self, name): + path=os.path.join(self.checkpoints_dir, f"{name}{self.checkpoint_ext}") + with open(path) as fh: + text = fh.read() + return json.loads(text) + + def check_checkpoint_exists(self, name): + path=os.path.join(self.checkpoints_dir, f"{name}{self.checkpoint_ext}") + return os.path.isfile(path) + + def create_rollbacker(self, valid_config=True): + replacer = Mock() + replacer.replace.side_effect = create_side_effect_dict({(str(self.any_config),): 0}) + + config_wrapper = Mock() + config_wrapper.get_config_db_as_json.return_value = self.any_config + config_wrapper.validate_config_db_config.return_value = valid_config + + return gu.FileSystemConfigRollbacker(checkpoints_dir=self.checkpoints_dir, + config_replacer=replacer, + config_wrapper=config_wrapper) + +class TestGenericUpdateFactory(unittest.TestCase): + def setUp(self): + self.any_verbose=True + self.any_dry_run=True + + def test_create_patch_applier__invalid_config_format__failure(self): + # Arrange + factory = gu.GenericUpdateFactory() + + # Act and assert + self.assertRaises( + ValueError, factory.create_patch_applier, "INVALID_FORMAT", self.any_verbose, self.any_dry_run) + + def test_create_patch_applier__different_options(self): + # Arrange + options = [ + {"verbose": {True: None, False: None}}, + {"dry_run": {True: None, False: gu.ConfigLockDecorator}}, + { + "config_format": { + gu.ConfigFormat.SONICYANG: gu.SonicYangDecorator, + gu.ConfigFormat.CONFIGDB: None, + } + }, + ] + + # Act and assert + self.recursively_test_create_func(options, 0, {}, [], self.validate_create_patch_applier) + + def test_create_config_replacer__invalid_config_format__failure(self): + # Arrange + factory = gu.GenericUpdateFactory() + + # Act and assert + self.assertRaises( + ValueError, factory.create_config_replacer, "INVALID_FORMAT", self.any_verbose, self.any_dry_run) + + def test_create_config_replacer__different_options(self): + # Arrange + options = [ + {"verbose": {True: None, False: None}}, + {"dry_run": {True: None, False: gu.ConfigLockDecorator}}, + { + "config_format": { + 
gu.ConfigFormat.SONICYANG: gu.SonicYangDecorator, + gu.ConfigFormat.CONFIGDB: None, + } + }, + ] + + # Act and assert + self.recursively_test_create_func(options, 0, {}, [], self.validate_create_config_replacer) + + def test_create_config_rollbacker__different_options(self): + # Arrange + options = [ + {"verbose": {True: None, False: None}}, + {"dry_run": {True: None, False: gu.ConfigLockDecorator}} + ] + + # Act and assert + self.recursively_test_create_func(options, 0, {}, [], self.validate_create_config_rollbacker) + + def recursively_test_create_func(self, options, cur_option, params, expected_decorators, create_func): + if cur_option == len(options): + create_func(params, expected_decorators) + return + + param = list(options[cur_option].keys())[0] + for key in options[cur_option][param]: + params[param] = key + decorator = options[cur_option][param][key] + if decorator != None: + expected_decorators.append(decorator) + self.recursively_test_create_func(options, cur_option+1, params, expected_decorators, create_func) + if decorator != None: + expected_decorators.pop() + + def validate_create_patch_applier(self, params, expected_decorators): + factory = gu.GenericUpdateFactory() + patch_applier = factory.create_patch_applier(params["config_format"], params["verbose"], params["dry_run"]) + for decorator_type in expected_decorators: + self.assertIsInstance(patch_applier, decorator_type) + + patch_applier = patch_applier.decorated_patch_applier + + self.assertIsInstance(patch_applier, gu.PatchApplier) + if params["dry_run"]: + self.assertIsInstance(patch_applier.config_wrapper, gu.DryRunConfigWrapper) + else: + self.assertIsInstance(patch_applier.config_wrapper, gu.ConfigWrapper) + + def validate_create_config_replacer(self, params, expected_decorators): + factory = gu.GenericUpdateFactory() + config_replacer = factory.create_config_replacer(params["config_format"], params["verbose"], params["dry_run"]) + for decorator_type in expected_decorators: + self.assertIsInstance(config_replacer, decorator_type) + + config_replacer = config_replacer.decorated_config_replacer + + self.assertIsInstance(config_replacer, gu.ConfigReplacer) + if params["dry_run"]: + self.assertIsInstance(config_replacer.config_wrapper, gu.DryRunConfigWrapper) + self.assertIsInstance(config_replacer.patch_applier.config_wrapper, gu.DryRunConfigWrapper) + else: + self.assertIsInstance(config_replacer.config_wrapper, gu.ConfigWrapper) + self.assertIsInstance(config_replacer.patch_applier.config_wrapper, gu.ConfigWrapper) + + def validate_create_config_rollbacker(self, params, expected_decorators): + factory = gu.GenericUpdateFactory() + config_rollbacker = factory.create_config_rollbacker(params["verbose"], params["dry_run"]) + for decorator_type in expected_decorators: + self.assertIsInstance(config_rollbacker, decorator_type) + + config_rollbacker = config_rollbacker.decorated_config_rollbacker + + self.assertIsInstance(config_rollbacker, gu.FileSystemConfigRollbacker) + if params["dry_run"]: + self.assertIsInstance(config_rollbacker.config_wrapper, gu.DryRunConfigWrapper) + self.assertIsInstance(config_rollbacker.config_replacer.config_wrapper, gu.DryRunConfigWrapper) + self.assertIsInstance( + config_rollbacker.config_replacer.patch_applier.config_wrapper, gu.DryRunConfigWrapper) + else: + self.assertIsInstance(config_rollbacker.config_wrapper, gu.ConfigWrapper) + self.assertIsInstance(config_rollbacker.config_replacer.config_wrapper, gu.ConfigWrapper) + self.assertIsInstance( + 
config_rollbacker.config_replacer.patch_applier.config_wrapper, gu.ConfigWrapper) + +class TestGenericUpdater(unittest.TestCase): + def setUp(self): + self.any_checkpoint_name = "anycheckpoint" + self.any_other_checkpoint_name = "anyothercheckpoint" + self.any_checkpoints_list = [self.any_checkpoint_name, self.any_other_checkpoint_name] + self.any_config_format = gu.ConfigFormat.SONICYANG + self.any_verbose = True + self.any_dry_run = True + + def test_apply_patch__creates_applier_and_apply(self): + # Arrange + patch_applier = Mock() + patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.SINGLE_OPERATION_SONIC_YANG_PATCH),): 0}) + + factory = Mock() + factory.create_patch_applier.side_effect = \ + create_side_effect_dict( + {(str(self.any_config_format), str(self.any_verbose), str(self.any_dry_run),): patch_applier}) + + generic_updater = gu.GenericUpdater(factory) + + # Act + generic_updater.apply_patch( + Files.SINGLE_OPERATION_SONIC_YANG_PATCH, self.any_config_format, self.any_verbose, self.any_dry_run) + + # Assert + patch_applier.apply.assert_has_calls([call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH)]) + + def test_replace__creates_replacer_and_replace(self): + # Arrange + config_replacer = Mock() + config_replacer.replace.side_effect = create_side_effect_dict({(str(Files.SONIC_YANG_AS_JSON),): 0}) + + factory = Mock() + factory.create_config_replacer.side_effect = \ + create_side_effect_dict( + {(str(self.any_config_format), str(self.any_verbose), str(self.any_dry_run),): config_replacer}) + + generic_updater = gu.GenericUpdater(factory) + + # Act + generic_updater.replace(Files.SONIC_YANG_AS_JSON, self.any_config_format, self.any_verbose, self.any_dry_run) + + # Assert + config_replacer.replace.assert_has_calls([call(Files.SONIC_YANG_AS_JSON)]) + + def test_rollback__creates_rollbacker_and_rollback(self): + # Arrange + config_rollbacker = Mock() + config_rollbacker.rollback.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) + + factory = Mock() + factory.create_config_rollbacker.side_effect = \ + create_side_effect_dict({(str(self.any_verbose), str(self.any_dry_run),): config_rollbacker}) + + generic_updater = gu.GenericUpdater(factory) + + # Act + generic_updater.rollback(self.any_checkpoint_name, self.any_verbose, self.any_dry_run) + + # Assert + config_rollbacker.rollback.assert_has_calls([call(self.any_checkpoint_name)]) + + def test_checkpoint__creates_rollbacker_and_checkpoint(self): + # Arrange + config_rollbacker = Mock() + config_rollbacker.checkpoint.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) + + factory = Mock() + factory.create_config_rollbacker.side_effect = \ + create_side_effect_dict({(str(self.any_verbose),): config_rollbacker}) + + generic_updater = gu.GenericUpdater(factory) + + # Act + generic_updater.checkpoint(self.any_checkpoint_name, self.any_verbose) + + # Assert + config_rollbacker.checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) + + def test_delete_checkpoint__creates_rollbacker_and_deletes_checkpoint(self): + # Arrange + config_rollbacker = Mock() + config_rollbacker.delete_checkpoint.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) + + factory = Mock() + factory.create_config_rollbacker.side_effect = \ + create_side_effect_dict({(str(self.any_verbose),): config_rollbacker}) + + generic_updater = gu.GenericUpdater(factory) + + # Act + generic_updater.delete_checkpoint(self.any_checkpoint_name, self.any_verbose) + + # Assert + 
config_rollbacker.delete_checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) + + def test_list_checkpoints__creates_rollbacker_and_list_checkpoints(self): + # Arrange + config_rollbacker = Mock() + config_rollbacker.list_checkpoints.return_value = self.any_checkpoints_list + + factory = Mock() + factory.create_config_rollbacker.side_effect = \ + create_side_effect_dict({(str(self.any_verbose),): config_rollbacker}) + + generic_updater = gu.GenericUpdater(factory) + + expected = self.any_checkpoints_list + + # Act + actual = generic_updater.list_checkpoints(self.any_verbose) + + # Assert + self.assertCountEqual(expected, actual) + +class TestDecorator(unittest.TestCase): + def setUp(self): + self.decorated_patch_applier = Mock() + self.decorated_config_replacer = Mock() + self.decorated_config_rollbacker = Mock() + + self.any_checkpoint_name = "anycheckpoint" + self.any_other_checkpoint_name = "anyothercheckpoint" + self.any_checkpoints_list = [self.any_checkpoint_name, self.any_other_checkpoint_name] + self.decorated_config_rollbacker.list_checkpoints.return_value = self.any_checkpoints_list + + self.decorator = gu.Decorator( + self.decorated_patch_applier, self.decorated_config_replacer, self.decorated_config_rollbacker) + + def test_apply__calls_decorated_applier(self): + # Act + self.decorator.apply(Files.SINGLE_OPERATION_SONIC_YANG_PATCH) + + # Assert + self.decorated_patch_applier.apply.assert_has_calls([call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH)]) + + def test_replace__calls_decorated_replacer(self): + # Act + self.decorator.replace(Files.SONIC_YANG_AS_JSON) + + # Assert + self.decorated_config_replacer.replace.assert_has_calls([call(Files.SONIC_YANG_AS_JSON)]) + + def test_rollback__calls_decorated_rollbacker(self): + # Act + self.decorator.rollback(self.any_checkpoint_name) + + # Assert + self.decorated_config_rollbacker.rollback.assert_has_calls([call(self.any_checkpoint_name)]) + + def test_checkpoint__calls_decorated_rollbacker(self): + # Act + self.decorator.checkpoint(self.any_checkpoint_name) + + # Assert + self.decorated_config_rollbacker.checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) + + def test_delete_checkpoint__calls_decorated_rollbacker(self): + # Act + self.decorator.delete_checkpoint(self.any_checkpoint_name) + + # Assert + self.decorated_config_rollbacker.delete_checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) + + def test_list_checkpoints__calls_decorated_rollbacker(self): + # Arrange + expected = self.any_checkpoints_list + + # Act + actual = self.decorator.list_checkpoints() + + # Assert + self.decorated_config_rollbacker.list_checkpoints.assert_called_once() + self.assertListEqual(expected, actual) + +class TestSonicYangDecorator(unittest.TestCase): + def test_apply__converts_to_config_db_and_calls_decorated_class(self): + # Arrange + sonic_yang_decorator = self.__create_sonic_yang_decorator() + + # Act + sonic_yang_decorator.apply(Files.SINGLE_OPERATION_SONIC_YANG_PATCH) + + # Assert + sonic_yang_decorator.patch_wrapper.convert_sonic_yang_patch_to_config_db_patch.assert_has_calls( + [call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH)]) + sonic_yang_decorator.decorated_patch_applier.apply.assert_has_calls( + [call(Files.SINGLE_OPERATION_CONFIG_DB_PATCH)]) + + def test_replace__converts_to_config_db_and_calls_decorated_class(self): + # Arrange + sonic_yang_decorator = self.__create_sonic_yang_decorator() + + # Act + sonic_yang_decorator.replace(Files.SONIC_YANG_AS_JSON) + + # Assert + 
sonic_yang_decorator.config_wrapper.convert_sonic_yang_to_config_db.assert_has_calls( + [call(Files.SONIC_YANG_AS_JSON)]) + sonic_yang_decorator.decorated_config_replacer.replace.assert_has_calls([call(Files.CONFIG_DB_AS_JSON)]) + + def __create_sonic_yang_decorator(self): + patch_applier = Mock() + patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.SINGLE_OPERATION_CONFIG_DB_PATCH),): 0}) + + patch_wrapper = Mock() + patch_wrapper.convert_sonic_yang_patch_to_config_db_patch.side_effect = \ + create_side_effect_dict({(str(Files.SINGLE_OPERATION_SONIC_YANG_PATCH),): \ + Files.SINGLE_OPERATION_CONFIG_DB_PATCH}) + + config_replacer = Mock() + config_replacer.replace.side_effect = create_side_effect_dict({(str(Files.CONFIG_DB_AS_JSON),): 0}) + + config_wrapper = Mock() + config_wrapper.convert_sonic_yang_to_config_db.side_effect = \ + create_side_effect_dict({(str(Files.SONIC_YANG_AS_JSON),): Files.CONFIG_DB_AS_JSON}) + + return gu.SonicYangDecorator(decorated_patch_applier=patch_applier, + decorated_config_replacer=config_replacer, + patch_wrapper=patch_wrapper, + config_wrapper=config_wrapper) + +class TestConfigLockDecorator(unittest.TestCase): + def setUp(self): + self.any_checkpoint_name = "anycheckpoint" + + def test_apply__lock_config(self): + # Arrange + config_lock_decorator = self.__create_config_lock_decorator() + + # Act + config_lock_decorator.apply(Files.SINGLE_OPERATION_SONIC_YANG_PATCH) + + # Assert + config_lock_decorator.config_lock.acquire_lock.assert_called_once() + config_lock_decorator.decorated_patch_applier.apply.assert_has_calls( + [call(Files.SINGLE_OPERATION_SONIC_YANG_PATCH)]) + config_lock_decorator.config_lock.release_lock.assert_called_once() + + def test_replace__lock_config(self): + # Arrange + config_lock_decorator = self.__create_config_lock_decorator() + + # Act + config_lock_decorator.replace(Files.SONIC_YANG_AS_JSON) + + # Assert + config_lock_decorator.config_lock.acquire_lock.assert_called_once() + config_lock_decorator.decorated_config_replacer.replace.assert_has_calls([call(Files.SONIC_YANG_AS_JSON)]) + config_lock_decorator.config_lock.release_lock.assert_called_once() + + def test_rollback__lock_config(self): + # Arrange + config_lock_decorator = self.__create_config_lock_decorator() + + # Act + config_lock_decorator.rollback(self.any_checkpoint_name) + + # Assert + config_lock_decorator.config_lock.acquire_lock.assert_called_once() + config_lock_decorator.decorated_config_rollbacker.rollback.assert_has_calls([call(self.any_checkpoint_name)]) + config_lock_decorator.config_lock.release_lock.assert_called_once() + + def test_checkpoint__lock_config(self): + # Arrange + config_lock_decorator = self.__create_config_lock_decorator() + + # Act + config_lock_decorator.checkpoint(self.any_checkpoint_name) + + # Assert + config_lock_decorator.config_lock.acquire_lock.assert_called_once() + config_lock_decorator.decorated_config_rollbacker.checkpoint.assert_has_calls([call(self.any_checkpoint_name)]) + config_lock_decorator.config_lock.release_lock.assert_called_once() + + def __create_config_lock_decorator(self): + config_lock = Mock() + + patch_applier = Mock() + patch_applier.apply.side_effect = create_side_effect_dict({(str(Files.SINGLE_OPERATION_SONIC_YANG_PATCH),): 0}) + + config_replacer = Mock() + config_replacer.replace.side_effect = create_side_effect_dict({(str(Files.SONIC_YANG_AS_JSON),): 0}) + + config_rollbacker = Mock() + config_rollbacker.rollback.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) + 
config_rollbacker.checkpoint.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) + config_rollbacker.delete_checkpoint.side_effect = create_side_effect_dict({(self.any_checkpoint_name,): 0}) + + return gu.ConfigLockDecorator(config_lock=config_lock, + decorated_patch_applier=patch_applier, + decorated_config_replacer=config_replacer, + decorated_config_rollbacker=config_rollbacker) diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py new file mode 100644 index 0000000000..f69ec08030 --- /dev/null +++ b/tests/generic_config_updater/gu_common_test.py @@ -0,0 +1,635 @@ +import json +import jsonpatch +import sonic_yang +import unittest +from unittest.mock import MagicMock, Mock + +from .gutest_helpers import create_side_effect_dict, Files +import generic_config_updater.gu_common as gu_common + +class TestConfigWrapper(unittest.TestCase): + def setUp(self): + self.config_wrapper_mock = gu_common.ConfigWrapper() + self.config_wrapper_mock.get_config_db_as_json=MagicMock(return_value=Files.CONFIG_DB_AS_JSON) + + def test_ctor__default_values_set(self): + config_wrapper = gu_common.ConfigWrapper() + + self.assertEqual("/usr/local/yang-models", gu_common.YANG_DIR) + + def test_get_sonic_yang_as_json__returns_sonic_yang_as_json(self): + # Arrange + config_wrapper = self.config_wrapper_mock + expected = Files.SONIC_YANG_AS_JSON + + # Act + actual = config_wrapper.get_sonic_yang_as_json() + + # Assert + self.assertDictEqual(expected, actual) + + def test_convert_config_db_to_sonic_yang__empty_config_db__returns_empty_sonic_yang(self): + # Arrange + config_wrapper = gu_common.ConfigWrapper() + expected = {} + + # Act + actual = config_wrapper.convert_config_db_to_sonic_yang({}) + + # Assert + self.assertDictEqual(expected, actual) + + def test_convert_config_db_to_sonic_yang__non_empty_config_db__returns_sonic_yang_as_json(self): + # Arrange + config_wrapper = gu_common.ConfigWrapper() + expected = Files.SONIC_YANG_AS_JSON + + # Act + actual = config_wrapper.convert_config_db_to_sonic_yang(Files.CONFIG_DB_AS_JSON) + + # Assert + self.assertDictEqual(expected, actual) + + def test_convert_sonic_yang_to_config_db__empty_sonic_yang__returns_empty_config_db(self): + # Arrange + config_wrapper = gu_common.ConfigWrapper() + expected = {} + + # Act + actual = config_wrapper.convert_sonic_yang_to_config_db({}) + + # Assert + self.assertDictEqual(expected, actual) + + def test_convert_sonic_yang_to_config_db__non_empty_sonic_yang__returns_config_db_as_json(self): + # Arrange + config_wrapper = gu_common.ConfigWrapper() + expected = Files.CROPPED_CONFIG_DB_AS_JSON + + # Act + actual = config_wrapper.convert_sonic_yang_to_config_db(Files.SONIC_YANG_AS_JSON) + + # Assert + self.assertDictEqual(expected, actual) + + def test_convert_sonic_yang_to_config_db__table_name_without_colons__returns_config_db_as_json(self): + # Arrange + config_wrapper = gu_common.ConfigWrapper() + expected = Files.CROPPED_CONFIG_DB_AS_JSON + + # Act + actual = config_wrapper.convert_sonic_yang_to_config_db(Files.SONIC_YANG_AS_JSON_WITHOUT_COLONS) + + # Assert + self.assertDictEqual(expected, actual) + + def test_convert_sonic_yang_to_config_db__table_name_with_unexpected_colons__failure(self): + # Arrange + config_wrapper = gu_common.ConfigWrapper() + + # Act and assert + self.assertRaises(ValueError, + config_wrapper.convert_sonic_yang_to_config_db, + Files.SONIC_YANG_AS_JSON_WITH_UNEXPECTED_COLONS)
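+ + # The three conversion tests above imply a simple table-name rule in the + # SONiC YANG -> CONFIG DB direction: at most one 'module-name:' prefix is + # stripped ('sonic-vlan:VLAN' -> 'VLAN'), a name with no colon is kept as-is, + # and a name with an extra colon such as 'sonic-vlan::VLAN' is ambiguous and + # raises ValueError. The standalone helper below is a minimal sketch of that + # rule for illustration only; the name '_strip_module_prefix_sketch' is + # hypothetical and this is not the gu_common implementation. + @staticmethod + def _strip_module_prefix_sketch(table_name): + parts = table_name.split(':') + if len(parts) == 1: + return table_name # plain CONFIG DB table name, e.g. 'VLAN' + if len(parts) == 2 and parts[0] and parts[1]: + return parts[1] # 'sonic-vlan:VLAN' -> 'VLAN' + raise ValueError('Unexpected colons in table name: ' + table_name)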
+ + def test_validate_sonic_yang_config__valid_config__returns_true(self): + # Arrange + config_wrapper = gu_common.ConfigWrapper() + expected = True + + # Act + actual = config_wrapper.validate_sonic_yang_config(Files.SONIC_YANG_AS_JSON) + + # Assert + self.assertEqual(expected, actual) + + def test_validate_sonic_yang_config__invalid_config__returns_false(self): + # Arrange + config_wrapper = gu_common.ConfigWrapper() + expected = False + + # Act + actual = config_wrapper.validate_sonic_yang_config(Files.SONIC_YANG_AS_JSON_INVALID) + + # Assert + self.assertEqual(expected, actual) + + def test_validate_config_db_config__valid_config__returns_true(self): + # Arrange + config_wrapper = gu_common.ConfigWrapper() + expected = True + + # Act + actual = config_wrapper.validate_config_db_config(Files.CONFIG_DB_AS_JSON) + + # Assert + self.assertEqual(expected, actual) + + def test_validate_config_db_config__invalid_config__returns_false(self): + # Arrange + config_wrapper = gu_common.ConfigWrapper() + expected = False + + # Act + actual = config_wrapper.validate_config_db_config(Files.CONFIG_DB_AS_JSON_INVALID) + + # Assert + self.assertEqual(expected, actual) + + def test_crop_tables_without_yang__returns_cropped_config_db_as_json(self): + # Arrange + config_wrapper = gu_common.ConfigWrapper() + expected = Files.CROPPED_CONFIG_DB_AS_JSON + + # Act + actual = config_wrapper.crop_tables_without_yang(Files.CONFIG_DB_AS_JSON) + + # Assert + self.assertDictEqual(expected, actual) + +class TestPatchWrapper(unittest.TestCase): + def setUp(self): + self.config_wrapper_mock = gu_common.ConfigWrapper() + self.config_wrapper_mock.get_config_db_as_json=MagicMock(return_value=Files.CONFIG_DB_AS_JSON) + + def test_validate_config_db_patch_has_yang_models__table_without_yang_model__returns_false(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + patch = [ { 'op': 'remove', 'path': '/TABLE_WITHOUT_YANG' } ] + expected = False + + # Act + actual = patch_wrapper.validate_config_db_patch_has_yang_models(patch) + + # Assert + self.assertEqual(expected, actual) + + def test_validate_config_db_patch_has_yang_models__table_with_yang_model__returns_true(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + patch = [ { 'op': 'remove', 'path': '/ACL_TABLE' } ] + expected = True + + # Act + actual = patch_wrapper.validate_config_db_patch_has_yang_models(patch) + + # Assert + self.assertEqual(expected, actual) + + def test_convert_config_db_patch_to_sonic_yang_patch__invalid_config_db_patch__failure(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + patch = [ { 'op': 'remove', 'path': '/TABLE_WITHOUT_YANG' } ] + + # Act and Assert + self.assertRaises(ValueError, patch_wrapper.convert_config_db_patch_to_sonic_yang_patch, patch) + + def test_same_patch__no_diff__returns_true(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + + # Act and Assert + self.assertTrue(patch_wrapper.verify_same_json(Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AS_JSON)) + + def test_same_patch__diff__returns_false(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + + # Act and Assert + self.assertFalse(patch_wrapper.verify_same_json(Files.CONFIG_DB_AS_JSON, Files.CROPPED_CONFIG_DB_AS_JSON)) + + def test_generate_patch__no_diff__empty_patch(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + + # Act + patch = patch_wrapper.generate_patch(Files.CONFIG_DB_AS_JSON, Files.CONFIG_DB_AS_JSON) + + # Assert + self.assertFalse(patch) + + def 
test_simulate_patch__empty_patch__no_changes(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + patch = jsonpatch.JsonPatch([]) + expected = Files.CONFIG_DB_AS_JSON + + # Act + actual = patch_wrapper.simulate_patch(patch, Files.CONFIG_DB_AS_JSON) + + # Assert + self.assertDictEqual(expected, actual) + + def test_simulate_patch__non_empty_patch__changes_applied(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + patch = Files.SINGLE_OPERATION_CONFIG_DB_PATCH + expected = Files.SINGLE_OPERATION_CONFIG_DB_PATCH.apply(Files.CONFIG_DB_AS_JSON) + + # Act + actual = patch_wrapper.simulate_patch(patch, Files.CONFIG_DB_AS_JSON) + + # Assert + self.assertDictEqual(expected, actual) + + def test_generate_patch__diff__non_empty_patch(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper() + after_update_json = Files.SINGLE_OPERATION_CONFIG_DB_PATCH.apply(Files.CONFIG_DB_AS_JSON) + expected = Files.SINGLE_OPERATION_CONFIG_DB_PATCH + + # Act + actual = patch_wrapper.generate_patch(Files.CONFIG_DB_AS_JSON, after_update_json) + + # Assert + self.assertTrue(actual) + self.assertEqual(expected, actual) + + def test_convert_config_db_patch_to_sonic_yang_patch__empty_patch__returns_empty_patch(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper(config_wrapper = self.config_wrapper_mock) + patch = jsonpatch.JsonPatch([]) + expected = jsonpatch.JsonPatch([]) + + # Act + actual = patch_wrapper.convert_config_db_patch_to_sonic_yang_patch(patch) + + # Assert + self.assertEqual(expected, actual) + + def test_convert_config_db_patch_to_sonic_yang_patch__single_operation_patch__returns_sonic_yang_patch(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper(config_wrapper = self.config_wrapper_mock) + patch = Files.SINGLE_OPERATION_CONFIG_DB_PATCH + expected = Files.SINGLE_OPERATION_SONIC_YANG_PATCH + + # Act + actual = patch_wrapper.convert_config_db_patch_to_sonic_yang_patch(patch) + + # Assert + self.assertEqual(expected, actual) + + def test_convert_config_db_patch_to_sonic_yang_patch__multiple_operations_patch__returns_sonic_yang_patch(self): + # Arrange + config_wrapper = self.config_wrapper_mock + patch_wrapper = gu_common.PatchWrapper(config_wrapper = config_wrapper) + config_db_patch = Files.MULTI_OPERATION_CONFIG_DB_PATCH + + # Act + sonic_yang_patch = patch_wrapper.convert_config_db_patch_to_sonic_yang_patch(config_db_patch) + + # Assert + self.__assert_same_patch(config_db_patch, sonic_yang_patch, config_wrapper, patch_wrapper) + + def test_convert_sonic_yang_patch_to_config_db_patch__empty_patch__returns_empty_patch(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper(config_wrapper = self.config_wrapper_mock) + patch = jsonpatch.JsonPatch([]) + expected = jsonpatch.JsonPatch([]) + + # Act + actual = patch_wrapper.convert_sonic_yang_patch_to_config_db_patch(patch) + + # Assert + self.assertEqual(expected, actual) + + def test_convert_sonic_yang_patch_to_config_db_patch__single_operation_patch__returns_config_db_patch(self): + # Arrange + patch_wrapper = gu_common.PatchWrapper(config_wrapper = self.config_wrapper_mock) + patch = Files.SINGLE_OPERATION_SONIC_YANG_PATCH + expected = Files.SINGLE_OPERATION_CONFIG_DB_PATCH + + # Act + actual = patch_wrapper.convert_sonic_yang_patch_to_config_db_patch(patch) + + # Assert + self.assertEqual(expected, actual) + + def test_convert_sonic_yang_patch_to_config_db_patch__multiple_operations_patch__returns_config_db_patch(self): + # Arrange + config_wrapper = self.config_wrapper_mock + patch_wrapper = 
gu_common.PatchWrapper(config_wrapper = config_wrapper) + sonic_yang_patch = Files.MULTI_OPERATION_SONIC_YANG_PATCH + + # Act + config_db_patch = patch_wrapper.convert_sonic_yang_patch_to_config_db_patch(sonic_yang_patch) + + # Assert + self.__assert_same_patch(config_db_patch, sonic_yang_patch, config_wrapper, patch_wrapper) + + def __assert_same_patch(self, config_db_patch, sonic_yang_patch, config_wrapper, patch_wrapper): + sonic_yang = config_wrapper.get_sonic_yang_as_json() + config_db = config_wrapper.get_config_db_as_json() + + after_update_sonic_yang = patch_wrapper.simulate_patch(sonic_yang_patch, sonic_yang) + after_update_config_db = patch_wrapper.simulate_patch(config_db_patch, config_db) + after_update_config_db_cropped = config_wrapper.crop_tables_without_yang(after_update_config_db) + + after_update_sonic_yang_as_config_db = \ + config_wrapper.convert_sonic_yang_to_config_db(after_update_sonic_yang) + + self.assertTrue(patch_wrapper.verify_same_json(after_update_config_db_cropped, after_update_sonic_yang_as_config_db)) + +class TestPathAddressing(unittest.TestCase): + def setUp(self): + self.path_addressing = gu_common.PathAddressing() + self.sy_only_models = sonic_yang.SonicYang(gu_common.YANG_DIR) + self.sy_only_models.loadYangModel() + + def test_get_path_tokens(self): + def check(path, tokens): + expected=tokens + actual=self.path_addressing.get_path_tokens(path) + self.assertEqual(expected, actual) + + check("", []) + check("/", [""]) + check("/token", ["token"]) + check("/more/than/one/token", ["more", "than", "one", "token"]) + check("/has/numbers/0/and/symbols/^", ["has", "numbers", "0", "and", "symbols", "^"]) + check("/~0/this/is/telda", ["~", "this", "is", "telda"]) + check("/~1/this/is/forward-slash", ["/", "this", "is", "forward-slash"]) + check("/\\\\/no-escaping", ["\\\\", "no-escaping"]) + check("////empty/tokens/are/ok", ["", "", "", "empty", "tokens", "are", "ok"]) + + def test_create_path(self): + def check(tokens, path): + expected=path + actual=self.path_addressing.create_path(tokens) + self.assertEqual(expected, actual) + + check([], "",) + check([""], "/",) + check(["token"], "/token") + check(["more", "than", "one", "token"], "/more/than/one/token") + check(["has", "numbers", "0", "and", "symbols", "^"], "/has/numbers/0/and/symbols/^") + check(["~", "this", "is", "telda"], "/~0/this/is/telda") + check(["/", "this", "is", "forward-slash"], "/~1/this/is/forward-slash") + check(["\\\\", "no-escaping"], "/\\\\/no-escaping") + check(["", "", "", "empty", "tokens", "are", "ok"], "////empty/tokens/are/ok") + check(["~token", "telda-not-followed-by-0-or-1"], "/~0token/telda-not-followed-by-0-or-1") + + def test_get_xpath_tokens(self): + def check(path, tokens): + expected=tokens + actual=self.path_addressing.get_xpath_tokens(path) + self.assertEqual(expected, actual) + + self.assertRaises(ValueError, check, "", []) + check("/", []) + check("/token", ["token"]) + check("/more/than/one/token", ["more", "than", "one", "token"]) + check("/multi/tokens/with/empty/last/token/", ["multi", "tokens", "with", "empty", "last", "token", ""]) + check("/has/numbers/0/and/symbols/^", ["has", "numbers", "0", "and", "symbols", "^"]) + check("/has[a='predicate']/in/the/beginning", ["has[a='predicate']", "in", "the", "beginning"]) + check("/ha/s[a='predicate']/in/the/middle", ["ha", "s[a='predicate']", "in", "the", "middle"]) + check("/ha/s[a='predicate-in-the-end']", ["ha", "s[a='predicate-in-the-end']"]) + check("/it/has[more='than'][one='predicate']/somewhere", ["it", 
"has[more='than'][one='predicate']", "somewhere"]) + check("/ha/s[a='predicate\"with']/double-quotes/inside", ["ha", "s[a='predicate\"with']", "double-quotes", "inside"]) + check('/a/predicate[with="double"]/quotes', ["a", 'predicate[with="double"]', "quotes"]) + check('/multiple["predicate"][with="double"]/quotes', ['multiple["predicate"][with="double"]', "quotes"]) + check('/multiple["predicate"][with="double"]/quotes', ['multiple["predicate"][with="double"]', "quotes"]) + check('/ha/s[a="predicate\'with"]/single-quote/inside', ["ha", 's[a="predicate\'with"]', "single-quote", "inside"]) + # XPATH 1.0 does not support single-quote within single-quoted string. str literal can be '[^']*' + # Not validating no single-quote within single-quoted string + check("/a/mix['of''quotes\"does']/not/work/well", ["a", "mix['of''quotes\"does']", "not", "work", "well"]) + # XPATH 1.0 does not support double-quotes within double-quoted string. str literal can be "[^"]*" + # Not validating no double-quotes within double-quoted string + check('/a/mix["of""quotes\'does"]/not/work/well', ["a", 'mix["of""quotes\'does"]', "not", "work", "well"]) + + def test_create_xpath(self): + def check(tokens, xpath): + expected=xpath + actual=self.path_addressing.create_xpath(tokens) + self.assertEqual(expected, actual) + + check([], "/") + check(["token"], "/token") + check(["more", "than", "one", "token"], "/more/than/one/token") + check(["multi", "tokens", "with", "empty", "last", "token", ""], "/multi/tokens/with/empty/last/token/") + check(["has", "numbers", "0", "and", "symbols", "^"], "/has/numbers/0/and/symbols/^") + check(["has[a='predicate']", "in", "the", "beginning"], "/has[a='predicate']/in/the/beginning") + check(["ha", "s[a='predicate']", "in", "the", "middle"], "/ha/s[a='predicate']/in/the/middle") + check(["ha", "s[a='predicate-in-the-end']"], "/ha/s[a='predicate-in-the-end']") + check(["it", "has[more='than'][one='predicate']", "somewhere"], "/it/has[more='than'][one='predicate']/somewhere") + check(["ha", "s[a='predicate\"with']", "double-quotes", "inside"], "/ha/s[a='predicate\"with']/double-quotes/inside") + check(["a", 'predicate[with="double"]', "quotes"], '/a/predicate[with="double"]/quotes') + check(['multiple["predicate"][with="double"]', "quotes"], '/multiple["predicate"][with="double"]/quotes') + check(['multiple["predicate"][with="double"]', "quotes"], '/multiple["predicate"][with="double"]/quotes') + check(["ha", 's[a="predicate\'with"]', "single-quote", "inside"], '/ha/s[a="predicate\'with"]/single-quote/inside') + # XPATH 1.0 does not support single-quote within single-quoted string. str literal can be '[^']*' + # Not validating no single-quote within single-quoted string + check(["a", "mix['of''quotes\"does']", "not", "work", "well"], "/a/mix['of''quotes\"does']/not/work/well", ) + # XPATH 1.0 does not support double-quotes within double-quoted string. 
str literal can be "[^"]*" + # Not validating no double-quotes within double-quoted string + check(["a", 'mix["of""quotes\'does"]', "not", "work", "well"], '/a/mix["of""quotes\'does"]/not/work/well') + + def test_find_ref_paths__ref_is_the_whole_key__returns_ref_paths(self): + # Arrange + path = "/PORT/Ethernet0" + expected = [ + "/ACL_TABLE/NO-NSW-PACL-V4/ports/0", + "/VLAN_MEMBER/Vlan1000|Ethernet0", + ] + + # Act + actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) + + # Assert + self.assertCountEqual(expected, actual) + + def test_find_ref_paths__ref_is_a_part_of_key__returns_ref_paths(self): + # Arrange + path = "/VLAN/Vlan1000" + expected = [ + "/VLAN_MEMBER/Vlan1000|Ethernet0", + "/VLAN_MEMBER/Vlan1000|Ethernet4", + "/VLAN_MEMBER/Vlan1000|Ethernet8", + ] + + # Act + actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) + + # Assert + self.assertCountEqual(expected, actual) + + def test_find_ref_paths__ref_is_in_multilist__returns_ref_paths(self): + # Arrange + path = "/PORT/Ethernet8" + expected = [ + "/INTERFACE/Ethernet8", + "/INTERFACE/Ethernet8|10.0.0.1~130", + ] + + # Act + actual = self.path_addressing.find_ref_paths(path, Files.CONFIG_DB_WITH_INTERFACE) + + # Assert + self.assertCountEqual(expected, actual) + + def test_find_ref_paths__ref_is_in_leafref_union__returns_ref_paths(self): + # Arrange + path = "/PORTCHANNEL/PortChannel0001" + expected = [ + "/ACL_TABLE/NO-NSW-PACL-V4/ports/1", + ] + + # Act + actual = self.path_addressing.find_ref_paths(path, Files.CONFIG_DB_WITH_PORTCHANNEL_AND_ACL) + + # Assert + self.assertCountEqual(expected, actual) + + def test_find_ref_paths__path_is_table__returns_ref_paths(self): + # Arrange + path = "/PORT" + expected = [ + "/ACL_TABLE/DATAACL/ports/0", + "/ACL_TABLE/EVERFLOW/ports/0", + "/ACL_TABLE/EVERFLOWV6/ports/0", + "/ACL_TABLE/EVERFLOWV6/ports/1", + "/ACL_TABLE/NO-NSW-PACL-V4/ports/0", + "/VLAN_MEMBER/Vlan1000|Ethernet0", + "/VLAN_MEMBER/Vlan1000|Ethernet4", + "/VLAN_MEMBER/Vlan1000|Ethernet8", + ] + + # Act + actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) + + # Assert + self.assertCountEqual(expected, actual) + + def test_find_ref_paths__whole_config_path__returns_all_refs(self): + # Arrange + path = "" + expected = [ + "/ACL_TABLE/DATAACL/ports/0", + "/ACL_TABLE/EVERFLOW/ports/0", + "/ACL_TABLE/EVERFLOWV6/ports/0", + "/ACL_TABLE/EVERFLOWV6/ports/1", + "/ACL_TABLE/NO-NSW-PACL-V4/ports/0", + "/VLAN_MEMBER/Vlan1000|Ethernet0", + "/VLAN_MEMBER/Vlan1000|Ethernet4", + "/VLAN_MEMBER/Vlan1000|Ethernet8", + ] + + # Act + actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) + + # Assert + self.assertCountEqual(expected, actual) + + def test_convert_path_to_xpath(self): + def check(path, xpath, config=None): + if not config: + config = Files.CROPPED_CONFIG_DB_AS_JSON + + expected=xpath + actual=self.path_addressing.convert_path_to_xpath(path, config, self.sy_only_models) + self.assertEqual(expected, actual) + + check(path="", xpath="/") + check(path="/VLAN_MEMBER", xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER") + check(path="/VLAN/Vlan1000/dhcp_servers", + xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers") + check(path="/VLAN/Vlan1000/dhcp_servers/0", + xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers[.='192.0.0.1']") + check(path="/PORT/Ethernet0/lanes", xpath="/sonic-port:sonic-port/PORT/PORT_LIST[name='Ethernet0']/lanes") + 
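+ # A sketch of the leaf-list handling exercised next (values taken from the
+ # surrounding checks, not new data): a JSON-pointer index is positional,
+ # while the YANG xpath addresses leaf-list members by value, e.g.
+ #   path:  /ACL_TABLE/NO-NSW-PACL-V4/ports/0
+ #   xpath: .../ports[.='Ethernet0']
+ # so converting index 0 requires reading the config to resolve its value.
+ 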
check(path="/ACL_TABLE/NO-NSW-PACL-V4/ports/0", + xpath="/sonic-acl:sonic-acl/ACL_TABLE/ACL_TABLE_LIST[ACL_TABLE_NAME='NO-NSW-PACL-V4']/ports[.='Ethernet0']") + check(path="/VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode", + xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode") + check(path="/VLAN_MEMBER/Vlan1000|Ethernet8", + xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']") + check(path="/DEVICE_METADATA/localhost/hwsku", + xpath="/sonic-device_metadata:sonic-device_metadata/DEVICE_METADATA/localhost/hwsku", + config=Files.CONFIG_DB_WITH_DEVICE_METADATA) + check(path="/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", + xpath="/sonic-flex_counter:sonic-flex_counter/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", + config=Files.CONTRAINER_WITH_CONTAINER_CONFIG_DB) + check(path="/ACL_RULE/SSH_ONLY|RULE1/L4_SRC_PORT", + xpath="/sonic-acl:sonic-acl/ACL_RULE/ACL_RULE_LIST[ACL_TABLE_NAME='SSH_ONLY'][RULE_NAME='RULE1']/L4_SRC_PORT", + config=Files.CONFIG_DB_CHOICE) + check(path="/INTERFACE/Ethernet8", + xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_LIST[name='Ethernet8']", + config=Files.CONFIG_DB_WITH_INTERFACE) + check(path="/INTERFACE/Ethernet8|10.0.0.1~130", + xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']", + config=Files.CONFIG_DB_WITH_INTERFACE) + check(path="/INTERFACE/Ethernet8|10.0.0.1~130/scope", + xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']/scope", + config=Files.CONFIG_DB_WITH_INTERFACE) + check(path="/PORTCHANNEL_INTERFACE", + xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE", + config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) + check(path="/PORTCHANNEL_INTERFACE/PortChannel0001|1.1.1.1~124", + xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE/PORTCHANNEL_INTERFACE_IPPREFIX_LIST[name='PortChannel0001'][ip_prefix='1.1.1.1/24']", + config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) + + def test_convert_xpath_to_path(self): + def check(xpath, path, config=None): + if not config: + config = Files.CROPPED_CONFIG_DB_AS_JSON + + expected=path + actual=self.path_addressing.convert_xpath_to_path(xpath, config, self.sy_only_models) + self.assertEqual(expected, actual) + + check(xpath="/",path="") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER", path="/VLAN_MEMBER") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST",path="/VLAN_MEMBER") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']", + path="/VLAN_MEMBER/Vlan1000|Ethernet8") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/name", + path="/VLAN_MEMBER/Vlan1000|Ethernet8") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/port", + path="/VLAN_MEMBER/Vlan1000|Ethernet8") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode", + path="/VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode") + check(xpath="/sonic-vlan:sonic-acl/ACL_RULE", path="/ACL_RULE") + 
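+ # Conversely, a sketch of the reverse mapping shown below (drawn from the
+ # neighbouring checks): the predicates on a *_LIST entry fold back into the
+ # ConfigDB key with '|' between key fields, e.g.
+ #   VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8'] -> /VLAN_MEMBER/Vlan1000|Ethernet8
+ # and an xpath ending on a key leaf (/name or /port) maps to the entry itself.
+ 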
check(xpath="/sonic-vlan:sonic-acl/ACL_RULE/ACL_RULE_LIST[ACL_TABLE_NAME='SSH_ONLY'][RULE_NAME='RULE1']", + path="/ACL_RULE/SSH_ONLY|RULE1", + config=Files.CONFIG_DB_CHOICE) + check(xpath="/sonic-acl:sonic-acl/ACL_RULE/ACL_RULE_LIST[ACL_TABLE_NAME='SSH_ONLY'][RULE_NAME='RULE1']/L4_SRC_PORT", + path="/ACL_RULE/SSH_ONLY|RULE1/L4_SRC_PORT", + config=Files.CONFIG_DB_CHOICE) + check(xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers", + path="/VLAN/Vlan1000/dhcp_servers") + check(xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers[.='192.0.0.1']", + path="/VLAN/Vlan1000/dhcp_servers/0") + check(xpath="/sonic-port:sonic-port/PORT/PORT_LIST[name='Ethernet0']/lanes", path="/PORT/Ethernet0/lanes") + check(xpath="/sonic-acl:sonic-acl/ACL_TABLE/ACL_TABLE_LIST[ACL_TABLE_NAME='NO-NSW-PACL-V4']/ports[.='Ethernet0']", + path="/ACL_TABLE/NO-NSW-PACL-V4/ports/0") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode", + path="/VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']", + path="/VLAN_MEMBER/Vlan1000|Ethernet8") + check(xpath="/sonic-device_metadata:sonic-device_metadata/DEVICE_METADATA/localhost/hwsku", + path="/DEVICE_METADATA/localhost/hwsku", + config=Files.CONFIG_DB_WITH_DEVICE_METADATA) + check(xpath="/sonic-flex_counter:sonic-flex_counter/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK", + path="/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK", + config=Files.CONTRAINER_WITH_CONTAINER_CONFIG_DB) + check(xpath="/sonic-flex_counter:sonic-flex_counter/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", + path="/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", + config=Files.CONTRAINER_WITH_CONTAINER_CONFIG_DB) + check(xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_LIST[name='Ethernet8']", + path="/INTERFACE/Ethernet8", + config=Files.CONFIG_DB_WITH_INTERFACE) + check(xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']", + path="/INTERFACE/Ethernet8|10.0.0.1~130", + config=Files.CONFIG_DB_WITH_INTERFACE) + check(xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']/scope", + path="/INTERFACE/Ethernet8|10.0.0.1~130/scope", + config=Files.CONFIG_DB_WITH_INTERFACE) + check(xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE", + path="/PORTCHANNEL_INTERFACE", + config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) + check(xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE/PORTCHANNEL_INTERFACE_IPPREFIX_LIST[name='PortChannel0001'][ip_prefix='1.1.1.1/24']", + path="/PORTCHANNEL_INTERFACE/PortChannel0001|1.1.1.1~124", + config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) + diff --git a/tests/generic_config_updater/gutest_helpers.py b/tests/generic_config_updater/gutest_helpers.py new file mode 100644 index 0000000000..2e8984ad68 --- /dev/null +++ b/tests/generic_config_updater/gutest_helpers.py @@ -0,0 +1,53 @@ +import json +import jsonpatch +import os +import shutil +import sys +import unittest +from unittest.mock import MagicMock, Mock, call + +class MockSideEffectDict: + def __init__(self, map): + self.map = map + + def side_effect_func(self, *args): + l = [str(arg) for arg in args] + key = tuple(l) + value = self.map.get(key) + if value is None: + raise ValueError(f"Given arguments were not found in arguments map.\n 
Arguments: {key}\n Map: {self.map}") + + return value + +def create_side_effect_dict(map): + return MockSideEffectDict(map).side_effect_func + +class FilesLoader: + def __init__(self): + self.files_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files") + self.cache = {} + + def __getattr__(self, attr): + return self._load(attr) + + def _load(self, file_name): + normalized_file_name = file_name.lower() + + # Try load json file + json_file_path = os.path.join(self.files_path, f"{normalized_file_name}.json") + if os.path.isfile(json_file_path): + with open(json_file_path) as fh: + text = fh.read() + return json.loads(text) + + # Try load json-patch file + jsonpatch_file_path = os.path.join(self.files_path, f"{normalized_file_name}.json-patch") + if os.path.isfile(jsonpatch_file_path): + with open(jsonpatch_file_path) as fh: + text = fh.read() + return jsonpatch.JsonPatch(json.loads(text)) + + raise ValueError(f"There is no file called '{file_name}' in 'files/' directory") + +# Files.File_Name will look for a file called "file_name" in the "files/" directory +Files = FilesLoader() diff --git a/tests/generic_config_updater/patch_sorter_test.py b/tests/generic_config_updater/patch_sorter_test.py new file mode 100644 index 0000000000..4da9fb901b --- /dev/null +++ b/tests/generic_config_updater/patch_sorter_test.py @@ -0,0 +1,1730 @@ +import jsonpatch +import unittest +from unittest.mock import MagicMock, Mock + +import generic_config_updater.patch_sorter as ps +from .gutest_helpers import Files, create_side_effect_dict +from generic_config_updater.gu_common import ConfigWrapper, PatchWrapper, OperationWrapper, \ + GenericConfigUpdaterError, OperationType, JsonChange, PathAddressing + +class TestDiff(unittest.TestCase): + def test_apply_move__updates_current_config(self): + # Arrange + diff = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, target_config=Files.ANY_CONFIG_DB) + move = ps.JsonMove.from_patch(Files.SINGLE_OPERATION_CONFIG_DB_PATCH) + + expected = ps.Diff(current_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION, target_config=Files.ANY_CONFIG_DB) + + # Act + actual = diff.apply_move(move) + + # Assert + self.assertEqual(expected.current_config, actual.current_config) + self.assertEqual(expected.target_config, actual.target_config) + + def test_has_no_diff__diff_exists__returns_false(self): + # Arrange + diff = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, + target_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION) + + # Act and Assert + self.assertFalse(diff.has_no_diff()) + + def test_has_no_diff__no_diff__returns_true(self): + # Arrange + diff = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, + target_config=Files.CROPPED_CONFIG_DB_AS_JSON) + + # Act and Assert + self.assertTrue(diff.has_no_diff()) + + def test_hash__different_current_config__different_hashes(self): + # Arrange + diff1 = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, target_config=Files.ANY_CONFIG_DB) + diff2 = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, target_config=Files.ANY_CONFIG_DB) + diff3 = ps.Diff(current_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION, target_config=Files.ANY_CONFIG_DB) + + # Act + hash1 = hash(diff1) + hash2 = hash(diff2) + hash3 = hash(diff3) + + # Assert + self.assertEqual(hash1, hash2) # same current config + self.assertNotEqual(hash1, hash3) + + def test_hash__different_target_config__different_hashes(self): + # Arrange + diff1 = ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.CROPPED_CONFIG_DB_AS_JSON) + diff2 = 
ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.CROPPED_CONFIG_DB_AS_JSON) + diff3 = ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION) + + # Act + hash1 = hash(diff1) + hash2 = hash(diff2) + hash3 = hash(diff3) + + # Assert + self.assertEqual(hash1, hash2) # same target config + self.assertNotEqual(hash1, hash3) + + def test_hash__swapped_current_and_target_configs__different_hashes(self): + # Arrange + diff1 = ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.ANY_OTHER_CONFIG_DB) + diff2 = ps.Diff(current_config=Files.ANY_OTHER_CONFIG_DB, target_config=Files.ANY_CONFIG_DB) + + # Act + hash1 = hash(diff1) + hash2 = hash(diff2) + + # Assert + self.assertNotEqual(hash1, hash2) + + def test_eq__different_current_config__returns_false(self): + # Arrange + diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) + other_diff = ps.Diff(Files.ANY_OTHER_CONFIG_DB, Files.ANY_CONFIG_DB) + + # Act and assert + self.assertNotEqual(diff, other_diff) + self.assertFalse(diff == other_diff) + + def test_eq__different_target_config__returns_false(self): + # Arrange + diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) + other_diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_OTHER_CONFIG_DB) + + # Act and assert + self.assertNotEqual(diff, other_diff) + self.assertFalse(diff == other_diff) + + def test_eq__different_target_config__returns_true(self): + # Arrange + diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) + other_diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) + + # Act and assert + self.assertEqual(diff, other_diff) + self.assertTrue(diff == other_diff) + +class TestJsonMove(unittest.TestCase): + def setUp(self): + self.operation_wrapper = OperationWrapper() + self.any_op_type = OperationType.REPLACE + self.any_tokens = ["table1", "key11"] + self.any_path = "/table1/key11" + self.any_config = { + "table1": { + "key11": "value11" + } + } + self.any_value = "value11" + self.any_operation = self.operation_wrapper.create(self.any_op_type, self.any_path, self.any_value) + self.any_diff = ps.Diff(self.any_config, self.any_config) + + def test_ctor__delete_op_whole_config__none_value_and_empty_path(self): + # Arrange + path = "" + diff = ps.Diff(current_config={}, target_config=self.any_config) + + # Act + jsonmove = ps.JsonMove(diff, OperationType.REMOVE, []) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(OperationType.REMOVE, path), + OperationType.REMOVE, + [], + None, + jsonmove) + def test_ctor__remove_op__operation_created_directly(self): + # Arrange and Act + jsonmove = ps.JsonMove(self.any_diff, OperationType.REMOVE, self.any_tokens) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(OperationType.REMOVE, self.any_path), + OperationType.REMOVE, + self.any_tokens, + None, + jsonmove) + + def test_ctor__replace_op_whole_config__whole_config_value_and_empty_path(self): + # Arrange + path = "" + diff = ps.Diff(current_config={}, target_config=self.any_config) + + # Act + jsonmove = ps.JsonMove(diff, OperationType.REPLACE, [], []) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(OperationType.REPLACE, path, self.any_config), + OperationType.REPLACE, + [], + [], + jsonmove) + + def test_ctor__replace_op__operation_created_directly(self): + # Arrange and Act + jsonmove = ps.JsonMove(self.any_diff, OperationType.REPLACE, self.any_tokens, self.any_tokens) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(OperationType.REPLACE, self.any_path, 
self.any_value), + OperationType.REPLACE, + self.any_tokens, + self.any_tokens, + jsonmove) + + def test_ctor__add_op_whole_config__whole_config_value_and_empty_path(self): + # Arrange + path = "" + diff = ps.Diff(current_config={}, target_config=self.any_config) + + # Act + jsonmove = ps.JsonMove(diff, OperationType.ADD, [], []) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(OperationType.ADD, path, self.any_config), + OperationType.ADD, + [], + [], + jsonmove) + + def test_ctor__add_op_path_exist__same_value_and_path(self): + # Arrange and Act + jsonmove = ps.JsonMove(self.any_diff, OperationType.ADD, self.any_tokens, self.any_tokens) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(OperationType.ADD, self.any_path, self.any_value), + OperationType.ADD, + self.any_tokens, + self.any_tokens, + jsonmove) + + def test_ctor__add_op_path_exist_include_list__same_value_and_path(self): + # Arrange + current_config = { + "table1": { + "list1": ["value11", "value13"] + } + } + target_config = { + "table1": { + "list1": ["value11", "value12", "value13", "value14"] + } + } + diff = ps.Diff(current_config, target_config) + op_type = OperationType.ADD + current_config_tokens = ["table1", "list1", 1] # Index is 1 which does not exist in target + target_config_tokens = ["table1", "list1", 1] + expected_jsonpatch_path = "/table1/list1/1" + expected_jsonpatch_value = "value12" + # NOTE: the target config can contain more diff than the given move. + + # Act + jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value), + op_type, + current_config_tokens, + target_config_tokens, + jsonmove) + + def test_ctor__add_op_path_exist_list_index_doesnot_exist_in_target___same_value_and_path(self): + # Arrange + current_config = { + "table1": { + "list1": ["value11"] + } + } + target_config = { + "table1": { + "list1": ["value12"] + } + } + diff = ps.Diff(current_config, target_config) + op_type = OperationType.ADD + current_config_tokens = ["table1", "list1", 1] # Index is 1 which does not exist in target + target_config_tokens = ["table1", "list1", 0] + expected_jsonpatch_path = "/table1/list1/1" + expected_jsonpatch_value = "value12" + # NOTE: the target config can contain more diff than the given move. + + # Act + jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value), + op_type, + current_config_tokens, + target_config_tokens, + jsonmove) + + def test_ctor__add_op_path_doesnot_exist__value_and_path_of_parent(self): + # Arrange + current_config = { + } + target_config = { + "table1": { + "key11": { + "key111": "value111" + } + } + } + diff = ps.Diff(current_config, target_config) + op_type = OperationType.ADD + current_config_tokens = ["table1", "key11", "key111"] + target_config_tokens = ["table1", "key11", "key111"] + expected_jsonpatch_path = "/table1" + expected_jsonpatch_value = { + "key11": { + "key111": "value111" + } + } + # NOTE: the target config can contain more diff than the given move. 
+ + # Act + jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value), + op_type, + current_config_tokens, + target_config_tokens, + jsonmove) + + def test_ctor__add_op_path_doesnot_exist_include_list__value_and_path_of_parent(self): + # Arrange + current_config = { + } + target_config = { + "table1": { + "list1": ["value11", "value12", "value13", "value14"] + } + } + diff = ps.Diff(current_config, target_config) + op_type = OperationType.ADD + current_config_tokens = ["table1", "list1", 0] + target_config_tokens = ["table1", "list1", 1] + expected_jsonpatch_path = "/table1" + expected_jsonpatch_value = { + "list1": ["value12"] + } + # NOTE: the target config can contain more diff than the given move. + + # Act + jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value), + op_type, + current_config_tokens, + target_config_tokens, + jsonmove) + + def test_from_patch__more_than_1_op__failure(self): + # Arrange + patch = jsonpatch.JsonPatch([self.any_operation, self.any_operation]) + + # Act and Assert + self.assertRaises(GenericConfigUpdaterError, ps.JsonMove.from_patch, patch) + + def test_from_patch__delete_op__delete_jsonmove(self): + # Arrange + operation = self.operation_wrapper.create(OperationType.REMOVE, self.any_path) + patch = jsonpatch.JsonPatch([operation]) + + # Act + jsonmove = ps.JsonMove.from_patch(patch) + + # Assert + self.verify_jsonmove(operation, + OperationType.REMOVE, + self.any_tokens, + None, + jsonmove) + + def test_from_patch__replace_op__replace_jsonmove(self): + # Arrange + operation = self.operation_wrapper.create(OperationType.REPLACE, self.any_path, self.any_value) + patch = jsonpatch.JsonPatch([operation]) + + # Act + jsonmove = ps.JsonMove.from_patch(patch) + + # Assert + self.verify_jsonmove(operation, + OperationType.REPLACE, + self.any_tokens, + self.any_tokens, + jsonmove) + + def test_from_patch__add_op__add_jsonmove(self): + # Arrange + operation = self.operation_wrapper.create(OperationType.ADD, self.any_path, self.any_value) + patch = jsonpatch.JsonPatch([operation]) + + # Act + jsonmove = ps.JsonMove.from_patch(patch) + + # Assert + self.verify_jsonmove(operation, + OperationType.ADD, + self.any_tokens, + self.any_tokens, + jsonmove) + + def test_from_patch__add_op_with_list_indexes__add_jsonmove(self): + # Arrange + path = "/table1/key11/list1111/3" + value = "value11111" + # From a JsonPatch it is not possible to figure out if the '3' is an item in a list or a dictionary, + # will assume by default a dictionary for simplicity. 
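+ # For illustration (hypothetical documents, not fixtures): per RFC 6901 the
+ # pointer token "3" is valid for both of these shapes, so the patch alone
+ # cannot tell them apart:
+ #   {"list1111": {"3": "value11111"}}            <- dict with key "3"
+ #   {"list1111": ["a", "b", "c", "value11111"]}  <- list, index 3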
+ tokens = ["table1", "key11", "list1111", "3"] + operation = self.operation_wrapper.create(OperationType.ADD, path, value) + patch = jsonpatch.JsonPatch([operation]) + + # Act + jsonmove = ps.JsonMove.from_patch(patch) + + # Assert + self.verify_jsonmove(operation, + OperationType.ADD, + tokens, + tokens, + jsonmove) + + def test_from_patch__replace_whole_config__whole_config_jsonmove(self): + # Arrange + tokens = [] + path = "" + value = {"table1": {"key1": "value1"} } + operation = self.operation_wrapper.create(OperationType.REPLACE, path, value) + patch = jsonpatch.JsonPatch([operation]) + + # Act + jsonmove = ps.JsonMove.from_patch(patch) + + # Assert + self.verify_jsonmove(operation, + OperationType.REPLACE, + tokens, + tokens, + jsonmove) + + def verify_jsonmove(self, + expected_operation, + expected_op_type, + expected_current_config_tokens, + expected_target_config_tokens, + jsonmove): + expected_patch = jsonpatch.JsonPatch([expected_operation]) + self.assertEqual(expected_patch, jsonmove.patch) + self.assertEqual(expected_op_type, jsonmove.op_type) + self.assertListEqual(expected_current_config_tokens, jsonmove.current_config_tokens) + self.assertEqual(expected_target_config_tokens, jsonmove.target_config_tokens) + +class TestMoveWrapper(unittest.TestCase): + def setUp(self): + self.any_current_config = {} + self.any_target_config = {} + self.any_diff = ps.Diff(self.any_current_config, self.any_target_config) + self.any_move = Mock() + self.any_other_move1 = Mock() + self.any_other_move2 = Mock() + self.any_extended_move = Mock() + self.any_other_extended_move1 = Mock() + self.any_other_extended_move2 = Mock() + + self.single_move_generator = Mock() + self.single_move_generator.generate.side_effect = \ + create_side_effect_dict({(str(self.any_diff),): [self.any_move]}) + + self.another_single_move_generator = Mock() + self.another_single_move_generator.generate.side_effect = \ + create_side_effect_dict({(str(self.any_diff),): [self.any_other_move1]}) + + self.multiple_move_generator = Mock() + self.multiple_move_generator.generate.side_effect = create_side_effect_dict( + {(str(self.any_diff),): [self.any_move, self.any_other_move1, self.any_other_move2]}) + + self.single_move_extender = Mock() + self.single_move_extender.extend.side_effect = create_side_effect_dict( + { + (str(self.any_move), str(self.any_diff)): [self.any_extended_move], + (str(self.any_extended_move), str(self.any_diff)): [], # As first extended move will be extended + (str(self.any_other_extended_move1), str(self.any_diff)): [] # Needed when mixed with other extenders + }) + + self.another_single_move_extender = Mock() + self.another_single_move_extender.extend.side_effect = create_side_effect_dict( + { + (str(self.any_move), str(self.any_diff)): [self.any_other_extended_move1], + (str(self.any_other_extended_move1), str(self.any_diff)): [], # As first extended move will be extended + (str(self.any_extended_move), str(self.any_diff)): [] # Needed when mixed with other extenders + }) + + self.multiple_move_extender = Mock() + self.multiple_move_extender.extend.side_effect = create_side_effect_dict( + { + (str(self.any_move), str(self.any_diff)): \ + [self.any_extended_move, self.any_other_extended_move1, self.any_other_extended_move2], + # All extended moves will be extended + (str(self.any_extended_move), str(self.any_diff)): [], + (str(self.any_other_extended_move1), str(self.any_diff)): [], + (str(self.any_other_extended_move2), str(self.any_diff)): [], + }) + + self.mixed_move_extender = Mock() + 
self.mixed_move_extender.extend.side_effect = create_side_effect_dict( + { + (str(self.any_move), str(self.any_diff)): [self.any_extended_move], + (str(self.any_other_move1), str(self.any_diff)): [self.any_other_extended_move1], + (str(self.any_extended_move), str(self.any_diff)): \ + [self.any_other_extended_move1, self.any_other_extended_move2], + # All extended moves will be extended + (str(self.any_other_extended_move1), str(self.any_diff)): [], + (str(self.any_other_extended_move2), str(self.any_diff)): [], + }) + + self.fail_move_validator = Mock() + self.fail_move_validator.validate.side_effect = create_side_effect_dict( + {(str(self.any_move), str(self.any_diff)): False}) + + self.success_move_validator = Mock() + self.success_move_validator.validate.side_effect = create_side_effect_dict( + {(str(self.any_move), str(self.any_diff)): True}) + + def test_ctor__assigns_values_correctly(self): + # Arrange + move_generators = Mock() + move_extenders = Mock() + move_validators = Mock() + + # Act + move_wrapper = ps.MoveWrapper(move_generators, move_extenders, move_validators) + + # Assert + self.assertIs(move_generators, move_wrapper.move_generators) + self.assertIs(move_extenders, move_wrapper.move_extenders) + self.assertIs(move_validators, move_wrapper.move_validators) + + def test_generate__single_move_generator__single_move_returned(self): + # Arrange + move_generators = [self.single_move_generator] + move_wrapper = ps.MoveWrapper(move_generators, [], []) + expected = [self.any_move] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__multiple_move_generator__multiple_move_returned(self): + # Arrange + move_generators = [self.multiple_move_generator] + move_wrapper = ps.MoveWrapper(move_generators, [], []) + expected = [self.any_move, self.any_other_move1, self.any_other_move2] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__different_move_generators__different_moves_returned(self): + # Arrange + move_generators = [self.single_move_generator, self.another_single_move_generator] + move_wrapper = ps.MoveWrapper(move_generators, [], []) + expected = [self.any_move, self.any_other_move1] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__duplicate_generated_moves__unique_moves_returned(self): + # Arrange + move_generators = [self.single_move_generator, self.single_move_generator] + move_wrapper = ps.MoveWrapper(move_generators, [], []) + expected = [self.any_move] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__single_move_extender__one_extended_move_returned(self): + # Arrange + move_generators = [self.single_move_generator] + move_extenders = [self.single_move_extender] + move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + expected = [self.any_move, self.any_extended_move] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__multiple_move_extender__multiple_extended_move_returned(self): + # Arrange + move_generators = [self.single_move_generator] + move_extenders = [self.multiple_move_extender] + move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + expected = [self.any_move, self.any_extended_move, 
self.any_other_extended_move1, self.any_other_extended_move2] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__different_move_extenders__different_extended_moves_returned(self): + # Arrange + move_generators = [self.single_move_generator] + move_extenders = [self.single_move_extender, self.another_single_move_extender] + move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + expected = [self.any_move, self.any_extended_move, self.any_other_extended_move1] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__duplicate_extended_moves__unique_moves_returned(self): + # Arrange + move_generators = [self.single_move_generator] + move_extenders = [self.single_move_extender, self.single_move_extender] + move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + expected = [self.any_move, self.any_extended_move] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__mixed_extended_moves__unique_moves_returned(self): + # Arrange + move_generators = [self.single_move_generator, self.another_single_move_generator] + move_extenders = [self.mixed_move_extender] + move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + expected = [self.any_move, + self.any_other_move1, + self.any_extended_move, + self.any_other_extended_move1, + self.any_other_extended_move2] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_validate__validation_fail__false_returned(self): + # Arrange + move_validators = [self.fail_move_validator] + move_wrapper = ps.MoveWrapper([], [], move_validators) + + # Act and assert + self.assertFalse(move_wrapper.validate(self.any_move, self.any_diff)) + + def test_validate__validation_succeed__true_returned(self): + # Arrange + move_validators = [self.success_move_validator] + move_wrapper = ps.MoveWrapper([], [], move_validators) + + # Act and assert + self.assertTrue(move_wrapper.validate(self.any_move, self.any_diff)) + + def test_validate__multiple_validators_last_fail___false_returned(self): + # Arrange + move_validators = [self.success_move_validator, self.success_move_validator, self.fail_move_validator] + move_wrapper = ps.MoveWrapper([], [], move_validators) + + # Act and assert + self.assertFalse(move_wrapper.validate(self.any_move, self.any_diff)) + + def test_validate__multiple_validators_succeed___true_returned(self): + # Arrange + move_validators = [self.success_move_validator, self.success_move_validator, self.success_move_validator] + move_wrapper = ps.MoveWrapper([], [], move_validators) + + # Act and assert + self.assertTrue(move_wrapper.validate(self.any_move, self.any_diff)) + + def test_simulate__applies_move(self): + # Arrange + diff = Mock() + diff.apply_move.side_effect = create_side_effect_dict({(str(self.any_move), ): self.any_diff}) + move_wrapper = ps.MoveWrapper(None, None, None) + + # Act + actual = move_wrapper.simulate(self.any_move, diff) + + # Assert + self.assertIs(self.any_diff, actual) + +class TestDeleteWholeConfigMoveValidator(unittest.TestCase): + def setUp(self): + self.operation_wrapper = OperationWrapper() + self.validator = ps.DeleteWholeConfigMoveValidator() + self.any_diff = Mock() + self.any_non_whole_config_path = "/table1" + self.whole_config_path = "" + + def 
test_validate__non_remove_op_non_whole_config__success(self): + self.verify(OperationType.REPLACE, self.any_non_whole_config_path, True) + self.verify(OperationType.ADD, self.any_non_whole_config_path, True) + + def test_validate__remove_op_non_whole_config__success(self): + self.verify(OperationType.REMOVE, self.any_non_whole_config_path, True) + + def test_validate__non_remove_op_whole_config__success(self): + self.verify(OperationType.REPLACE, self.whole_config_path, True) + self.verify(OperationType.ADD, self.whole_config_path, True) + + def test_validate__remove_op_whole_config__failure(self): + self.verify(OperationType.REMOVE, self.whole_config_path, False) + + def verify(self, operation_type, path, expected): + # Arrange + value = None + if operation_type in [OperationType.ADD, OperationType.REPLACE]: + value = Mock() + + operation = self.operation_wrapper.create(operation_type, path, value) + move = ps.JsonMove.from_operation(operation) + + # Act + actual = self.validator.validate(move, self.any_diff) + + # Assert + self.assertEqual(expected, actual) + +class TestUniqueLanesMoveValidator(unittest.TestCase): + def setUp(self): + self.validator = ps.UniqueLanesMoveValidator() + + def test_validate__no_port_table__success(self): + config = {"ACL_TABLE": {}} + self.validate_target_config(config) + + def test_validate__empty_port_table__success(self): + config = {"PORT": {}} + self.validate_target_config(config) + + def test_validate__single_lane__success(self): + config = {"PORT": {"Ethernet0": {"lanes": "66", "speed":"10000"}}} + self.validate_target_config(config) + + def test_validate__different_lanes_single_port___success(self): + config = {"PORT": {"Ethernet0": {"lanes": "66, 67, 68", "speed":"10000"}}} + self.validate_target_config(config) + + def test_validate__different_lanes_multi_ports___success(self): + config = {"PORT": { + "Ethernet0": {"lanes": "64, 65", "speed":"10000"}, + "Ethernet1": {"lanes": "66, 67, 68", "speed":"10000"}, + }} + self.validate_target_config(config) + + def test_validate__same_lanes_single_port___success(self): + config = {"PORT": {"Ethernet0": {"lanes": "65, 65", "speed":"10000"}}} + self.validate_target_config(config, False) + + def validate_target_config(self, target_config, expected=True): + # Arrange + current_config = {} + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + + # Act + actual = self.validator.validate(move, diff) + + # Assert + self.assertEqual(expected, actual) + +class TestFullConfigMoveValidator(unittest.TestCase): + def setUp(self): + self.any_current_config = Mock() + self.any_target_config = Mock() + self.any_simulated_config = Mock() + self.any_diff = ps.Diff(self.any_current_config, self.any_target_config) + self.any_move = Mock() + self.any_move.apply.side_effect = \ + create_side_effect_dict({(str(self.any_current_config),): self.any_simulated_config}) + + def test_validate__invalid_config_db_after_applying_move__failure(self): + # Arrange + config_wrapper = Mock() + config_wrapper.validate_config_db_config.side_effect = \ + create_side_effect_dict({(str(self.any_simulated_config),): False}) + validator = ps.FullConfigMoveValidator(config_wrapper) + + # Act and assert + self.assertFalse(validator.validate(self.any_move, self.any_diff)) + + def test_validate__valid_config_db_after_applying_move__success(self): + # Arrange + config_wrapper = Mock() + config_wrapper.validate_config_db_config.side_effect = \ + create_side_effect_dict({(str(self.any_simulated_config),): 
True}) + validator = ps.FullConfigMoveValidator(config_wrapper) + + # Act and assert + self.assertTrue(validator.validate(self.any_move, self.any_diff)) + +class TestCreateOnlyMoveValidator(unittest.TestCase): + def setUp(self): + self.validator = ps.CreateOnlyMoveValidator(ps.PathAddressing()) + self.any_diff = ps.Diff({}, {}) + + def test_validate__non_replace_operation__success(self): + # Assert + self.assertTrue(self.validator.validate( \ + ps.JsonMove(self.any_diff, OperationType.ADD, [], []), self.any_diff)) + self.assertTrue(self.validator.validate( \ + ps.JsonMove(self.any_diff, OperationType.REMOVE, [], []), self.any_diff)) + + def test_validate__no_create_only_field__success(self): + current_config = {"PORT": {}} + target_config = {"PORT": {}, "ACL_TABLE": {}} + self.verify_diff(current_config, target_config) + + def test_validate__same_create_only_field__success(self): + current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} + target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}} + self.verify_diff(current_config, target_config) + + def test_validate__different_create_only_field__failure(self): + current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} + target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}} + self.verify_diff(current_config, target_config, expected=False) + + def test_validate__different_create_only_field_directly_updated__failure(self): + current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} + target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}} + self.verify_diff(current_config, + target_config, + ["PORT", "Ethernet0", "lanes"], + ["PORT", "Ethernet0", "lanes"], + False) + + def test_validate__different_create_only_field_updating_parent__failure(self): + current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} + target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}} + self.verify_diff(current_config, + target_config, + ["PORT", "Ethernet0"], + ["PORT", "Ethernet0"], + False) + + def test_validate__different_create_only_field_updating_grandparent__failure(self): + current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} + target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}} + self.verify_diff(current_config, + target_config, + ["PORT"], + ["PORT"], + False) + + def test_validate__same_create_only_field_directly_updated__success(self): + current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} + target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}} + self.verify_diff(current_config, + target_config, + ["PORT", "Ethernet0", "lanes"], + ["PORT", "Ethernet0", "lanes"]) + + def test_validate__same_create_only_field_updating_parent__success(self): + current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} + target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}} + self.verify_diff(current_config, + target_config, + ["PORT", "Ethernet0"], + ["PORT", "Ethernet0"]) + + def test_validate__same_create_only_field_updating_grandparent__success(self): + current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} + target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}} + self.verify_diff(current_config, + target_config, + ["PORT"], + ["PORT"]) + + def verify_diff(self, current_config, target_config, current_config_tokens=None, target_config_tokens=None, expected=True): + # Arrange + current_config_tokens = current_config_tokens if current_config_tokens else [] + target_config_tokens = target_config_tokens if target_config_tokens else [] + 
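+ # Note (a sketch of the semantics relied on here): empty token lists make
+ # this a whole-config REPLACE move with JsonPatch path "", mirroring the
+ # whole-config constructor cases in TestJsonMove above.
+ 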
diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REPLACE, current_config_tokens, target_config_tokens) + + # Act + actual = self.validator.validate(move, diff) + + # Assert + self.assertEqual(expected, actual) + +class TestNoDependencyMoveValidator(unittest.TestCase): + def setUp(self): + path_addressing = ps.PathAddressing() + config_wrapper = ConfigWrapper() + self.validator = ps.NoDependencyMoveValidator(path_addressing, config_wrapper) + + def test_validate__add_full_config_has_dependencies__failure(self): + # Arrange + # CROPPED_CONFIG_DB_AS_JSON has dependencies between PORT and ACL_TABLE + diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CROPPED_CONFIG_DB_AS_JSON) + move = ps.JsonMove(diff, OperationType.ADD, [], []) + + # Act and assert + self.assertFalse(self.validator.validate(move, diff)) + + def test_validate__add_full_config_no_dependencies__success(self): + # Arrange + diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CONFIG_DB_NO_DEPENDENCIES) + move = ps.JsonMove(diff, OperationType.ADD, [], []) + + # Act and assert + self.assertTrue(self.validator.validate(move, diff)) + + def test_validate__add_table_has_no_dependencies__success(self): + # Arrange + target_config = Files.CROPPED_CONFIG_DB_AS_JSON + # prepare current config by removing ACL_TABLE from current config + current_config = self.prepare_config(target_config, jsonpatch.JsonPatch([ + {"op": "remove", "path":"/ACL_TABLE"} + ])) + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.ADD, ["ACL_TABLE"], ["ACL_TABLE"]) + + # Act and assert + self.assertTrue(self.validator.validate(move, diff)) + + def test_validate__remove_full_config_has_dependencies__failure(self): + # Arrange + # CROPPED_CONFIG_DB_AS_JSON has dependencies between PORT and ACL_TABLE + diff = ps.Diff(Files.CROPPED_CONFIG_DB_AS_JSON, Files.EMPTY_CONFIG_DB) + move = ps.JsonMove(diff, OperationType.REMOVE, [], []) + + # Act and assert + self.assertFalse(self.validator.validate(move, diff)) + + def test_validate__remove_full_config_no_dependencies__success(self): + # Arrange + diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CONFIG_DB_NO_DEPENDENCIES) + move = ps.JsonMove(diff, OperationType.REMOVE, [], []) + + # Act and assert + self.assertTrue(self.validator.validate(move, diff)) + + def test_validate__remove_table_has_no_dependencies__success(self): + # Arrange + current_config = Files.CROPPED_CONFIG_DB_AS_JSON + target_config = self.prepare_config(current_config, jsonpatch.JsonPatch([ + {"op": "remove", "path":"/ACL_TABLE"} + ])) + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REMOVE, ["ACL_TABLE"]) + + # Act and assert + self.assertTrue(self.validator.validate(move, diff)) + + def test_validate__replace_whole_config_item_added_ref_added__failure(self): + # Arrange + target_config = Files.SIMPLE_CONFIG_DB_INC_DEPS + # prepare current config by removing an item and its ref from target config + current_config = self.prepare_config(target_config, jsonpatch.JsonPatch([ + {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""}, + {"op": "remove", "path":"/PORT/Ethernet0"} + ])) + + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + + # Act and assert + self.assertFalse(self.validator.validate(move, diff)) + + def test_validate__replace_whole_config_item_removed_ref_removed__false(self): + # Arrange + current_config = Files.SIMPLE_CONFIG_DB_INC_DEPS + # prepare target config by removing an item 
and its ref from current config + target_config = self.prepare_config(current_config, jsonpatch.JsonPatch([ + {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""}, + {"op": "remove", "path":"/PORT/Ethernet0"} + ])) + + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + + # Act and assert + self.assertFalse(self.validator.validate(move, diff)) + + def test_validate__replace_whole_config_item_same_ref_added__true(self): + # Arrange + target_config = Files.SIMPLE_CONFIG_DB_INC_DEPS + # prepare current config by removing ref from target config + current_config = self.prepare_config(target_config, jsonpatch.JsonPatch([ + {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""} + ])) + + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + + # Act and assert + self.assertTrue(self.validator.validate(move, diff)) + + def test_validate__replace_whole_config_item_same_ref_removed__true(self): + # Arrange + current_config= Files.SIMPLE_CONFIG_DB_INC_DEPS + # prepare target config by removing ref from current config + target_config = self.prepare_config(current_config, jsonpatch.JsonPatch([ + {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""} + ])) + + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + + # Act and assert + self.assertTrue(self.validator.validate(move, diff)) + + def test_validate__replace_whole_config_item_same_ref_same__true(self): + # Arrange + current_config= Files.SIMPLE_CONFIG_DB_INC_DEPS + # prepare target config by removing ref from current config + target_config = current_config + + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + + # Act and assert + self.assertTrue(self.validator.validate(move, diff)) + + def prepare_config(self, config, patch): + return patch.apply(config) + +class TestLowLevelMoveGenerator(unittest.TestCase): + def setUp(self): + path_addressing = PathAddressing() + self.generator = ps.LowLevelMoveGenerator(path_addressing) + + def test_generate__no_diff__no_moves(self): + self.verify() + + def test_generate__replace_key__replace_move(self): + self.verify(tc_ops=[{"op": "replace", 'path': '/PORT/Ethernet0/description', 'value':'any-desc'}]) + + def test_generate__leaf_key_missing__add_move(self): + self.verify( + cc_ops=[{"op": "remove", 'path': '/ACL_TABLE/EVERFLOW/policy_desc'}], + ex_ops=[{"op": "add", 'path': '/ACL_TABLE/EVERFLOW/policy_desc', 'value':'EVERFLOW'}] + ) + + def test_generate__leaf_key_additional__remove_move(self): + self.verify( + tc_ops=[{"op": "remove", 'path': '/ACL_TABLE/EVERFLOW/policy_desc'}] + ) + + def test_generate__table_missing__add_leafs_moves(self): + self.verify( + cc_ops=[{"op": "remove", 'path': '/VLAN'}], + ex_ops=[{'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'vlanid': '1000'}}}, + {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.1']}}}, + {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.2']}}}, + {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.3']}}}, + {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.4']}}}] + ) + + def test_generate__table_additional__remove_leafs_moves(self): + self.verify( + tc_ops=[{"op": "remove", 'path': '/VLAN'}], + ex_ops=[{'op': 'remove', 'path': '/VLAN/Vlan1000/vlanid'}, + {'op': 'remove', 'path': 
'/VLAN/Vlan1000/dhcp_servers/0'}, + {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/1'}, + {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/2'}, + {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/3'}] + ) + + def test_generate__leaf_table_missing__add_table(self): + self.verify( + tc_ops=[{"op": "add", 'path': '/NEW_TABLE', 'value':{}}] + ) + + def test_generate__leaf_table_additional__remove_table(self): + self.verify( + cc_ops=[{"op": "add", 'path': '/NEW_TABLE', 'value':{}}], + ex_ops=[{"op": "remove", 'path': '/NEW_TABLE'}] + ) + + def test_generate__replace_list_item__remove_add_replace_moves(self): + self.verify( + tc_ops=[{"op": "replace", 'path': '/ACL_TABLE/EVERFLOW/ports/0', 'value':'Ethernet0'}], + ex_ops=[ + {"op": "remove", 'path': '/ACL_TABLE/EVERFLOW/ports/0'}, + {"op": "add", 'path': '/ACL_TABLE/EVERFLOW/ports/0', 'value':'Ethernet0'}, + {"op": "replace", 'path': '/ACL_TABLE/EVERFLOW/ports/0', 'value':'Ethernet0'}, + ]) + + def test_generate__remove_list_item__remove_move(self): + self.verify( + tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}]) + + def test_generate__remove_multiple_list_items__multiple_remove_moves(self): + self.verify( + tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, + {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}], + ex_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, + {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/1'}] + ) + + def test_generate__remove_all_list_items__multiple_remove_moves(self): + self.verify( + tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], + ex_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, + {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/2'}, + {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/3'}, + {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/1'}] + ) + + def test_generate__add_list_items__add_move(self): + self.verify( + tc_ops=[{"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}] + ) + + def test_generate__add_multiple_list_items__multiple_add_moves(self): + self.verify( + tc_ops=[{"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, + {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.6'}] + ) + + def test_generate__add_all_list_items__multiple_add_moves(self): + self.verify( + cc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], + ex_ops=[{"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.1'}, + {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.2'}, + {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.3'}, + {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.4'}] + ) + + def test_generate__replace_multiple_list_items__multiple_remove_add_replace_moves(self): + self.verify( + tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, + {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.6'}], + ex_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, + {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/3'}, + {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, + {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.6'}, + {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, + {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 
'value':'192.168.1.6'}, + {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.5'}, + {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.6'}] + ) + + def test_generate__different_order_list_items__whole_list_replace_move(self): + self.verify( + tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[ + "192.0.0.4", + "192.0.0.3", + "192.0.0.2", + "192.0.0.1" + ]}]) + + def test_generate__whole_list_missing__add_items_moves(self): + self.verify( + cc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], + ex_ops=[{'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.1']}, + {'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.2']}, + {'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.3']}, + {'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.4']}]) + + def test_generate__whole_list_additional__remove_items_moves(self): + self.verify( + tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], + ex_ops=[{'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, + {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/1'}, + {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/2'}, + {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/3'}]) + + def test_generate__empty_list_missing__add_whole_list(self): + self.verify( + tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], + cc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], + ex_ops=[{'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}]) + + def test_generate__empty_list_additional__remove_whole_list(self): + self.verify( + tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], + cc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], + ex_ops=[{'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers'}]) + + def test_generate__dpb_1_to_4_example(self): + # Arrange + diff = ps.Diff(Files.DPB_1_SPLIT_FULL_CONFIG, Files.DPB_4_SPLITS_FULL_CONFIG) + + # Act + moves = list(self.generator.generate(diff)) + + # Assert + self.verify_moves([{'op': 'replace', 'path': '/PORT/Ethernet0/alias', 'value': 'Eth1/1'}, + {'op': 'replace', 'path': '/PORT/Ethernet0/lanes', 'value': '65'}, + {'op': 'replace', 'path': '/PORT/Ethernet0/description', 'value': ''}, + {'op': 'replace', 'path': '/PORT/Ethernet0/speed', 'value': '10000'}, + {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'alias': 'Eth1/2'}}, + {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'lanes': '66'}}, + {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'description': ''}}, + {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'speed': '10000'}}, + {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'alias': 'Eth1/3'}}, + {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'lanes': '67'}}, + {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'description': ''}}, + {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'speed': '10000'}}, + {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'alias': 'Eth1/4'}}, + {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'lanes': '68'}}, + {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'description': ''}}, + {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'speed': '10000'}}, + {'op': 'add', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1', 'value': 'Ethernet1'}, + {'op': 'add', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1', 'value': 'Ethernet2'}, + {'op': 'add', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1', 
'value': 'Ethernet3'},
+                           {'op': 'add', 'path': '/VLAN_MEMBER/Vlan100|Ethernet1', 'value': {'tagging_mode': 'untagged'}},
+                           {'op': 'add', 'path': '/VLAN_MEMBER/Vlan100|Ethernet2', 'value': {'tagging_mode': 'untagged'}},
+                           {'op': 'add', 'path': '/VLAN_MEMBER/Vlan100|Ethernet3', 'value': {'tagging_mode': 'untagged'}}],
+                          moves)
+
+    def test_generate__dpb_4_to_1_example(self):
+        # Arrange
+        diff = ps.Diff(Files.DPB_4_SPLITS_FULL_CONFIG, Files.DPB_1_SPLIT_FULL_CONFIG)
+
+        # Act
+        moves = list(self.generator.generate(diff))
+
+        # Assert
+        self.verify_moves([{'op': 'replace', 'path': '/PORT/Ethernet0/alias', 'value': 'Eth1'},
+                           {'op': 'replace', 'path': '/PORT/Ethernet0/lanes', 'value': '65, 66, 67, 68'},
+                           {'op': 'replace', 'path': '/PORT/Ethernet0/description', 'value': 'Ethernet0 100G link'},
+                           {'op': 'replace', 'path': '/PORT/Ethernet0/speed', 'value': '100000'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet1/alias'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet1/lanes'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet1/description'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet1/speed'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet2/alias'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet2/lanes'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet2/description'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet2/speed'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet3/alias'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet3/lanes'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet3/description'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet3/speed'},
+                           {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1'},
+                           {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/2'},
+                           {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/3'},
+                           {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan100|Ethernet1/tagging_mode'},
+                           {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan100|Ethernet2/tagging_mode'},
+                           {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan100|Ethernet3/tagging_mode'}],
+                          moves)
+
+    def verify(self, tc_ops=None, cc_ops=None, ex_ops=None):
+        """
+        Generate a diff where target config is modified using the given tc_ops.
+ The expected low level moves should ex_ops if it is not None, otherwise tc_ops + """ + # Arrange + diff = self.get_diff(target_config_ops=tc_ops, current_config_ops=cc_ops) + expected = ex_ops if ex_ops is not None else \ + tc_ops if tc_ops is not None else \ + [] + + # Act + actual = self.generator.generate(diff) + + # Assert + self.verify_moves(expected, actual) + + def verify_moves(self, ops, moves): + moves_ops = [list(move.patch)[0] for move in moves] + self.assertCountEqual(ops, moves_ops) + + def get_diff(self, target_config_ops = None, current_config_ops = None): + current_config = Files.CROPPED_CONFIG_DB_AS_JSON + if current_config_ops: + cc_patch = jsonpatch.JsonPatch(current_config_ops) + current_config = cc_patch.apply(current_config) + + target_config = Files.CROPPED_CONFIG_DB_AS_JSON + if target_config_ops: + tc_patch = jsonpatch.JsonPatch(target_config_ops) + target_config = tc_patch.apply(target_config) + + return ps.Diff(current_config, target_config) + +class TestUpperLevelMoveExtender(unittest.TestCase): + def setUp(self): + self.extender = ps.UpperLevelMoveExtender() + self.any_diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) + + def test_extend__root_level_move__no_extended_moves(self): + self.verify(OperationType.REMOVE, []) + self.verify(OperationType.ADD, [], []) + self.verify(OperationType.REPLACE, [], []) + + def test_extend__remove_key_upper_level_does_not_exist__remove_upper_level(self): + self.verify(OperationType.REMOVE, + ["ACL_TABLE", "EVERFLOW", "policy_desc"], + tc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}], + ex_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}]) + + def test_extend__remove_key_upper_level_does_exist__replace_upper_level(self): + self.verify(OperationType.REMOVE, + ["ACL_TABLE", "EVERFLOW", "policy_desc"], + tc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW/policy_desc'}], + ex_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW', 'value':{ + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }}]) + + def test_extend__remove_list_item_upper_level_does_not_exist__remove_upper_level(self): + self.verify(OperationType.REMOVE, + ["VLAN", "Vlan1000", "dhcp_servers", 1], + tc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers'}], + ex_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers'}]) + + def test_extend__remove_list_item_upper_level_does_exist__replace_upper_level(self): + self.verify(OperationType.REMOVE, + ["VLAN", "Vlan1000", "dhcp_servers", 1], + tc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers/1'}], + ex_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ + "192.0.0.1", + "192.0.0.3", + "192.0.0.4" + ]}]) + + def test_extend__add_key_upper_level_missing__add_upper_level(self): + self.verify(OperationType.ADD, + ["ACL_TABLE", "EVERFLOW", "policy_desc"], + ["ACL_TABLE", "EVERFLOW", "policy_desc"], + cc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}], + ex_ops=[{'op':'add', 'path':'/ACL_TABLE/EVERFLOW', 'value':{ + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }}]) + + def test_extend__add_key_upper_level_exist__replace_upper_level(self): + self.verify(OperationType.ADD, + ["ACL_TABLE", "EVERFLOW", "policy_desc"], + ["ACL_TABLE", "EVERFLOW", "policy_desc"], + cc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW/policy_desc'}], + ex_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW', 'value':{ + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + 
}}]) + + def test_extend__add_list_item_upper_level_missing__add_upper_level(self): + self.verify(OperationType.ADD, + ["VLAN", "Vlan1000", "dhcp_servers", 1], + ["VLAN", "Vlan1000", "dhcp_servers", 1], + cc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers'}], + ex_ops=[{'op':'add', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ]}]) + + def test_extend__add_list_item_upper_level_exist__replace_upper_level(self): + self.verify(OperationType.ADD, + ["VLAN", "Vlan1000", "dhcp_servers", 1], + ["VLAN", "Vlan1000", "dhcp_servers", 1], + cc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers/1'}], + ex_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ]}]) + + def test_extend__add_table__replace_whole_config(self): + self.verify(OperationType.ADD, + ["ACL_TABLE"], + ["ACL_TABLE"], + cc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], + ex_ops=[{'op':'replace', 'path':'', 'value':Files.CROPPED_CONFIG_DB_AS_JSON}]) + + def test_extend__replace_key__replace_upper_level(self): + self.verify(OperationType.REPLACE, + ["ACL_TABLE", "EVERFLOW", "policy_desc"], + ["ACL_TABLE", "EVERFLOW", "policy_desc"], + cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], + ex_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW', 'value':{ + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }}]) + + def test_extend__replace_list_item__replace_upper_level(self): + self.verify(OperationType.REPLACE, + ["VLAN", "Vlan1000", "dhcp_servers", 1], + ["VLAN", "Vlan1000", "dhcp_servers", 1], + cc_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers/1', 'value':'192.0.0.7'}], + ex_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ]}]) + + def test_extend__replace_table__replace_whole_config(self): + self.verify(OperationType.REPLACE, + ["VLAN"], + ["VLAN"], + cc_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers/1', 'value':'192.0.0.7'}], + ex_ops=[{'op':'replace', 'path':'', 'value':Files.CROPPED_CONFIG_DB_AS_JSON}]) + + def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[]): + """ + cc_ops, tc_ops are used to build the diff object. + diff, op_type, ctokens, ttokens are used to build the move. + move is extended and the result should match ex_ops. 
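+
+        Example (illustrative, mirroring
+        test_extend__remove_key_upper_level_does_exist__replace_upper_level):
+
+            verify(OperationType.REMOVE,
+                   ["ACL_TABLE", "EVERFLOW", "policy_desc"],
+                   tc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW/policy_desc'}],
+                   ex_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW', 'value': {...}}])
+
+        i.e. removing a key whose upper level still exists in the target is
+        extended to a replace of that upper level with its full target value.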
+ """ + # Arrange + current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, op_type, ctokens, ttokens) + + # Act + moves = self.extender.extend(move, diff) + + # Assert + self.verify_moves(ex_ops, moves) + + def verify_moves(self, ex_ops, moves): + moves_ops = [list(move.patch)[0] for move in moves] + self.assertCountEqual(ex_ops, moves_ops) + +class TestDeleteInsteadOfReplaceMoveExtender(unittest.TestCase): + def setUp(self): + self.extender = ps.DeleteInsteadOfReplaceMoveExtender() + + def test_extend__non_replace__no_extended_moves(self): + self.verify(OperationType.REMOVE, + ["ACL_TABLE"], + tc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], + ex_ops=[]) + self.verify(OperationType.ADD, + ["ACL_TABLE"], + ["ACL_TABLE"], + cc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], + ex_ops=[]) + + def test_extend__replace_key__delete_key(self): + self.verify(OperationType.REPLACE, + ["ACL_TABLE", "EVERFLOW", "policy_desc"], + ["ACL_TABLE", "EVERFLOW", "policy_desc"], + cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], + ex_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW/policy_desc'}]) + + def test_extend__replace_list_item__delete_list_item(self): + self.verify(OperationType.REPLACE, + ["VLAN", "Vlan1000", "dhcp_servers", 1], + ["VLAN", "Vlan1000", "dhcp_servers", 1], + cc_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers/1', 'value':'192.0.0.7'}], + ex_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers/1'}]) + + def test_extend__replace_table__delete_table(self): + self.verify(OperationType.REPLACE, + ["ACL_TABLE"], + ["ACL_TABLE"], + cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], + ex_ops=[{'op':'remove', 'path':'/ACL_TABLE'}]) + + def test_extend__replace_whole_config__delete_whole_config(self): + self.verify(OperationType.REPLACE, + [], + [], + cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], + ex_ops=[{'op':'remove', 'path':''}]) + + def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[]): + """ + cc_ops, tc_ops are used to build the diff object. + diff, op_type, ctokens, ttokens are used to build the move. + move is extended and the result should match ex_ops. 
+ """ + # Arrange + current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, op_type, ctokens, ttokens) + + # Act + moves = self.extender.extend(move, diff) + + # Assert + self.verify_moves(ex_ops, moves) + + def verify_moves(self, ex_ops, moves): + moves_ops = [list(move.patch)[0] for move in moves] + self.assertCountEqual(ex_ops, moves_ops) + +class DeleteRefsMoveExtender(unittest.TestCase): + def setUp(self): + self.extender = ps.DeleteRefsMoveExtender(PathAddressing()) + + def test_extend__non_delete_ops__no_extended_moves(self): + self.verify(OperationType.ADD, + ["ACL_TABLE"], + ["ACL_TABLE"], + cc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], + ex_ops=[]) + self.verify(OperationType.REPLACE, + ["ACL_TABLE"], + ["ACL_TABLE"], + cc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}], + ex_ops=[]) + + def test_extend__path_with_no_refs__no_extended_moves(self): + self.verify(OperationType.REMOVE, + ["ACL_TABLE"], + tc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], + ex_ops=[]) + + def test_extend__path_with_direct_refs__extended_moves(self): + self.verify(OperationType.REMOVE, + ["PORT", "Ethernet0"], + tc_ops=[{'op':'remove', 'path':'/PORT/Ethernet0'}], + ex_ops=[{'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet0'}, + {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/0'}]) + + def test_extend__path_with_refs_to_children__extended_moves(self): + self.verify(OperationType.REMOVE, + ["PORT"], + tc_ops=[{'op':'remove', 'path':'/PORT/Ethernet0'}], + ex_ops=[{'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet0'}, + {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/0'}, + {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet4'}, + {'op': 'remove', 'path': '/ACL_TABLE/DATAACL/ports/0'}, + {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet8'}, + {'op': 'remove', 'path': '/ACL_TABLE/EVERFLOWV6/ports/0'}, + {'op': 'remove', 'path': '/ACL_TABLE/EVERFLOW/ports/0'}, + {'op': 'remove', 'path': '/ACL_TABLE/EVERFLOWV6/ports/1'}]) + + def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[]): + """ + cc_ops, tc_ops are used to build the diff object. + diff, op_type, ctokens, ttokens are used to build the move. + move is extended and the result should match ex_ops. 
+ """ + # Arrange + current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, op_type, ctokens, ttokens) + + # Act + moves = self.extender.extend(move, diff) + + # Assert + self.verify_moves(ex_ops, moves) + + def verify_moves(self, ex_ops, moves): + moves_ops = [list(move.patch)[0] for move in moves] + self.assertCountEqual(ex_ops, moves_ops) + +class TestSortAlgorithmFactory(unittest.TestCase): + def test_dfs_sorter(self): + self.verify(ps.Algorithm.DFS, ps.DfsSorter) + + def test_bfs_sorter(self): + self.verify(ps.Algorithm.BFS, ps.BfsSorter) + + def test_memoization_sorter(self): + self.verify(ps.Algorithm.MEMOIZATION, ps.MemoizationSorter) + + def verify(self, algo, algo_class): + # Arrange + factory = ps.SortAlgorithmFactory(OperationWrapper(), ConfigWrapper(), PathAddressing()) + expected_generators = [ps.LowLevelMoveGenerator] + expected_extenders = [ps.UpperLevelMoveExtender, ps.DeleteInsteadOfReplaceMoveExtender, ps.DeleteRefsMoveExtender] + expected_validator = [ps.DeleteWholeConfigMoveValidator, + ps.FullConfigMoveValidator, + ps.NoDependencyMoveValidator, + ps.UniqueLanesMoveValidator, + ps.CreateOnlyMoveValidator] + + # Act + sorter = factory.create(algo) + actual_generators = [type(item) for item in sorter.move_wrapper.move_generators] + actual_extenders = [type(item) for item in sorter.move_wrapper.move_extenders] + actual_validators = [type(item) for item in sorter.move_wrapper.move_validators] + + # Assert + self.assertIsInstance(sorter, algo_class) + self.assertCountEqual(expected_generators, actual_generators) + self.assertCountEqual(expected_extenders, actual_extenders) + self.assertCountEqual(expected_validator, actual_validators) + +class TestPatchSorter(unittest.TestCase): + def create_patch_sorter(self, config=None): + if config is None: + config=Files.CROPPED_CONFIG_DB_AS_JSON + config_wrapper = ConfigWrapper() + config_wrapper.get_config_db_as_json = MagicMock(return_value=config) + patch_wrapper = PatchWrapper(config_wrapper) + operation_wrapper = OperationWrapper() + path_addressing= ps.PathAddressing() + sort_algorithm_factory = ps.SortAlgorithmFactory(operation_wrapper, config_wrapper, path_addressing) + + return ps.PatchSorter(config_wrapper, patch_wrapper, sort_algorithm_factory) + + def test_sort__empty_patch__returns_empty_changes_list(self): + # Arrange + patch = jsonpatch.JsonPatch([]) + expected = [] + + # Act + actual = self.create_patch_sorter().sort(patch) + + # Assert + self.assertCountEqual(expected, actual) + + def test_sort__patch_with_single_simple_operation__returns_one_change(self): + # Arrange + patch = jsonpatch.JsonPatch([{"op":"remove", "path":"/VLAN/Vlan1000/dhcp_servers/0"}]) + expected = [JsonChange(patch)] + + # Act + actual = self.create_patch_sorter().sort(patch) + + # Assert + self.assertCountEqual(expected, actual) + + def test_sort__replacing_create_only_field__success(self): + # Arrange + patch = jsonpatch.JsonPatch([{"op":"replace", "path": "/PORT/Ethernet0/lanes", "value":"67"}]) + + # Act + actual = self.create_patch_sorter(Files.DPB_1_SPLIT_FULL_CONFIG).sort(patch) + + # Assert + self.assertNotEqual(None, actual) + + def test_sort__inter_dependency_within_same_table__success(self): + # Arrange + patch = jsonpatch.JsonPatch([{"op":"add", "path":"/VLAN_INTERFACE", "value": { + "Vlan1000|fc02:1000::1/64": {}, + "Vlan1000|192.168.0.1/21": {}, + 
"Vlan1000": {} + }}]) + expected = [ + JsonChange(jsonpatch.JsonPatch([{"op": "add", "path": "/VLAN_INTERFACE", "value": {"Vlan1000": {}}}])), + JsonChange(jsonpatch.JsonPatch([{"op": "add", "path": "/VLAN_INTERFACE/Vlan1000|fc02:1000::1~164", "value": {}}])), + JsonChange(jsonpatch.JsonPatch([{"op": "add", "path": "/VLAN_INTERFACE/Vlan1000|192.168.0.1~121", "value": {}}])) + ] + + # Act + actual = self.create_patch_sorter().sort(patch) + + # Assert + self.assertListEqual(expected, actual) + + def test_sort__add_table__success(self): + self.verify(cc_ops=[{"op":"remove", "path":"/ACL_TABLE"}]) + + def test_sort__remove_table__success(self): + self.verify(tc_ops=[{"op":"remove", "path":"/ACL_TABLE"}]) + + def test_sort__modify_value_in_existing_table__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"/ACL_TABLE/EVERFLOW/stage", "value":"egress"}]) + + def test_sort__modify_value_in_existing_array__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"/ACL_TABLE/EVERFLOWV6/ports/0", "value":"Ethernet0"}]) + + def test_sort__add_value_to_existing_array__success(self): + self.verify(tc_ops=[{"op":"add", "path":"/ACL_TABLE/EVERFLOWV6/ports/0", "value":"Ethernet0"}]) + + def test_sort__add_new_key_to_existing_table__success(self): + self.verify(cc_ops=[{"op":"remove", "path":"/ACL_TABLE/EVERFLOWV6"}]) + + def test_sort__remove_2_items_with_dependency_from_different_tables__success(self): + self.verify(tc_ops=[{"op":"remove", "path":"/PORT/Ethernet0"}, + {"op":"remove", "path":"/VLAN_MEMBER/Vlan1000|Ethernet0"}, + {"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}], # removing ACL from current and target + cc_ops=[{"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}]) + + def test_sort__add_2_items_with_dependency_from_different_tables__success(self): + self.verify(tc_ops=[{"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}], # removing ACL from current and target + cc_ops=[{"op":"remove", "path":"/PORT/Ethernet0"}, + {"op":"remove", "path":"/VLAN_MEMBER/Vlan1000|Ethernet0"}, + {"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}]) + + def test_sort__remove_2_items_with_dependency_from_same_table__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}, + {"op":"remove", "path":"/INTERFACE/Ethernet8"}, + {"op":"remove", "path":"/INTERFACE/Ethernet8|10.0.0.1~130"}], + cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}]) + + def test_sort__add_2_items_with_dependency_from_same_table__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}], + cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}, + {"op":"remove", "path":"/INTERFACE/Ethernet8"}, + {"op":"remove", "path":"/INTERFACE/Ethernet8|10.0.0.1~130"}]) + + def test_sort__replace_mandatory_item__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"/ACL_TABLE/EVERFLOWV6/type", "value":"L2"}]) + + def test_sort__dpb_1_to_4__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.DPB_4_SPLITS_FULL_CONFIG}], + cc_ops=[{"op":"replace", "path":"", "value":Files.DPB_1_SPLIT_FULL_CONFIG}]) + + def test_sort__dpb_4_to_1__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.DPB_1_SPLIT_FULL_CONFIG}], + cc_ops=[{"op":"replace", "path":"", "value":Files.DPB_4_SPLITS_FULL_CONFIG}]) + + def test_sort__remove_an_item_with_default_value__success(self): + self.verify(tc_ops=[{"op":"remove", "path":"/ACL_TABLE/EVERFLOW/stage"}]) + + def 
test_sort__modify_items_with_dependencies_using_must__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}, + {"op":"replace", "path":"/CRM/Config/acl_counter_high_threshold", "value":"60"}, + {"op":"replace", "path":"/CRM/Config/acl_counter_low_threshold", "value":"50"}], + cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}]) + + # in the following example, it is possible to start with acl_counter_high_threshold + self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}, + {"op":"replace", "path":"/CRM/Config/acl_counter_high_threshold", "value":"80"}, + {"op":"replace", "path":"/CRM/Config/acl_counter_low_threshold", "value":"60"}], + cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}]) + + def verify(self, cc_ops=[], tc_ops=[]): + # Arrange + config_wrapper=ConfigWrapper() + target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + patch=jsonpatch.make_patch(current_config, target_config) + + # Act + actual = self.create_patch_sorter(current_config).sort(patch) + + # Assert + simulated_config = current_config + for move in actual: + simulated_config = move.apply(simulated_config) + self.assertTrue(config_wrapper.validate_config_db_config(simulated_config)) + self.assertEqual(target_config, simulated_config) From e89fac3d8b4fcdcd16b53f8dddf04bf830865a95 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Wed, 11 Aug 2021 23:57:46 +0000 Subject: [PATCH 13/60] Revert "CLI GEN-2 merged" This reverts commit 4fdf8050fae77de5ff251c50254bcfbfac931b19. --- config/config_mgmt.py | 109 ++----- sonic_package_manager/constraint.py | 2 +- sonic_package_manager/dockerapi.py | 9 - sonic_package_manager/errors.py | 1 + sonic_package_manager/main.py | 20 +- sonic_package_manager/manager.py | 179 ++++-------- sonic_package_manager/manifest.py | 4 +- sonic_package_manager/metadata.py | 13 +- sonic_package_manager/registry.py | 2 +- .../service_creator/__init__.py | 1 - .../service_creator/creator.py | 268 ++++++------------ .../service_creator/feature.py | 112 +++----- .../service_creator/sonic_db.py | 139 +++------ tests/sonic_package_manager/conftest.py | 34 +-- .../test_service_creator.py | 235 +++++---------- 15 files changed, 342 insertions(+), 786 deletions(-) diff --git a/config/config_mgmt.py b/config/config_mgmt.py index 4e34a7ae00..9b2021bef0 100644 --- a/config/config_mgmt.py +++ b/config/config_mgmt.py @@ -2,11 +2,8 @@ config_mgmt.py provides classes for configuration validation and for Dynamic Port Breakout. ''' - -import os import re import syslog -import yang as ly from json import load from sys import flags from time import sleep as tsleep @@ -49,14 +46,27 @@ def __init__(self, source="configDB", debug=False, allowTablesWithoutYang=True): try: self.configdbJsonIn = None self.configdbJsonOut = None - self.source = source self.allowTablesWithoutYang = allowTablesWithoutYang # logging vars self.SYSLOG_IDENTIFIER = "ConfigMgmt" self.DEBUG = debug - self.__init_sonic_yang() + self.sy = sonic_yang.SonicYang(YANG_DIR, debug=debug) + # load yang models + self.sy.loadYangModel() + # load jIn from config DB or from config DB json file. + if source.lower() == 'configdb': + self.readConfigDB() + # treat any other source as file input + else: + self.readConfigDBJson(source) + # this will crop config, xlate and load. 
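+            # loadData() crops ConfigDB tables that have no YANG model, translates
+            # ("xlates") the remaining JSON into the YANG data tree and loads it
+            # for validation; the cropped tables are recorded in
+            # self.sy.tablesWithOutYang, which is checked right below.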
+ self.sy.loadData(self.configdbJsonIn) + + # Raise if tables without YANG models are not allowed but exist. + if not allowTablesWithoutYang and len(self.sy.tablesWithOutYang): + raise Exception('Config has tables without YANG models') except Exception as e: self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) @@ -64,23 +74,6 @@ def __init__(self, source="configDB", debug=False, allowTablesWithoutYang=True): return - def __init_sonic_yang(self): - self.sy = sonic_yang.SonicYang(YANG_DIR, debug=self.DEBUG) - # load yang models - self.sy.loadYangModel() - # load jIn from config DB or from config DB json file. - if self.source.lower() == 'configdb': - self.readConfigDB() - # treat any other source as file input - else: - self.readConfigDBJson(self.source) - # this will crop config, xlate and load. - self.sy.loadData(self.configdbJsonIn) - - # Raise if tables without YANG models are not allowed but exist. - if not self.allowTablesWithoutYang and len(self.sy.tablesWithOutYang): - raise Exception('Config has tables without YANG models') - def __del__(self): pass @@ -220,70 +213,6 @@ def writeConfigDB(self, jDiff): return - def add_module(self, yang_module_str, replace_if_exists=False): - """ - Validate and add new YANG module to the system. - - Parameters: - yang_module_str (str): YANG module in string representation. - - Returns: - None - """ - - module_name = self.get_module_name(yang_module_str) - module_path = os.path.join(YANG_DIR, '{}.yang'.format(module_name)) - if os.path.exists(module_path) and not replace_if_exists: - raise Exception('{} already exists'.format(module_name)) - with open(module_path, 'w') as module_file: - module_file.write(yang_module_str) - try: - self.__init_sonic_yang() - except Exception: - os.remove(module_path) - raise - - def remove_module(self, module_name): - """ - Remove YANG module on the system and validate. - - Parameters: - module_name (str): YANG module name. - - Returns: - None - """ - - module_path = os.path.join(YANG_DIR, '{}.yang'.format(module_name)) - if not os.path.exists(module_path): - return - with open(module_path, 'r') as module_file: - yang_module_str = module_file.read() - try: - os.remove(module_path) - self.__init_sonic_yang() - except Exception: - self.add_module(yang_module_str) - raise - - @staticmethod - def get_module_name(yang_module_str): - """ - Read yangs module name from yang_module_str - - Parameters: - yang_module_str(str): YANG module string. - - Returns: - str: Module name - """ - - # Instantiate new context since parse_module_mem() loads the module into context. 
- sy = sonic_yang.SonicYang(YANG_DIR) - module = sy.ctx.parse_module_mem(yang_module_str, ly.LYS_IN_YANG) - return module.name() - - # End of Class ConfigMgmt class ConfigMgmtDPB(ConfigMgmt): @@ -488,8 +417,8 @@ def _deletePorts(self, ports=list(), force=False): deps.extend(dep) # No further action with no force and deps exist - if not force and deps: - return configToLoad, deps, False + if force == False and deps: + return configToLoad, deps, False; # delets all deps, No topological sort is needed as of now, if deletion # of deps fails, return immediately @@ -507,8 +436,8 @@ def _deletePorts(self, ports=list(), force=False): self.sy.deleteNode(str(xPathPort)) # Let`s Validate the tree now - if not self.validateConfigData(): - return configToLoad, deps, False + if self.validateConfigData()==False: + return configToLoad, deps, False; # All great if we are here, Lets get the diff self.configdbJsonOut = self.sy.getData() diff --git a/sonic_package_manager/constraint.py b/sonic_package_manager/constraint.py index 70b7165354..af5a13000b 100644 --- a/sonic_package_manager/constraint.py +++ b/sonic_package_manager/constraint.py @@ -46,7 +46,7 @@ def parse(constraints: Dict) -> 'ComponentConstraints': """ components = {component: VersionConstraint.parse(version) - for component, version in constraints.items()} + for component, version in constraints.items()} return ComponentConstraints(components) def deparse(self) -> Dict[str, str]: diff --git a/sonic_package_manager/dockerapi.py b/sonic_package_manager/dockerapi.py index 7f051d2d72..926600d0bc 100644 --- a/sonic_package_manager/dockerapi.py +++ b/sonic_package_manager/dockerapi.py @@ -186,15 +186,6 @@ def rm(self, container: str, **kwargs): self.client.containers.get(container).remove(**kwargs) log.debug(f'removed container {container}') - def rm_by_ancestor(self, image_id: str, **kwargs): - """ Docker 'rm' command for running containers instantiated - from image passed to this function. """ - - # Clean containers based on the old image - containers = self.ps(filters={'ancestor': image_id}, all=True) - for container in containers: - self.rm(container.id, **kwargs) - def ps(self, **kwargs): """ Docker 'ps' command. 
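+
+        Example (editor's sketch): list running and stopped containers created
+        from a given image, as the uninstall flow in manager.py does:
+
+            containers = docker_api.ps(filters={'ancestor': image_id}, all=True)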
""" diff --git a/sonic_package_manager/errors.py b/sonic_package_manager/errors.py index fe4de39a39..17279c52c4 100644 --- a/sonic_package_manager/errors.py +++ b/sonic_package_manager/errors.py @@ -143,3 +143,4 @@ class PackageComponentConflictError(PackageInstallationError): def __str__(self): return (f'Package {self.name} conflicts with {self.component} {self.constraint} ' f'in package {self.dependency} but version {self.installed_ver} is installed') + diff --git a/sonic_package_manager/main.py b/sonic_package_manager/main.py index 8a0aabb901..c0589ae5b5 100644 --- a/sonic_package_manager/main.py +++ b/sonic_package_manager/main.py @@ -361,7 +361,7 @@ def install(ctx, package_source = package_expr or from_repository or from_tarball if not package_source: - exit_cli('Package source is not specified', fg='red') + exit_cli(f'Package source is not specified', fg='red') if not yes and not force: click.confirm(f'{package_source} is going to be installed, ' @@ -386,7 +386,7 @@ def install(ctx, except Exception as err: exit_cli(f'Failed to install {package_source}: {err}', fg='red') except KeyboardInterrupt: - exit_cli('Operation canceled by user', fg='red') + exit_cli(f'Operation canceled by user', fg='red') @cli.command() @@ -409,16 +409,15 @@ def reset(ctx, name, force, yes, skip_host_plugins): except Exception as err: exit_cli(f'Failed to reset package {name}: {err}', fg='red') except KeyboardInterrupt: - exit_cli('Operation canceled by user', fg='red') + exit_cli(f'Operation canceled by user', fg='red') @cli.command() @add_options(PACKAGE_COMMON_OPERATION_OPTIONS) -@click.option('--keep-config', is_flag=True, help='Keep features configuration in CONFIG DB.') @click.argument('name') @click.pass_context @root_privileges_required -def uninstall(ctx, name, force, yes, keep_config): +def uninstall(ctx, name, force, yes): """ Uninstall package. 
""" manager: PackageManager = ctx.obj @@ -427,17 +426,12 @@ def uninstall(ctx, name, force, yes, keep_config): click.confirm(f'Package {name} is going to be uninstalled, ' f'continue?', abort=True, show_default=True) - uninstall_opts = { - 'force': force, - 'keep_config': keep_config, - } - try: - manager.uninstall(name, **uninstall_opts) + manager.uninstall(name, force) except Exception as err: exit_cli(f'Failed to uninstall package {name}: {err}', fg='red') except KeyboardInterrupt: - exit_cli('Operation canceled by user', fg='red') + exit_cli(f'Operation canceled by user', fg='red') @cli.command() @@ -459,7 +453,7 @@ def migrate(ctx, database, force, yes, dockerd_socket): except Exception as err: exit_cli(f'Failed to migrate packages {err}', fg='red') except KeyboardInterrupt: - exit_cli('Operation canceled by user', fg='red') + exit_cli(f'Operation canceled by user', fg='red') if __name__ == "__main__": diff --git a/sonic_package_manager/manager.py b/sonic_package_manager/manager.py index 836a992f0a..3caf90d95f 100644 --- a/sonic_package_manager/manager.py +++ b/sonic_package_manager/manager.py @@ -10,11 +10,8 @@ import docker import filelock -from config import config_mgmt from sonic_py_common import device_info -from sonic_cli_gen.generator import CliGenerator - from sonic_package_manager import utils from sonic_package_manager.constraint import ( VersionConstraint, @@ -42,16 +39,12 @@ from sonic_package_manager.progress import ProgressManager from sonic_package_manager.reference import PackageReference from sonic_package_manager.registry import RegistryResolver -from sonic_package_manager.service_creator import SONIC_CLI_COMMANDS from sonic_package_manager.service_creator.creator import ( ServiceCreator, run_command ) from sonic_package_manager.service_creator.feature import FeatureRegistry -from sonic_package_manager.service_creator.sonic_db import ( - INIT_CFG_JSON, - SonicDB -) +from sonic_package_manager.service_creator.sonic_db import SonicDB from sonic_package_manager.service_creator.utils import in_chroot from sonic_package_manager.source import ( PackageSource, @@ -59,6 +52,7 @@ RegistrySource, TarballSource ) +from sonic_package_manager.utils import DockerReference from sonic_package_manager.version import ( Version, VersionRange, @@ -108,7 +102,7 @@ def wrapped_function(*args, **kwargs): return wrapped_function -def rollback(func, *args, **kwargs) -> Callable: +def rollback(func, *args, **kwargs): """ Used in rollback callbacks to ignore failure but proceed with rollback. Error will be printed but not fail the whole procedure of rollback. """ @@ -137,7 +131,7 @@ def package_constraint_to_reference(constraint: PackageConstraint) -> PackageRef return PackageReference(package_name, version_to_tag(version_constraint)) -def parse_reference_expression(expression) -> PackageReference: +def parse_reference_expression(expression): try: return package_constraint_to_reference(PackageConstraint.parse(expression)) except ValueError: @@ -146,36 +140,6 @@ def parse_reference_expression(expression) -> PackageReference: return PackageReference.parse(expression) -def get_cli_plugin_directory(command: str) -> str: - """ Returns a plugins package directory for command group. - - Args: - command: SONiC command: "show"/"config"/"clear". - Returns: - Path to plugins package directory. 
- """ - - pkg_loader = pkgutil.get_loader(f'{command}.plugins') - if pkg_loader is None: - raise PackageManagerError(f'Failed to get plugins path for {command} CLI') - plugins_pkg_path = os.path.dirname(pkg_loader.path) - return plugins_pkg_path - - -def get_cli_plugin_path(package: Package, command: str) -> str: - """ Returns a path where to put CLI plugin code. - - Args: - package: Package to generate this path for. - command: SONiC command: "show"/"config"/"clear". - Returns: - Path generated for this package. - """ - - plugin_module_file = package.name + '.py' - return os.path.join(get_cli_plugin_directory(command), plugin_module_file) - - def validate_package_base_os_constraints(package: Package, sonic_version_info: Dict[str, str]): """ Verify that all dependencies on base OS components are met. Args: @@ -253,10 +217,11 @@ def validate_package_tree(packages: Dict[str, Package]): continue component_version = conflicting_package.components[component] - log.debug(f'conflicting package {conflict.name}: ' + log.debug(f'conflicting package {dependency.name}: ' f'component {component} version is {component_version}') + if constraint.allows_all(component_version): - raise PackageComponentConflictError(package.name, conflict, component, + raise PackageComponentConflictError(package.name, dependency, component, constraint, component_version) @@ -402,17 +367,12 @@ def install_from_source(self, if not self.database.has_package(package.name): self.database.add_package(package.name, package.repository) - service_create_opts = { - 'state': feature_state, - 'owner': default_owner, - } - try: with contextlib.ExitStack() as exits: source.install(package) exits.callback(rollback(source.uninstall, package)) - self.service_creator.create(package, **service_create_opts) + self.service_creator.create(package, state=feature_state, owner=default_owner) exits.callback(rollback(self.service_creator.remove, package)) self.service_creator.generate_shutdown_sequence_files( @@ -440,16 +400,13 @@ def install_from_source(self, @under_lock @opt_check - def uninstall(self, name: str, - force: bool = False, - keep_config: bool = False): + def uninstall(self, name: str, force=False): """ Uninstall SONiC Package referenced by name. The uninstallation can be forced if force argument is True. Args: name: SONiC Package name. force: Force the installation. - keep_config: Keep feature configuration in databases. 
Raises: PackageManagerError """ @@ -479,11 +436,17 @@ def uninstall(self, name: str, try: self._uninstall_cli_plugins(package) - self.service_creator.remove(package, keep_config=keep_config) + self.service_creator.remove(package) self.service_creator.generate_shutdown_sequence_files( self._get_installed_packages_except(package) ) - self.docker.rm_by_ancestor(package.image_id, force=True) + + # Clean containers based on this image + containers = self.docker.ps(filters={'ancestor': package.image_id}, + all=True) + for container in containers: + self.docker.rm(container.id, force=True) + self.docker.rmi(package.image_id, force=True) package.entry.image_id = None except Exception as err: @@ -531,6 +494,7 @@ def upgrade_from_source(self, ) old_feature = old_package.manifest['service']['name'] + new_feature = new_package.manifest['service']['name'] old_version = old_package.manifest['package']['version'] new_version = new_package.manifest['package']['version'] @@ -558,13 +522,6 @@ def upgrade_from_source(self, # After all checks are passed we proceed to actual upgrade - service_create_opts = { - 'register_feature': False, - } - service_remove_opts = { - 'deregister_feature': False, - } - try: with contextlib.ExitStack() as exits: self._uninstall_cli_plugins(old_package) @@ -573,25 +530,24 @@ def upgrade_from_source(self, source.install(new_package) exits.callback(rollback(source.uninstall, new_package)) - feature_enabled = self.feature_registry.is_feature_enabled(old_feature) - - if feature_enabled: - self._systemctl_action(new_package, 'disable') - exits.callback(rollback(self._systemctl_action, - old_package, 'enable')) + if self.feature_registry.is_feature_enabled(old_feature): self._systemctl_action(old_package, 'stop') exits.callback(rollback(self._systemctl_action, old_package, 'start')) - self.service_creator.remove(old_package, **service_remove_opts) + self.service_creator.remove(old_package, deregister_feature=False) exits.callback(rollback(self.service_creator.create, old_package, - **service_create_opts)) + register_feature=False)) - self.docker.rm_by_ancestor(old_package.image_id, force=True) + # Clean containers based on the old image + containers = self.docker.ps(filters={'ancestor': old_package.image_id}, + all=True) + for container in containers: + self.docker.rm(container.id, force=True) - self.service_creator.create(new_package, **service_create_opts) + self.service_creator.create(new_package, register_feature=False) exits.callback(rollback(self.service_creator.remove, new_package, - **service_remove_opts)) + register_feature=False)) self.service_creator.generate_shutdown_sequence_files( self._get_installed_packages_and(new_package) @@ -601,23 +557,11 @@ def upgrade_from_source(self, self._get_installed_packages_and(old_package)) ) - if feature_enabled: - self._systemctl_action(new_package, 'enable') - exits.callback(rollback(self._systemctl_action, - old_package, 'disable')) + if self.feature_registry.is_feature_enabled(new_feature): self._systemctl_action(new_package, 'start') exits.callback(rollback(self._systemctl_action, new_package, 'stop')) - # Update feature configuration after we have started new service. - # If we place it before the above, our service start/stop will - # interfere with hostcfgd in rollback path leading to a service - # running with new image and not the old one. 
- self.feature_registry.update(old_package.manifest, new_package.manifest) - exits.callback(rollback( - self.feature_registry.update, new_package.manifest, old_package.manifest) - ) - if not skip_host_plugins: self._install_cli_plugins(new_package) exits.callback(rollback(self._uninstall_cli_plugin, old_package)) @@ -669,16 +613,16 @@ def migrate_packages(self, old_package_database: PackageDatabase, dockerd_sock: Optional[str] = None): """ - Migrate packages from old database. This function can do a comparison between - current database and the database passed in as argument. If the package is - missing in the current database it will be added. If the package is installed - in the passed database and in the current it is not installed it will be - installed with a passed database package version. If the package is installed - in the passed database and it is installed in the current database but with - older version the package will be upgraded to the never version. If the package - is installed in the passed database and in the current it is installed but with - never version - no actions are taken. If dockerd_sock parameter is passed, the - migration process will use loaded images from docker library of the currently + Migrate packages from old database. This function can do a comparison between + current database and the database passed in as argument. If the package is + missing in the current database it will be added. If the package is installed + in the passed database and in the current it is not installed it will be + installed with a passed database package version. If the package is installed + in the passed database and it is installed in the current database but with + older version the package will be upgraded to the never version. If the package + is installed in the passed database and in the current it is installed but with + never version - no actions are taken. If dockerd_sock parameter is passed, the + migration process will use loaded images from docker library of the currently installed image. Args: @@ -799,7 +743,7 @@ def get_package_source(self, ref = parse_reference_expression(package_expression) return self.get_package_source(package_ref=ref) elif repository_reference: - repo_ref = utils.DockerReference.parse(repository_reference) + repo_ref = DockerReference.parse(repository_reference) repository = repo_ref['name'] reference = repo_ref['tag'] or repo_ref['digest'] reference = reference or 'latest' @@ -830,8 +774,8 @@ def get_package_source(self, if package_entry.default_reference is not None: package_ref.reference = package_entry.default_reference else: - raise PackageManagerError('No default reference tag. ' - 'Please specify the version or tag explicitly') + raise PackageManagerError(f'No default reference tag. ' + f'Please specify the version or tag explicitly') return RegistrySource(package_entry.repository, package_ref.reference, @@ -903,7 +847,7 @@ def get_installed_packages_list(self) -> List[Package]: Installed packages dictionary. 
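+
+        Example (editor's sketch), matching the List[Package] signature:
+
+            for package in manager.get_installed_packages_list():
+                print(package.name)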
""" - return [self.get_installed_package(entry.name) + return [self.get_installed_package(entry.name) for entry in self.database if entry.installed] def _migrate_package_database(self, old_package_database: PackageDatabase): @@ -962,26 +906,38 @@ def _systemctl_action(self, package: Package, action: str): for npu in range(self.num_npus): run_command(f'systemctl {action} {name}@{npu}') + @staticmethod + def _get_cli_plugin_name(package: Package): + return utils.make_python_identifier(package.name) + '.py' + + @classmethod + def _get_cli_plugin_path(cls, package: Package, command): + pkg_loader = pkgutil.get_loader(f'{command}.plugins') + if pkg_loader is None: + raise PackageManagerError(f'Failed to get plugins path for {command} CLI') + plugins_pkg_path = os.path.dirname(pkg_loader.path) + return os.path.join(plugins_pkg_path, cls._get_cli_plugin_name(package)) + def _install_cli_plugins(self, package: Package): - for command in SONIC_CLI_COMMANDS: + for command in ('show', 'config', 'clear'): self._install_cli_plugin(package, command) def _uninstall_cli_plugins(self, package: Package): - for command in SONIC_CLI_COMMANDS: + for command in ('show', 'config', 'clear'): self._uninstall_cli_plugin(package, command) def _install_cli_plugin(self, package: Package, command: str): image_plugin_path = package.manifest['cli'][command] if not image_plugin_path: return - host_plugin_path = get_cli_plugin_path(package, command) + host_plugin_path = self._get_cli_plugin_path(package, command) self.docker.extract(package.entry.image_id, image_plugin_path, host_plugin_path) def _uninstall_cli_plugin(self, package: Package, command: str): image_plugin_path = package.manifest['cli'][command] if not image_plugin_path: return - host_plugin_path = get_cli_plugin_path(package, command) + host_plugin_path = self._get_cli_plugin_path(package, command) if os.path.exists(host_plugin_path): os.remove(host_plugin_path) @@ -993,21 +949,12 @@ def get_manager() -> 'PackageManager': PackageManager """ - docker_api = DockerApi(docker.from_env(), ProgressManager()) + docker_api = DockerApi(docker.from_env()) registry_resolver = RegistryResolver() - metadata_resolver = MetadataResolver(docker_api, registry_resolver) - cfg_mgmt = config_mgmt.ConfigMgmt(source=INIT_CFG_JSON) - cli_generator = CliGenerator(log) - feature_registry = FeatureRegistry(SonicDB) - service_creator = ServiceCreator(feature_registry, - SonicDB, - cli_generator, - cfg_mgmt) - - return PackageManager(docker_api, + return PackageManager(DockerApi(docker.from_env(), ProgressManager()), registry_resolver, PackageDatabase.from_file(), - metadata_resolver, - service_creator, + MetadataResolver(docker_api, registry_resolver), + ServiceCreator(FeatureRegistry(SonicDB), SonicDB), device_info, filelock.FileLock(PACKAGE_MANAGER_LOCK_FILE, timeout=0)) diff --git a/sonic_package_manager/manifest.py b/sonic_package_manager/manifest.py index 216baef756..c126e2eef1 100644 --- a/sonic_package_manager/manifest.py +++ b/sonic_package_manager/manifest.py @@ -205,9 +205,7 @@ def unmarshal(self, value): ManifestField('mandatory', DefaultMarshaller(bool), False), ManifestField('show', DefaultMarshaller(str), ''), ManifestField('config', DefaultMarshaller(str), ''), - ManifestField('clear', DefaultMarshaller(str), ''), - ManifestField('auto-generate-show', DefaultMarshaller(bool), False), - ManifestField('auto-generate-config', DefaultMarshaller(bool), False), + ManifestField('clear', DefaultMarshaller(str), '') ]) ]) diff --git a/sonic_package_manager/metadata.py 
b/sonic_package_manager/metadata.py index dc718375ed..7f7c25ceaf 100644 --- a/sonic_package_manager/metadata.py +++ b/sonic_package_manager/metadata.py @@ -4,7 +4,7 @@ import json import tarfile -from typing import Dict, Optional +from typing import Dict from sonic_package_manager.errors import MetadataError from sonic_package_manager.manifest import Manifest @@ -24,10 +24,10 @@ def deep_update(dst: Dict, src: Dict) -> Dict: for key, value in src.items(): if isinstance(value, dict): - node = dst.setdefault(key, {}) - deep_update(node, value) + node = dst.setdefault(key, {}) + deep_update(node, value) else: - dst[key] = value + dst[key] = value return dst @@ -73,7 +73,6 @@ class Metadata: manifest: Manifest components: Dict[str, Version] = field(default_factory=dict) - yang_module_str: Optional[str] = None class MetadataResolver: @@ -183,6 +182,4 @@ def from_labels(cls, labels: Dict[str, str]) -> Metadata: except ValueError as err: raise MetadataError(f'Failed to parse component version: {err}') - yang_module_str = sonic_metadata.get('yang-module') - - return Metadata(Manifest.marshal(manifest_dict), components, yang_module_str) + return Metadata(Manifest.marshal(manifest_dict), components) diff --git a/sonic_package_manager/registry.py b/sonic_package_manager/registry.py index 8c03b078d2..8a09d9136e 100644 --- a/sonic_package_manager/registry.py +++ b/sonic_package_manager/registry.py @@ -38,7 +38,7 @@ def get_token(realm, service, scope) -> str: response = requests.get(f'{realm}?scope={scope}&service={service}') if response.status_code != requests.codes.ok: - raise AuthenticationServiceError('Failed to retrieve token') + raise AuthenticationServiceError(f'Failed to retrieve token') content = json.loads(response.content) token = content['token'] diff --git a/sonic_package_manager/service_creator/__init__.py b/sonic_package_manager/service_creator/__init__.py index b0f4a24086..e2af81ceb5 100644 --- a/sonic_package_manager/service_creator/__init__.py +++ b/sonic_package_manager/service_creator/__init__.py @@ -1,4 +1,3 @@ #!/usr/bin/env python ETC_SONIC_PATH = '/etc/sonic' -SONIC_CLI_COMMANDS = ('show', 'config', 'clear') diff --git a/sonic_package_manager/service_creator/creator.py b/sonic_package_manager/service_creator/creator.py index 91f0f6102c..4c618eb7ea 100644 --- a/sonic_package_manager/service_creator/creator.py +++ b/sonic_package_manager/service_creator/creator.py @@ -5,27 +5,18 @@ import stat import subprocess from collections import defaultdict -from typing import Dict, Type +from typing import Dict import jinja2 as jinja2 -from config.config_mgmt import ConfigMgmt from prettyprinter import pformat from toposort import toposort_flatten, CircularDependencyError -from config.config_mgmt import sonic_cfggen -from sonic_cli_gen.generator import CliGenerator - from sonic_package_manager.logger import log from sonic_package_manager.package import Package -from sonic_package_manager.service_creator import ( - ETC_SONIC_PATH, - SONIC_CLI_COMMANDS, -) +from sonic_package_manager.service_creator import ETC_SONIC_PATH from sonic_package_manager.service_creator.feature import FeatureRegistry -from sonic_package_manager.service_creator.sonic_db import SonicDB from sonic_package_manager.service_creator.utils import in_chroot - SERVICE_FILE_TEMPLATE = 'sonic.service.j2' TIMER_UNIT_TEMPLATE = 'timer.unit.j2' @@ -87,22 +78,12 @@ def set_executable_bit(filepath): os.chmod(filepath, st.st_mode | stat.S_IEXEC) -def remove_if_exists(path): - """ Remove filepath if it exists """ - - if not 
os.path.exists(path): - return - - os.remove(path) - log.info(f'removed {path}') - - def run_command(command: str): """ Run arbitrary bash command. Args: command: String command to execute as bash script Raises: - ServiceCreatorError: Raised when the command return code + PackageManagerError: Raised when the command return code is not 0. """ @@ -123,30 +104,24 @@ class ServiceCreator: def __init__(self, feature_registry: FeatureRegistry, - sonic_db: Type[SonicDB], - cli_gen: CliGenerator, - cfg_mgmt: ConfigMgmt): + sonic_db): """ Initialize ServiceCreator with: - + Args: feature_registry: FeatureRegistry object. - sonic_db: SonicDB interface. - cli_gen: CliGenerator instance. - cfg_mgmt: ConfigMgmt instance. + sonic_db: SonicDB interface. """ self.feature_registry = feature_registry self.sonic_db = sonic_db - self.cli_gen = cli_gen - self.cfg_mgmt = cfg_mgmt def create(self, package: Package, register_feature: bool = True, state: str = 'enabled', owner: str = 'local'): - """ Register package as SONiC service. - + """ Register package as SONiC service. + Args: package: Package object to install. register_feature: Whether to register this package in FEATURE table. @@ -164,54 +139,54 @@ def create(self, self.generate_systemd_service(package) self.generate_dump_script(package) self.generate_service_reconciliation_file(package) - self.install_yang_module(package) + self.set_initial_config(package) - self.install_autogen_cli_all(package) self._post_operation_hook() if register_feature: - self.feature_registry.register(package.manifest, state, owner) + self.feature_registry.register(package.manifest, + state, owner) except (Exception, KeyboardInterrupt): - self.remove(package, deregister_feature=register_feature) + self.remove(package, register_feature) raise def remove(self, package: Package, - deregister_feature: bool = True, - keep_config: bool = False): + deregister_feature: bool = True): """ Uninstall SONiC service provided by the package. - + Args: package: Package object to uninstall. deregister_feature: Whether to deregister this package from FEATURE table. - keep_config: Whether to keep the package configuration in the DB.
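As an aside, the run_command() contract above can be illustrated with a small, self-contained sketch; RuntimeError stands in for the package manager's own error type, and the command is a placeholder:

    import subprocess

    def run_command_sketch(command: str):
        # Execute through bash and fail loudly on a non-zero exit code,
        # mirroring the behaviour documented for run_command() above.
        proc = subprocess.Popen(command, shell=True, executable='/bin/bash',
                                stdout=subprocess.PIPE)
        out, _ = proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError(f'Failed to execute "{command}", rc={proc.returncode}')

    run_command_sketch('true')  # succeeds silently; 'false' would raise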
Returns: None """ name = package.manifest['service']['name'] - remove_if_exists(os.path.join(SYSTEMD_LOCATION, f'{name}.service')) - remove_if_exists(os.path.join(SYSTEMD_LOCATION, f'{name}@.service')) - remove_if_exists(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, f'{name}.sh')) - remove_if_exists(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, f'{name}.sh')) - remove_if_exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, f'{name}')) - remove_if_exists(os.path.join(ETC_SONIC_PATH, f'{name}_reconcile')) - self.update_dependent_list_file(package, remove=True) - if deregister_feature and not keep_config: - self.remove_config(package) + def remove_file(path): + if os.path.exists(path): + os.remove(path) + log.info(f'removed {path}') + + remove_file(os.path.join(SYSTEMD_LOCATION, f'{name}.service')) + remove_file(os.path.join(SYSTEMD_LOCATION, f'{name}@.service')) + remove_file(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, f'{name}.sh')) + remove_file(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, f'{name}.sh')) + remove_file(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, f'{name}')) + remove_file(os.path.join(ETC_SONIC_PATH, f'{name}_reconcile')) - self.uninstall_autogen_cli_all(package) - self.uninstall_yang_module(package) + self.update_dependent_list_file(package, remove=True) self._post_operation_hook() if deregister_feature: self.feature_registry.deregister(package.manifest['service']['name']) + self.remove_config(package) def generate_container_mgmt(self, package: Package): - """ Generates container management script under /usr/bin/.sh for package. - + """ Generates container management script under /usr/bin/.sh for package. + Args: package: Package object to generate script for. Returns: @@ -253,8 +228,8 @@ def generate_container_mgmt(self, package: Package): log.info(f'generated {script_path}') def generate_service_mgmt(self, package: Package): - """ Generates service management script under /usr/local/bin/.sh for package. - + """ Generates service management script under /usr/local/bin/.sh for package. + Args: package: Package object to generate script for. Returns: @@ -274,8 +249,8 @@ def generate_service_mgmt(self, package: Package): log.info(f'generated {script_path}') def generate_systemd_service(self, package: Package): - """ Generates systemd service(s) file and timer(s) (if needed) for package. - + """ Generates systemd service(s) file and timer(s) (if needed) for package. + Args: package: Package object to generate service for. Returns: @@ -322,13 +297,13 @@ def generate_systemd_service(self, package: Package): def update_dependent_list_file(self, package: Package, remove=False): """ This function updates dependent list file for packages listed in "dependent-of" (path: /etc/sonic/_dependent file). + Args: package: Package to update packages dependent of it. - remove: True if update for removal process. Returns: None. - """ + """ name = package.manifest['service']['name'] dependent_of = package.manifest['service']['dependent-of'] host_service = package.manifest['service']['host-service'] @@ -362,6 +337,7 @@ def update_dependent(service, name, multi_inst): def generate_dump_script(self, package): """ Generates dump plugin script for package. + Args: package: Package object to generate dump plugin script for. Returns: @@ -387,7 +363,7 @@ def generate_dump_script(self, package): def get_shutdown_sequence(self, reboot_type: str, packages: Dict[str, Package]): """ Returns shutdown sequence file for particular reboot type. - + Args: reboot_type: Reboot type to generated service shutdown sequence for. 
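The shutdown-sequence computation here leans on toposort_flatten (imported at the top of creator.py); a toy graph shows the idea, with purely illustrative service names:

    from toposort import toposort_flatten

    # Each key depends on the services in its value set, so dependencies
    # come out first in the flattened order.
    graph = {
        'swss': set(),
        'syncd': {'swss'},
        'featureX': {'swss', 'syncd'},
    }
    print(toposort_flatten(graph, sort=True))  # ['swss', 'syncd', 'featureX']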
packages: Dict of installed packages. @@ -434,7 +410,7 @@ def filter_not_available(services): def generate_shutdown_sequence_file(self, reboot_type: str, packages: Dict[str, Package]): """ Generates shutdown sequence file for particular reboot type (path: /etc/sonic/-reboot_order). - + Args: reboot_type: Reboot type to generated service shutdown sequence for. packages: Dict of installed packages. @@ -445,11 +421,11 @@ def generate_shutdown_sequence_file(self, reboot_type: str, packages: Dict[str, order = self.get_shutdown_sequence(reboot_type, packages) with open(os.path.join(ETC_SONIC_PATH, f'{reboot_type}-reboot_order'), 'w') as file: file.write(' '.join(order)) - + def generate_shutdown_sequence_files(self, packages: Dict[str, Package]): - """ Generates shutdown sequence file for fast and warm reboot. + """ Generates shutdown sequence file for fast and warm reboot. (path: /etc/sonic/-reboot_order). - + Args: packages: Dict of installed packages. Returns: @@ -486,150 +462,64 @@ def set_initial_config(self, package): """ init_cfg = package.manifest['package']['init-cfg'] - if not init_cfg: - return - - for conn in self.sonic_db.get_connectors(): - cfg = conn.get_config() - new_cfg = init_cfg.copy() - sonic_cfggen.deep_update(new_cfg, cfg) - self.validate_config(new_cfg) - conn.mod_config(new_cfg) - - def remove_config(self, package): - """ Remove configuration based on package YANG module. - - Args: - package: Package object remove initial configuration for. - Returns: - None - """ - - if not package.metadata.yang_module_str: - return - module_name = self.cfg_mgmt.get_module_name(package.metadata.yang_module_str) - for tablename, module in self.cfg_mgmt.sy.confDbYangMap.items(): - if module.get('module') != module_name: + for tablename, content in init_cfg.items(): + if not isinstance(content, dict): continue - for conn in self.sonic_db.get_connectors(): - keys = conn.get_table(tablename).keys() - for key in keys: - conn.set_entry(tablename, key, None) - - def validate_config(self, config): - """ Validate configuration through YANG. - - Args: - config: Config DB data. - Returns: - None. - Raises: - Exception: if config does not pass YANG validation. - """ - - config = sonic_cfggen.FormatConverter.to_serialized(config) - log.debug(f'validating configuration {pformat(config)}') - # This will raise exception if configuration is not valid. - # NOTE: loadData() modifies the state of ConfigMgmt instance. - # This is not desired for configuration validation only purpose. - # Although the config loaded into ConfigMgmt instance is not - # interesting in this application so we don't care. - self.cfg_mgmt.loadData(config) - - def install_yang_module(self, package: Package): - """ Install package's yang module in the system. - - Args: - package: Package object. - Returns: - None - """ - - if not package.metadata.yang_module_str: - return - - self.cfg_mgmt.add_module(package.metadata.yang_module_str) + tables = self._get_tables(tablename) - def uninstall_yang_module(self, package: Package): - """ Uninstall package's yang module in the system. + for key in content: + for table in tables: + cfg = content[key] + exists, old_fvs = table.get(key) + if exists: + cfg.update(old_fvs) + fvs = list(cfg.items()) + table.set(key, fvs) - Args: - package: Package object. 
- Returns: - None - """ - - if not package.metadata.yang_module_str: - return - - module_name = self.cfg_mgmt.get_module_name(package.metadata.yang_module_str) - self.cfg_mgmt.remove_module(module_name) - - def install_autogen_cli_all(self, package: Package): - """ Install autogenerated CLI plugins for package. + def remove_config(self, package): + """ Remove configuration based on init-cfg tables, so having + init-cfg even with tables without keys might be a good idea. + TODO: init-cfg should be validated with yang model + TODO: remove config from tables known to yang model Args: - package: Package + package: Package object remove initial configuration for. Returns: None """ - for command in SONIC_CLI_COMMANDS: - self.install_autogen_cli(package, command) + init_cfg = package.manifest['package']['init-cfg'] - def uninstall_autogen_cli_all(self, package: Package): - """ Remove autogenerated CLI plugins for package. + for tablename, content in init_cfg.items(): + if not isinstance(content, dict): + continue - Args: - package: Package - Returns: - None - """ + tables = self._get_tables(tablename) - for command in SONIC_CLI_COMMANDS: - self.uninstall_autogen_cli(package, command) + for key in content: + for table in tables: + table._del(key) - def install_autogen_cli(self, package: Package, command: str): - """ Install autogenerated CLI plugins for package for particular command. + def _get_tables(self, table_name): + """ Return swsscommon Tables for all kinds of configuration DBs """ - Args: - package: Package. - command: Name of command to generate CLI for. - Returns: - None - """ + tables = [] - if package.metadata.yang_module_str is None: - return - if f'auto-generate-{command}' not in package.manifest['cli']: - return - if not package.manifest['cli'][f'auto-generate-{command}']: - return - module_name = self.cfg_mgmt.get_module_name(package.metadata.yang_module_str) - self.cli_gen.generate_cli_plugin(command, module_name) - log.debug(f'{command} command line interface autogenerated for {module_name}') + running_table = self.sonic_db.running_table(table_name) + if running_table is not None: + tables.append(running_table) - def uninstall_autogen_cli(self, package: Package, command: str): - """ Uninstall autogenerated CLI plugins for package for particular command. + persistent_table = self.sonic_db.persistent_table(table_name) + if persistent_table is not None: + tables.append(persistent_table) - Args: - package: Package. - command: Name of command to remove CLI. - Returns: - None - """ + initial_table = self.sonic_db.initial_table(table_name) + if initial_table is not None: + tables.append(initial_table) - if package.metadata.yang_module_str is None: - return - if f'auto-generate-{command}' not in package.manifest['cli']: - return - if not package.manifest['cli'][f'auto-generate-{command}']: - return - module_name = self.cfg_mgmt.get_module_name(package.metadata.yang_module_str) - self.cli_gen.remove_cli_plugin(command, module_name) - log.debug(f'{command} command line interface removed for {module_name}') + return tables def _post_operation_hook(self): """ Common operations executed after service is created/removed. 
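The merge rule in set_initial_config() above is easy to miss: field values already present in a DB win over the manifest's init-cfg defaults. A pure-Python sketch of just that rule, with made-up field names:

    init_cfg_entry = {'field_1': 'value_1', 'field_2': 'value_2'}
    exists, old_fvs = True, [('field_2', 'original_value_2')]  # pretend DB state

    cfg = dict(init_cfg_entry)
    if exists:
        cfg.update(dict(old_fvs))  # existing DB values override init-cfg defaults
    assert list(cfg.items()) == [('field_1', 'value_1'),
                                 ('field_2', 'original_value_2')]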
""" diff --git a/sonic_package_manager/service_creator/feature.py b/sonic_package_manager/service_creator/feature.py index eb8e1a0710..4df06384d2 100644 --- a/sonic_package_manager/service_creator/feature.py +++ b/sonic_package_manager/service_creator/feature.py @@ -16,14 +16,6 @@ } -def is_enabled(cfg): - return cfg.get('state', 'disabled').lower() == 'enabled' - - -def is_multi_instance(cfg): - return str(cfg.get('has_per_asic_scope', 'False')).lower() == 'true' - - class FeatureRegistry: """ FeatureRegistry class provides an interface to register/de-register new feature persistently. """ @@ -35,93 +27,51 @@ def register(self, manifest: Manifest, state: str = 'disabled', owner: str = 'local'): - """ Register feature in CONFIG DBs. - - Args: - manifest: Feature's manifest. - state: Desired feature admin state. - owner: Owner of this feature (kube/local). - Returns: - None. - """ - name = manifest['service']['name'] - db_connectors = self._sonic_db.get_connectors() - cfg_entries = self.get_default_feature_entries(state, owner) - non_cfg_entries = self.get_non_configurable_feature_entries(manifest) + for table in self._get_tables(): + cfg_entries = self.get_default_feature_entries(state, owner) + non_cfg_entries = self.get_non_configurable_feature_entries(manifest) - for conn in db_connectors: - current_cfg = conn.get_entry(FEATURE, name) + exists, current_cfg = table.get(name) new_cfg = cfg_entries.copy() # Override configurable entries with CONFIG DB data. - new_cfg = {**new_cfg, **current_cfg} + new_cfg = {**new_cfg, **dict(current_cfg)} # Override CONFIG DB data with non configurable entries. new_cfg = {**new_cfg, **non_cfg_entries} - conn.set_entry(FEATURE, name, new_cfg) + table.set(name, list(new_cfg.items())) def deregister(self, name: str): - """ Deregister feature by name. - - Args: - name: Name of the feature in CONFIG DB. - Returns: - None - """ - - db_connetors = self._sonic_db.get_connectors() - for conn in db_connetors: - conn.set_entry(FEATURE, name, None) - - def update(self, - old_manifest: Manifest, - new_manifest: Manifest): - """ Migrate feature configuration. It can be that non-configurable - feature entries have to be updated. e.g: "has_timer" for example if - the new feature introduces a service timer or name of the service has - changed, but user configurable entries are not changed). - - Args: - old_manifest: Old feature manifest. - new_manifest: New feature manifest. - Returns: - None - """ - - old_name = old_manifest['service']['name'] - new_name = new_manifest['service']['name'] - db_connectors = self._sonic_db.get_connectors() - non_cfg_entries = self.get_non_configurable_feature_entries(new_manifest) - - for conn in db_connectors: - current_cfg = conn.get_entry(FEATURE, old_name) - conn.set_entry(FEATURE, old_name, None) - - new_cfg = current_cfg.copy() - # Override CONFIG DB data with non configurable entries. - new_cfg = {**new_cfg, **non_cfg_entries} - - conn.set_entry(FEATURE, new_name, new_cfg) + for table in self._get_tables(): + table._del(name) def is_feature_enabled(self, name: str) -> bool: """ Returns whether the feature is current enabled or not. Accesses running CONFIG DB. If no running CONFIG_DB table is found in tables returns False. 
""" - conn = self._sonic_db.get_running_db_connector() - if conn is None: + running_db_table = self._sonic_db.running_table(FEATURE) + if running_db_table is None: return False - cfg = conn.get_entry(FEATURE, name) - return is_enabled(cfg) + exists, cfg = running_db_table.get(name) + if not exists: + return False + cfg = dict(cfg) + return cfg.get('state').lower() == 'enabled' def get_multi_instance_features(self): - """ Returns a list of features which run in asic namespace. """ - - conn = self._sonic_db.get_initial_db_connector() - features = conn.get_table(FEATURE) - return [feature for feature, cfg in features.items() if is_multi_instance(cfg)] + res = [] + init_db_table = self._sonic_db.initial_table(FEATURE) + for feature in init_db_table.keys(): + exists, cfg = init_db_table.get(feature) + assert exists + cfg = dict(cfg) + asic_flag = str(cfg.get('has_per_asic_scope', 'False')) + if asic_flag.lower() == 'true': + res.append(feature) + return res @staticmethod def get_default_feature_entries(state=None, owner=None) -> Dict[str, str]: @@ -144,3 +94,15 @@ def get_non_configurable_feature_entries(manifest) -> Dict[str, str]: 'has_global_scope': str(manifest['service']['host-service']), 'has_timer': str(manifest['service']['delayed']), } + + def _get_tables(self): + tables = [] + running = self._sonic_db.running_table(FEATURE) + if running is not None: # it's Ok if there is no database container running + tables.append(running) + persistent = self._sonic_db.persistent_table(FEATURE) + if persistent is not None: # it's Ok if there is no config_db.json + tables.append(persistent) + tables.append(self._sonic_db.initial_table(FEATURE)) # init_cfg.json is must + + return tables diff --git a/sonic_package_manager/service_creator/sonic_db.py b/sonic_package_manager/service_creator/sonic_db.py index 6b617cb802..a064c60c4a 100644 --- a/sonic_package_manager/service_creator/sonic_db.py +++ b/sonic_package_manager/service_creator/sonic_db.py @@ -6,8 +6,6 @@ from swsscommon import swsscommon -from config.config_mgmt import sonic_cfggen - from sonic_package_manager.service_creator import ETC_SONIC_PATH from sonic_package_manager.service_creator.utils import in_chroot @@ -16,74 +14,46 @@ INIT_CFG_JSON = os.path.join(ETC_SONIC_PATH, 'init_cfg.json') -class PersistentConfigDbConnector: - """ This class implements swsscommon.ConfigDBConnector methods for persistent DBs (JSON files). - For method description refer to swsscommon.ConfigDBConnector. - """ +class FileDbTable: + """ swsscommon.Table adapter for persistent DBs. 
""" - def __init__(self, filepath): - self._filepath = filepath + def __init__(self, file, table): + self._file = file + self._table = table - def get_config(self): - with open(self._filepath) as stream: + def keys(self): + with open(self._file) as stream: config = json.load(stream) - config = sonic_cfggen.FormatConverter.to_deserialized(config) - return config - - def get_entry(self, table, key): - table = table.upper() - table_data = self.get_table(table) - return table_data.get(key, {}) - - def get_table(self, table): - table = table.upper() - config = self.get_config() - return config.get(table, {}) - - def set_entry(self, table, key, data): - table = table.upper() - config = self.get_config() - if data is None: - self._del_key(config, table, key) - else: - table_data = config.setdefault(table, {}) - table_data[key] = data - self._write_config(config) - - def mod_entry(self, table, key, data): - table = table.upper() - config = self.get_config() - if data is None: - self._del_key(config, table, key) - else: - table_data = config.setdefault(table, {}) - curr_data = table_data.setdefault(key, {}) - curr_data.update(data) - self._write_config(config) - - def mod_config(self, config): - for table_name in config: - table_data = config[table_name] - if table_data is None: - self._del_table(config, table_name) - continue - for key in table_data: - self.mod_entry(table_name, key, table_data[key]) - - def _del_table(self, config, table): - with contextlib.suppress(KeyError): - config.pop(table) + return config.get(self._table, {}).keys() - def _del_key(self, config, table, key): - with contextlib.suppress(KeyError): - config[table].pop(key) + def get(self, key): + with open(self._file) as stream: + config = json.load(stream) - if not config[table]: - self._del_table(config, table) + table = config.get(self._table, {}) + exists = key in table + fvs_dict = table.get(key, {}) + fvs = list(fvs_dict.items()) + return exists, fvs - def _write_config(self, config): - config = sonic_cfggen.FormatConverter.to_serialized(config) - with open(self._filepath, 'w') as stream: + def set(self, key, fvs): + with open(self._file) as stream: + config = json.load(stream) + + table = config.setdefault(self._table, {}) + table.update({key: dict(fvs)}) + + with open(self._file, 'w') as stream: + json.dump(config, stream, indent=4) + + def _del(self, key): + with open(self._file) as stream: + config = json.load(stream) + + with contextlib.suppress(KeyError): + config[self._table].pop(key) + + with open(self._file, 'w') as stream: json.dump(config, stream, indent=4) @@ -92,52 +62,37 @@ class SonicDB: running DB and also for persistent and initial configs. """ - _running_db_conn = None - - @classmethod - def get_connectors(cls): - """ Yields available DBs connectors. """ - - initial_db_conn = cls.get_initial_db_connector() - persistent_db_conn = cls.get_persistent_db_connector() - running_db_conn = cls.get_running_db_connector() - - yield initial_db_conn - if persistent_db_conn is not None: - yield persistent_db_conn - if running_db_conn is not None: - yield running_db_conn + _running = None @classmethod - def get_running_db_connector(cls): - """ Returns running DB connector. """ + def running_table(cls, table): + """ Returns running DB table. """ # In chroot we can connect to a running # DB via TCP socket, we should ignore this case. 
if in_chroot(): return None - if cls._running_db_conn is None: + if cls._running is None: try: - cls._running_db_conn = swsscommon.ConfigDBConnector() - cls._running_db_conn.connect() + cls._running = swsscommon.DBConnector(CONFIG_DB, 0) except RuntimeError: # Failed to connect to DB. - cls._running_db_conn = None + return None - return cls._running_db_conn + return swsscommon.Table(cls._running, table) @classmethod - def get_persistent_db_connector(cls): - """ Returns persistent DB connector. """ + def persistent_table(cls, table): + """ Returns persistent DB table. """ if not os.path.exists(CONFIG_DB_JSON): return None - return PersistentConfigDbConnector(CONFIG_DB_JSON) + return FileDbTable(CONFIG_DB_JSON, table) @classmethod - def get_initial_db_connector(cls): - """ Returns initial DB connector. """ + def initial_table(cls, table): + """ Returns initial DB table. """ - return PersistentConfigDbConnector(INIT_CFG_JSON) + return FileDbTable(INIT_CFG_JSON, table) diff --git a/tests/sonic_package_manager/conftest.py b/tests/sonic_package_manager/conftest.py index 1ec067657c..2788a75cd3 100644 --- a/tests/sonic_package_manager/conftest.py +++ b/tests/sonic_package_manager/conftest.py @@ -7,8 +7,6 @@ import pytest from docker_image.reference import Reference -from config.config_mgmt import ConfigMgmt - from sonic_package_manager.database import PackageDatabase, PackageEntry from sonic_package_manager.manager import DockerApi, PackageManager from sonic_package_manager.manifest import Manifest @@ -64,17 +62,7 @@ def mock_service_creator(): @pytest.fixture def mock_sonic_db(): - yield MagicMock() - - -@pytest.fixture -def mock_config_mgmt(): - yield MagicMock() - - -@pytest.fixture -def mock_cli_gen(): - yield MagicMock() + yield Mock() @pytest.fixture @@ -119,7 +107,7 @@ def __init__(self): 'before': ['swss'], } ) - self.add('Azure/docker-test', '1.6.0', 'test-package', '1.6.0', yang='TEST') + self.add('Azure/docker-test', '1.6.0', 'test-package', '1.6.0') self.add('Azure/docker-test-2', '1.5.0', 'test-package-2', '1.5.0') self.add('Azure/docker-test-2', '2.0.0', 'test-package-2', '2.0.0') self.add('Azure/docker-test-3', 'latest', 'test-package-3', '1.6.0') @@ -136,26 +124,23 @@ def __init__(self): def from_registry(self, repository: str, reference: str): manifest = Manifest.marshal(self.metadata_store[repository][reference]['manifest']) components = self.metadata_store[repository][reference]['components'] - yang = self.metadata_store[repository][reference]['yang'] - return Metadata(manifest, components, yang) + return Metadata(manifest, components) def from_local(self, image: str): ref = Reference.parse(image) manifest = Manifest.marshal(self.metadata_store[ref['name']][ref['tag']]['manifest']) components = self.metadata_store[ref['name']][ref['tag']]['components'] - yang = self.metadata_store[ref['name']][ref['tag']]['yang'] - return Metadata(manifest, components, yang) + return Metadata(manifest, components) def from_tarball(self, filepath: str) -> Manifest: path, ref = filepath.split(':') manifest = Manifest.marshal(self.metadata_store[path][ref]['manifest']) components = self.metadata_store[path][ref]['components'] - yang = self.metadata_store[path][ref]['yang'] - return Metadata(manifest, components, yang) + return Metadata(manifest, components) def add(self, repo, reference, name, version, components=None, warm_shutdown=None, fast_shutdown=None, - processes=None, yang=None): + processes=None): repo_dict = self.metadata_store.setdefault(repo, {}) repo_dict[reference] = { 'manifest': { 
@@ -172,7 +157,6 @@ def add(self, repo, reference, name, version, components=None, 'processes': processes or [], }, 'components': components or {}, - 'yang': yang, } yield FakeMetadataResolver() @@ -268,7 +252,7 @@ def fake_db(fake_metadata_resolver): description='SONiC Package Manager Test Package', default_reference='1.6.0', installed=False, - built_in=False, + built_in=False ) add_package( content, @@ -418,8 +402,8 @@ def sonic_fs(fs): @pytest.fixture(autouse=True) def patch_pkgutil(): - with mock.patch('pkgutil.get_loader') as loader: - yield loader + with mock.patch('pkgutil.get_loader'): + yield @pytest.fixture diff --git a/tests/sonic_package_manager/test_service_creator.py b/tests/sonic_package_manager/test_service_creator.py index 456cc71a4a..ffa6737531 100644 --- a/tests/sonic_package_manager/test_service_creator.py +++ b/tests/sonic_package_manager/test_service_creator.py @@ -1,8 +1,7 @@ #!/usr/bin/env python import os -import copy -from unittest.mock import Mock, call +from unittest.mock import Mock, MagicMock import pytest @@ -60,25 +59,13 @@ def manifest(): }) -@pytest.fixture() -def service_creator(mock_feature_registry, - mock_sonic_db, - mock_cli_gen, - mock_config_mgmt): - yield ServiceCreator( - mock_feature_registry, - mock_sonic_db, - mock_cli_gen, - mock_config_mgmt - ) - - -def test_service_creator(sonic_fs, manifest, service_creator, package_manager): +def test_service_creator(sonic_fs, manifest, package_manager, mock_feature_registry, mock_sonic_db): + creator = ServiceCreator(mock_feature_registry, mock_sonic_db) entry = PackageEntry('test', 'azure/sonic-test') package = Package(entry, Metadata(manifest)) installed_packages = package_manager._get_installed_packages_and(package) - service_creator.create(package) - service_creator.generate_shutdown_sequence_files(installed_packages) + creator.create(package) + creator.generate_shutdown_sequence_files(installed_packages) assert sonic_fs.exists(os.path.join(ETC_SONIC_PATH, 'swss_dependent')) assert sonic_fs.exists(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, 'test.sh')) @@ -94,200 +81,122 @@ def read_file(name): assert read_file('test_reconcile') == 'test-process test-process-3' -def test_service_creator_with_timer_unit(sonic_fs, manifest, service_creator): +def test_service_creator_with_timer_unit(sonic_fs, manifest, mock_feature_registry, mock_sonic_db): + creator = ServiceCreator(mock_feature_registry, mock_sonic_db) entry = PackageEntry('test', 'azure/sonic-test') package = Package(entry, Metadata(manifest)) - service_creator.create(package) + creator.create(package) assert not sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) manifest['service']['delayed'] = True package = Package(entry, Metadata(manifest)) - service_creator.create(package) + creator.create(package) assert sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) -def test_service_creator_with_debug_dump(sonic_fs, manifest, service_creator): +def test_service_creator_with_debug_dump(sonic_fs, manifest, mock_feature_registry, mock_sonic_db): + creator = ServiceCreator(mock_feature_registry, mock_sonic_db) entry = PackageEntry('test', 'azure/sonic-test') package = Package(entry, Metadata(manifest)) - service_creator.create(package) + creator.create(package) assert not sonic_fs.exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, 'test')) manifest['package']['debug-dump'] = '/some/command' package = Package(entry, Metadata(manifest)) - service_creator.create(package) + creator.create(package) assert 
sonic_fs.exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, 'test')) -def test_service_creator_yang(sonic_fs, manifest, mock_sonic_db, - mock_config_mgmt, service_creator): - test_yang = 'TEST YANG' - test_yang_module = 'sonic-test' +def test_service_creator_initial_config(sonic_fs, manifest, mock_feature_registry, mock_sonic_db): + mock_table = Mock() + mock_table.get = Mock(return_value=(True, (('field_2', 'original_value_2'),))) + mock_sonic_db.initial_table = Mock(return_value=mock_table) + mock_sonic_db.persistent_table = Mock(return_value=mock_table) + mock_sonic_db.running_table = Mock(return_value=mock_table) - mock_connector = Mock() - mock_sonic_db.get_connectors = Mock(return_value=[mock_connector]) - mock_connector.get_table = Mock(return_value={'key_a': {'field_1': 'value_1'}}) - mock_connector.get_config = Mock(return_value={ - 'TABLE_A': mock_connector.get_table('') - }) + creator = ServiceCreator(mock_feature_registry, mock_sonic_db) entry = PackageEntry('test', 'azure/sonic-test') - package = Package(entry, Metadata(manifest, yang_module_str=test_yang)) - service_creator.create(package) + package = Package(entry, Metadata(manifest)) + creator.create(package) - mock_config_mgmt.add_module.assert_called_with(test_yang) - mock_config_mgmt.get_module_name = Mock(return_value=test_yang_module) + assert not sonic_fs.exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, 'test')) manifest['package']['init-cfg'] = { 'TABLE_A': { 'key_a': { - 'field_1': 'new_value_1', + 'field_1': 'value_1', 'field_2': 'value_2' }, }, } - package = Package(entry, Metadata(manifest, yang_module_str=test_yang)) - - service_creator.create(package) - - mock_config_mgmt.add_module.assert_called_with(test_yang) - - mock_connector.mod_config.assert_called_with( - { - 'TABLE_A': { - 'key_a': { - 'field_1': 'value_1', - 'field_2': 'value_2', - }, - }, - } - ) - - mock_config_mgmt.sy.confDbYangMap = { - 'TABLE_A': {'module': test_yang_module} - } - - service_creator.remove(package) - mock_connector.set_entry.assert_called_with('TABLE_A', 'key_a', None) - mock_config_mgmt.remove_module.assert_called_with(test_yang_module) - - -def test_service_creator_autocli(sonic_fs, manifest, mock_cli_gen, - mock_config_mgmt, service_creator): - test_yang = 'TEST YANG' - test_yang_module = 'sonic-test' + package = Package(entry, Metadata(manifest)) - manifest['cli']['auto-generate-show'] = True - manifest['cli']['auto-generate-config'] = True + creator.create(package) + mock_table.set.assert_called_with('key_a', [('field_1', 'value_1'), + ('field_2', 'original_value_2')]) - entry = PackageEntry('test', 'azure/sonic-test') - package = Package(entry, Metadata(manifest, yang_module_str=test_yang)) - mock_config_mgmt.get_module_name = Mock(return_value=test_yang_module) - service_creator.create(package) - - mock_cli_gen.generate_cli_plugin.assert_has_calls( - [ - call('show', test_yang_module), - call('config', test_yang_module), - ], - any_order=True - ) - - service_creator.remove(package) - mock_cli_gen.remove_cli_plugin.assert_has_calls( - [ - call('show', test_yang_module), - call('config', test_yang_module), - ], - any_order=True - ) + creator.remove(package) + mock_table._del.assert_called_with('key_a') def test_feature_registration(mock_sonic_db, manifest): - mock_connector = Mock() - mock_connector.get_entry = Mock(return_value={}) - mock_sonic_db.get_connectors = Mock(return_value=[mock_connector]) + mock_feature_table = Mock() + mock_feature_table.get = Mock(return_value=(False, ())) + mock_sonic_db.initial_table = 
Mock(return_value=mock_feature_table) + mock_sonic_db.persistent_table = Mock(return_value=mock_feature_table) + mock_sonic_db.running_table = Mock(return_value=mock_feature_table) feature_registry = FeatureRegistry(mock_sonic_db) feature_registry.register(manifest) - mock_connector.set_entry.assert_called_with('FEATURE', 'test', { - 'state': 'disabled', - 'auto_restart': 'enabled', - 'high_mem_alert': 'disabled', - 'set_owner': 'local', - 'has_per_asic_scope': 'False', - 'has_global_scope': 'True', - 'has_timer': 'False', - }) - - -def test_feature_update(mock_sonic_db, manifest): - curr_feature_config = { - 'state': 'enabled', - 'auto_restart': 'enabled', - 'high_mem_alert': 'disabled', - 'set_owner': 'local', - 'has_per_asic_scope': 'False', - 'has_global_scope': 'True', - 'has_timer': 'False', - } - mock_connector = Mock() - mock_connector.get_entry = Mock(return_value=curr_feature_config) - mock_sonic_db.get_connectors = Mock(return_value=[mock_connector]) - feature_registry = FeatureRegistry(mock_sonic_db) - - new_manifest = copy.deepcopy(manifest) - new_manifest['service']['name'] = 'test_new' - new_manifest['service']['delayed'] = True - - feature_registry.update(manifest, new_manifest) - - mock_connector.set_entry.assert_has_calls([ - call('FEATURE', 'test', None), - call('FEATURE', 'test_new', { - 'state': 'enabled', - 'auto_restart': 'enabled', - 'high_mem_alert': 'disabled', - 'set_owner': 'local', - 'has_per_asic_scope': 'False', - 'has_global_scope': 'True', - 'has_timer': 'True', - }), - ], any_order=True) + mock_feature_table.set.assert_called_with('test', [ + ('state', 'disabled'), + ('auto_restart', 'enabled'), + ('high_mem_alert', 'disabled'), + ('set_owner', 'local'), + ('has_per_asic_scope', 'False'), + ('has_global_scope', 'True'), + ('has_timer', 'False'), + ]) def test_feature_registration_with_timer(mock_sonic_db, manifest): manifest['service']['delayed'] = True - mock_connector = Mock() - mock_connector.get_entry = Mock(return_value={}) - mock_sonic_db.get_connectors = Mock(return_value=[mock_connector]) + mock_feature_table = Mock() + mock_feature_table.get = Mock(return_value=(False, ())) + mock_sonic_db.initial_table = Mock(return_value=mock_feature_table) + mock_sonic_db.persistent_table = Mock(return_value=mock_feature_table) + mock_sonic_db.running_table = Mock(return_value=mock_feature_table) feature_registry = FeatureRegistry(mock_sonic_db) feature_registry.register(manifest) - mock_connector.set_entry.assert_called_with('FEATURE', 'test', { - 'state': 'disabled', - 'auto_restart': 'enabled', - 'high_mem_alert': 'disabled', - 'set_owner': 'local', - 'has_per_asic_scope': 'False', - 'has_global_scope': 'True', - 'has_timer': 'True', - }) + mock_feature_table.set.assert_called_with('test', [ + ('state', 'disabled'), + ('auto_restart', 'enabled'), + ('high_mem_alert', 'disabled'), + ('set_owner', 'local'), + ('has_per_asic_scope', 'False'), + ('has_global_scope', 'True'), + ('has_timer', 'True'), + ]) def test_feature_registration_with_non_default_owner(mock_sonic_db, manifest): - mock_connector = Mock() - mock_connector.get_entry = Mock(return_value={}) - mock_sonic_db.get_connectors = Mock(return_value=[mock_connector]) + mock_feature_table = Mock() + mock_feature_table.get = Mock(return_value=(False, ())) + mock_sonic_db.initial_table = Mock(return_value=mock_feature_table) + mock_sonic_db.persistent_table = Mock(return_value=mock_feature_table) + mock_sonic_db.running_table = Mock(return_value=mock_feature_table) feature_registry = 
FeatureRegistry(mock_sonic_db) feature_registry.register(manifest, owner='kube') - mock_connector.set_entry.assert_called_with('FEATURE', 'test', { - 'state': 'disabled', - 'auto_restart': 'enabled', - 'high_mem_alert': 'disabled', - 'set_owner': 'kube', - 'has_per_asic_scope': 'False', - 'has_global_scope': 'True', - 'has_timer': 'False', - }) + mock_feature_table.set.assert_called_with('test', [ + ('state', 'disabled'), + ('auto_restart', 'enabled'), + ('high_mem_alert', 'disabled'), + ('set_owner', 'kube'), + ('has_per_asic_scope', 'False'), + ('has_global_scope', 'True'), + ('has_timer', 'False'), + ]) From 7b08c27c807ff874b055cccbccabc55fd7c85463 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Wed, 11 Aug 2021 23:58:05 +0000 Subject: [PATCH 14/60] Revert "CLI GEN-1 merged" This reverts commit 7be9ee41a5e8616396c37b6bc2f3a2136fd019e6. --- clear/plugins/auto/__init__.py | 0 config/plugins/auto/__init__.py | 0 setup.py | 5 - show/plugins/auto/__init__.py | 0 .../bash_completion.d/sonic-cli-gen | 8 - sonic-utilities-data/debian/install | 5 +- .../templates/sonic-cli-gen/common.j2 | 3 - .../templates/sonic-cli-gen/config.py.j2 | 481 ------------- .../templates/sonic-cli-gen/show.py.j2 | 245 ------- sonic_cli_gen/__init__.py | 6 - sonic_cli_gen/generator.py | 67 -- sonic_cli_gen/main.py | 51 -- sonic_cli_gen/yang_parser.py | 679 ------------------ .../cli_autogen_input/assert_dictionaries.py | 625 ---------------- tests/cli_autogen_input/config_db.json | 544 -------------- tests/cli_autogen_input/sonic-1-list.yang | 29 - .../sonic-1-object-container.yang | 23 - .../sonic-1-table-container.yang | 17 - tests/cli_autogen_input/sonic-2-lists.yang | 42 -- .../sonic-2-object-containers.yang | 29 - .../sonic-2-table-containers.yang | 23 - .../sonic-choice-complex.yang | 91 --- .../sonic-dynamic-object-complex-1.yang | 57 -- .../sonic-dynamic-object-complex-2.yang | 84 --- tests/cli_autogen_input/sonic-grouping-1.yang | 25 - tests/cli_autogen_input/sonic-grouping-2.yang | 25 - .../sonic-grouping-complex.yang | 96 --- .../sonic-static-object-complex-1.yang | 49 -- .../sonic-static-object-complex-2.yang | 71 -- tests/cli_autogen_yang_parser_test.py | 196 ----- utilities_common/util_base.py | 1 - 31 files changed, 2 insertions(+), 3575 deletions(-) delete mode 100644 clear/plugins/auto/__init__.py delete mode 100644 config/plugins/auto/__init__.py delete mode 100644 show/plugins/auto/__init__.py delete mode 100644 sonic-utilities-data/bash_completion.d/sonic-cli-gen delete mode 100644 sonic-utilities-data/templates/sonic-cli-gen/common.j2 delete mode 100644 sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 delete mode 100644 sonic-utilities-data/templates/sonic-cli-gen/show.py.j2 delete mode 100644 sonic_cli_gen/__init__.py delete mode 100644 sonic_cli_gen/generator.py delete mode 100644 sonic_cli_gen/main.py delete mode 100644 sonic_cli_gen/yang_parser.py delete mode 100644 tests/cli_autogen_input/assert_dictionaries.py delete mode 100644 tests/cli_autogen_input/config_db.json delete mode 100644 tests/cli_autogen_input/sonic-1-list.yang delete mode 100644 tests/cli_autogen_input/sonic-1-object-container.yang delete mode 100644 tests/cli_autogen_input/sonic-1-table-container.yang delete mode 100644 tests/cli_autogen_input/sonic-2-lists.yang delete mode 100644 tests/cli_autogen_input/sonic-2-object-containers.yang delete mode 100644 tests/cli_autogen_input/sonic-2-table-containers.yang delete mode 100644 tests/cli_autogen_input/sonic-choice-complex.yang delete mode 100644 
tests/cli_autogen_input/sonic-dynamic-object-complex-1.yang delete mode 100644 tests/cli_autogen_input/sonic-dynamic-object-complex-2.yang delete mode 100644 tests/cli_autogen_input/sonic-grouping-1.yang delete mode 100644 tests/cli_autogen_input/sonic-grouping-2.yang delete mode 100644 tests/cli_autogen_input/sonic-grouping-complex.yang delete mode 100644 tests/cli_autogen_input/sonic-static-object-complex-1.yang delete mode 100644 tests/cli_autogen_input/sonic-static-object-complex-2.yang delete mode 100644 tests/cli_autogen_yang_parser_test.py diff --git a/clear/plugins/auto/__init__.py b/clear/plugins/auto/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/config/plugins/auto/__init__.py b/config/plugins/auto/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/setup.py b/setup.py index 1b8bda9985..c80e11e65b 100644 --- a/setup.py +++ b/setup.py @@ -35,10 +35,8 @@ def run_tests(self): 'acl_loader', 'clear', 'clear.plugins', - 'clear.plugins.auto', 'config', 'config.plugins', - 'config.plugins.auto', 'connect', 'consutil', 'counterpoll', @@ -60,7 +58,6 @@ def run_tests(self): 'show', 'show.interfaces', 'show.plugins', - 'show.plugins.auto', 'sonic_installer', 'sonic_installer.bootloader', 'sonic_package_manager', @@ -69,7 +66,6 @@ def run_tests(self): 'undebug', 'utilities_common', 'watchdogutil', - 'sonic_cli_gen', ], package_data={ 'show': ['aliases.ini'], @@ -180,7 +176,6 @@ def run_tests(self): 'spm = sonic_package_manager.main:cli', 'undebug = undebug.main:cli', 'watchdogutil = watchdogutil.main:watchdogutil', - 'sonic-cli-gen = sonic_cli_gen.main:cli', ] }, install_requires=[ diff --git a/show/plugins/auto/__init__.py b/show/plugins/auto/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/sonic-utilities-data/bash_completion.d/sonic-cli-gen b/sonic-utilities-data/bash_completion.d/sonic-cli-gen deleted file mode 100644 index 3327f9c513..0000000000 --- a/sonic-utilities-data/bash_completion.d/sonic-cli-gen +++ /dev/null @@ -1,8 +0,0 @@ -_sonic_cli_gen_completion() { - COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ - COMP_CWORD=$COMP_CWORD \ - _SONIC_CLI_GEN_COMPLETE=complete $1 ) ) - return 0 -} - -complete -F _sonic_cli_gen_completion -o default sonic-cli-gen; diff --git a/sonic-utilities-data/debian/install b/sonic-utilities-data/debian/install index 1f67b78c20..82d087d54d 100644 --- a/sonic-utilities-data/debian/install +++ b/sonic-utilities-data/debian/install @@ -1,3 +1,2 @@ -bash_completion.d/ /etc/ -templates/*.j2 /usr/share/sonic/templates/ -templates/sonic-cli-gen/*.j2 /usr/share/sonic/templates/sonic-cli-gen/ +bash_completion.d/ /etc/ +templates/*.j2 /usr/share/sonic/templates/ diff --git a/sonic-utilities-data/templates/sonic-cli-gen/common.j2 b/sonic-utilities-data/templates/sonic-cli-gen/common.j2 deleted file mode 100644 index 3b83ee5635..0000000000 --- a/sonic-utilities-data/templates/sonic-cli-gen/common.j2 +++ /dev/null @@ -1,3 +0,0 @@ -{% macro cli_name(name) -%} -{{ name|lower|replace("_", "-") }} -{%- endmacro %} diff --git a/sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 b/sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 deleted file mode 100644 index 402b7e3dd2..0000000000 --- a/sonic-utilities-data/templates/sonic-cli-gen/config.py.j2 +++ /dev/null @@ -1,481 +0,0 @@ -{%- from "common.j2" import cli_name -%} -""" -Autogenerated config CLI plugin. 
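The template's update_entry_validated() helper (shown further below) treats a None value as "delete this attribute"; in isolation the rule looks like this:

    def apply_update(entry: dict, data: dict) -> dict:
        # None deletes an attribute, anything else overwrites it.
        for attr, value in data.items():
            if value is None:
                entry.pop(attr, None)
            else:
                entry[attr] = value
        return entry

    assert apply_update({'a': '1', 'b': '2'},
                        {'a': None, 'c': '3'}) == {'b': '2', 'c': '3'}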
-{% if source_template is defined %} -Source template: {{ source_template }} -{% endif %} -{% if source_yang_module is defined %} -Source YANG module: {{ source_yang_module }} -{% endif %} -""" - -import click -import utilities_common.cli as clicommon -import utilities_common.general as general -from config import config_mgmt - - -# Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. -sonic_cfggen = general.load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') - - -def exit_with_error(*args, **kwargs): - """ Print a message and abort CLI. """ - - click.secho(*args, **kwargs) - raise click.Abort() - - -def validate_config_or_raise(cfg): - """ Validate config db data using ConfigMgmt """ - - try: - cfg = sonic_cfggen.FormatConverter.to_serialized(cfg) - config_mgmt.ConfigMgmt().loadData(cfg) - except Exception as err: - raise Exception('Failed to validate configuration: {}'.format(err)) - - -def add_entry_validated(db, table, key, data): - """ Add new entry in table and validate configuration """ - - cfg = db.get_config() - cfg.setdefault(table, {}) - if key in cfg[table]: - raise Exception(f"{key} already exists") - - cfg[table][key] = data - - validate_config_or_raise(cfg) - db.set_entry(table, key, data) - - -def update_entry_validated(db, table, key, data, create_if_not_exists=False): - """ Update entry in table and validate configuration. - If attribute value in data is None, the attribute is deleted. - """ - - cfg = db.get_config() - cfg.setdefault(table, {}) - - if create_if_not_exists: - cfg[table].setdefault(key, {}) - - if key not in cfg[table]: - raise Exception(f"{key} does not exist") - - for attr, value in data.items(): - if value is None and attr in cfg[table][key]: - cfg[table][key].pop(attr) - else: - cfg[table][key][attr] = value - - validate_config_or_raise(cfg) - db.set_entry(table, key, cfg[table][key]) - - -def del_entry_validated(db, table, key): - """ Delete entry in table and validate configuration """ - - cfg = db.get_config() - cfg.setdefault(table, {}) - if key not in cfg[table]: - raise Exception(f"{key} does not exist") - - cfg[table].pop(key) - - validate_config_or_raise(cfg) - db.set_entry(table, key, None) - - -def add_list_entry_validated(db, table, key, attr, data): - """ Add new entry into list in table and validate configuration""" - - cfg = db.get_config() - cfg.setdefault(table, {}) - if key not in cfg[table]: - raise Exception(f"{key} does not exist") - cfg[table][key].setdefault(attr, []) - for entry in data: - if entry in cfg[table][key][attr]: - raise Exception(f"{entry} already exists") - cfg[table][key][attr].append(entry) - - validate_config_or_raise(cfg) - db.set_entry(table, key, cfg[table][key]) - - -def del_list_entry_validated(db, table, key, attr, data): - """ Delete entry from list in table and validate configuration""" - - cfg = db.get_config() - cfg.setdefault(table, {}) - if key not in cfg[table]: - raise Exception(f"{key} does not exist") - cfg[table][key].setdefault(attr, []) - for entry in data: - if entry not in cfg[table][key][attr]: - raise Exception(f"{entry} does not exist") - cfg[table][key][attr].remove(entry) - if not cfg[table][key][attr]: - cfg[table][key].pop(attr) - - validate_config_or_raise(cfg) - db.set_entry(table, key, cfg[table][key]) - - -def clear_list_entry_validated(db, table, key, attr): - """ Clear list in object and validate configuration""" - - update_entry_validated(db, table, key, {attr: None}) - - -{# Generate click arguments macro -Jinja2 Call: - 
{{ gen_click_arguments([{"name": "leaf1", "is-leaf-list": False}, - {"name": "leaf2", "is-leaf-list": Talse}) }} -Result: -@click.argument( - "leaf1", - nargs=1, - required=True, -) -@click.argument( - "leaf2", - nargs=-1, - required=True, -) -#} -{%- macro gen_click_arguments(attrs) -%} -{%- for attr in attrs %} -@click.argument( - "{{ cli_name(attr.name) }}", - nargs={% if attr["is-leaf-list"] %}-1{% else %}1{% endif %}, - required=True, -) -{%- endfor %} -{%- endmacro %} - - -{# Generate click options macro -Jinja2 Call: - {{ gen_click_arguments([{"name": "leaf1", "is-mandatory": True, "description": "leaf1-desc"}, - {"name": "leaf2", "is-mandatory": False, "description": "leaf2-desc"}) }} -Result: -@click.option( - "--leaf1", - help="leaf1-desc [mandatory]", -) -@click.option( - "--leaf2", - help="leaf2-desc", -) -#} -{%- macro gen_click_options(attrs) -%} -{%- for attr in attrs %} -@click.option( - "--{{ cli_name(attr.name) }}", - help="{{ attr.description }}{% if attr['is-mandatory'] %}[mandatory]{% endif %}", -) -{%- endfor %} -{%- endmacro %} - -{# Generate valid python identifier from input names #} -{% macro pythonize(attrs) -%} -{{ attrs|map(attribute="name")|map("lower")|map("replace", "-", "_")|join(", ") }} -{%- endmacro %} - -{% macro gen_cfg_obj_list_update(group, table, object, attr) %} -{% set list_update_group = group + "_" + attr.name %} - -@{{ group }}.group(name="{{ cli_name(attr.name) }}", - cls=clicommon.AliasedGroup) -def {{ list_update_group }}(): - """ Add/Delete {{ attr.name }} in {{ table.name }} """ - - pass - -{# Add entries to list attribute config CLI generation -E.g: - @TABLE_object.command(name="add") - @click.argument("key1", nargs=1) - @click.argument("key2", nargs=1) - @click.argument("attribute", nargs=-1) - def TABLE_object_attribute_add(db, key1, key2, attribute): -#} -@{{ list_update_group }}.command(name="add") -{{ gen_click_arguments(object["keys"] + [attr]) }} -@clicommon.pass_db -def {{ list_update_group }}_add( - db, - {{ pythonize(object["keys"] + [attr]) }} -): - """ Add {{ attr.name }} in {{ table.name }} """ - - table = "{{ table.name }}" - key = {{ pythonize(object["keys"]) }} - attr = "{{ attr.name }}" - data = {{ pythonize([attr]) }} - - try: - add_list_entry_validated(db.cfgdb, table, key, attr, data) - except Exception as err: - exit_with_error(f"Error: {err}", fg="red") - - -{# Delete entries from list attribute config CLI generation -E.g: - @TABLE_object.command(name="delete") - @click.argument("key1", nargs=1) - @click.argument("key2", nargs=1) - @click.argument("attribute", nargs=-1) - def TABLE_object_attribute_delete(db, key1, key2, attribute): -#} -@{{ list_update_group }}.command(name="delete") -{{ gen_click_arguments(object["keys"] + [attr]) }} -@clicommon.pass_db -def {{ list_update_group }}_delete( - db, - {{ pythonize(object["keys"] + [attr]) }} -): - """ Delete {{ attr.name }} in {{ table.name }} """ - - table = "{{ table.name }}" - key = {{ pythonize(object["keys"]) }} - attr = "{{ attr.name }}" - data = {{ pythonize([attr]) }} - - try: - del_list_entry_validated(db.cfgdb, table, key, attr, data) - except Exception as err: - exit_with_error(f"Error: {err}", fg="red") - - -{# Clear entries from list attribute config CLI generation -E.g: - @TABLE_object.command(name="delete") - @click.argument("key1", nargs=1) - @click.argument("key2", nargs=1) - def TABLE_object_attribute_clear(db, key1, key2): -#} -@{{ list_update_group }}.command(name="clear") -{{ gen_click_arguments(object["keys"]) }} -@clicommon.pass_db -def {{ 
list_update_group }}_clear( - db, - {{ pythonize(object["keys"]) }} -): - """ Clear {{ attr.name }} in {{ table.name }} """ - - table = "{{ table.name }}" - key = {{ pythonize(object["keys"]) }} - attr = "{{ attr.name }}" - - try: - clear_list_entry_validated(db.cfgdb, table, key, attr) - except Exception as err: - exit_with_error(f"Error: {err}", fg="red") - -{% endmacro %} - - -{% macro gen_cfg_obj_list_update_all(group, table, object) %} -{% for attr in object.attrs %} -{% if attr["is-leaf-list"] %} -{{ gen_cfg_obj_list_update(group, table, object, attr) }} -{% endif %} -{% endfor %} -{% endmacro %} - - -{% macro gen_cfg_static_obj_attr(table, object, attr) %} -@{{ table.name }}_{{ object.name }}.command(name="{{ cli_name(attr.name) }}") -{{ gen_click_arguments([attr]) }} -@clicommon.pass_db -def {{ table.name }}_{{ object.name }}_{{ attr.name }}(db, {{ pythonize([attr]) }}): - """ {{ attr.description }} """ - - table = "{{ table.name }}" - key = "{{ object.name }}" - data = { - "{{ attr.name }}": {{ pythonize([attr]) }}, - } - try: - update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) - except Exception as err: - exit_with_error(f"Error: {err}", fg="red") -{% endmacro %} - - -{# Static objects config CLI generation -E.g: - @TABLE.group(name="object") - def TABLE_object(db): -#} -{% macro gen_cfg_static_obj(table, object) %} -@{{ table.name }}.group(name="{{ cli_name(object.name) }}", - cls=clicommon.AliasedGroup) -@clicommon.pass_db -def {{ table.name }}_{{ object.name }}(db): - """ {{ object.description }} """ - - pass - -{# Static objects attributes config CLI generation -E.g: - @TABLE_object.command(name="attribute") - def TABLE_object_attribute(db, attribute): -#} -{% for attr in object.attrs %} -{{ gen_cfg_static_obj_attr(table, object, attr) }} -{% endfor %} - -{{ gen_cfg_obj_list_update_all(table.name + "_" + object.name, table, object) }} -{% endmacro %} - -{# Dynamic objects config CLI generation #} - -{# Dynamic objects add command -E.g: - @TABLE.command(name="add") - @click.argument("key1") - @click.argument("key2") - @click.option("--attr1") - @click.option("--attr2") - @click.option("--attr3") - def TABLE_TABLE_LIST_add(db, key1, key2, attr1, attr2, attr3): -#} -{% macro gen_cfg_dyn_obj_add(group, table, object) %} -@{{ group }}.command(name="add") -{{ gen_click_arguments(object["keys"]) }} -{{ gen_click_options(object.attrs) }} -@clicommon.pass_db -def {{ group }}_add(db, {{ pythonize(object["keys"] + object.attrs) }}): - """ Add object in {{ table.name }}. 
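The generated config plugins boil down to ordinary click groups and commands; a minimal hand-written equivalent of the emitted shape (table and option names are illustrative):

    import click

    @click.group(name='table-name')
    def table_name():
        """ Example group, as emitted once per CONFIG DB table. """

    @table_name.command(name='add')
    @click.argument('key', nargs=1, required=True)
    @click.option('--field', help='example attribute')
    def table_name_add(key, field):
        click.echo(f'add {key}: field={field}')

    if __name__ == '__main__':
        table_name()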
""" - - table = "{{ table.name }}" - key = {{ pythonize(object["keys"]) }} - data = {} -{%- for attr in object.attrs %} - if {{ pythonize([attr]) }} is not None: -{%- if not attr["is-leaf-list"] %} - data["{{ attr.name }}"] = {{ pythonize([attr]) }} -{%- else %} - data["{{ attr.name }}"] = {{ pythonize([attr]) }}.split(",") -{%- endif %} -{%- endfor %} - - try: - add_entry_validated(db.cfgdb, table, key, data) - except Exception as err: - exit_with_error(f"Error: {err}", fg="red") -{% endmacro %} - -{# Dynamic objects update command -E.g: - @TABLE.command(name="update") - @click.argument("key1") - @click.argument("key2") - @click.option("--attr1") - @click.option("--attr2") - @click.option("--attr3") - def TABLE_TABLE_LIST_update(db, key1, key2, attr1, attr2, attr3): -#} -{% macro gen_cfg_dyn_obj_update(group, table, object) %} -@{{ group }}.command(name="update") -{{ gen_click_arguments(object["keys"]) }} -{{ gen_click_options(object.attrs) }} -@clicommon.pass_db -def {{ group }}_update(db, {{ pythonize(object["keys"] + object.attrs) }}): - """ Add object in {{ table.name }}. """ - - table = "{{ table.name }}" - key = {{ pythonize(object["keys"]) }} - data = {} -{%- for attr in object.attrs %} - if {{ pythonize([attr]) }} is not None: -{%- if not attr["is-leaf-list"] %} - data["{{ attr.name }}"] = {{ pythonize([attr]) }} -{%- else %} - data["{{ attr.name }}"] = {{ pythonize([attr]) }}.split(",") -{%- endif %} -{%- endfor %} - - try: - update_entry_validated(db.cfgdb, table, key, data) - except Exception as err: - exit_with_error(f"Error: {err}", fg="red") -{% endmacro %} - -{# Dynamic objects delete command -E.g: - @TABLE.command(name="delete") - @click.argument("key1") - @click.argument("key2") - def TABLE_TABLE_LIST_delete(db, key1, key2): -#} -{% macro gen_cfg_dyn_obj_delete(group, table, object) %} -@{{ group }}.command(name="delete") -{{ gen_click_arguments(object["keys"]) }} -@clicommon.pass_db -def {{ group }}_delete(db, {{ pythonize(object["keys"]) }}): - """ Delete object in {{ table.name }}. 
""" - - table = "{{ table.name }}" - key = {{ pythonize(object["keys"]) }} - try: - del_entry_validated(db.cfgdb, table, key) - except Exception as err: - exit_with_error(f"Error: {err}", fg="red") -{% endmacro %} - -{% macro gen_cfg_dyn_obj(table, object) %} -{# Generate another nested group in case table holds two types of objects #} -{% if table["dynamic-objects"]|length > 1 %} -{% set group = table.name + "_" + object.name %} -@{{ table.name }}.group(name="{{ cli_name(object.name) }}", - cls=clicommon.AliasedGroup) -def {{ group }}(): - """ {{ object.description }} """ - - pass -{% else %} -{% set group = table.name %} -{% endif %} - -{{ gen_cfg_dyn_obj_add(group, table, object) }} -{{ gen_cfg_dyn_obj_update(group, table, object) }} -{{ gen_cfg_dyn_obj_delete(group, table, object) }} -{{ gen_cfg_obj_list_update_all(group, table, object) }} -{% endmacro %} - - -{% for table in tables %} -@click.group(name="{{ cli_name(table.name) }}", - cls=clicommon.AliasedGroup) -def {{ table.name }}(): - """ {{ table.description }} """ - - pass - -{% if "static-objects" in table %} -{% for object in table["static-objects"] %} -{{ gen_cfg_static_obj(table, object) }} -{% endfor %} -{% endif %} - -{% if "dynamic-objects" in table %} -{% for object in table["dynamic-objects"] %} -{{ gen_cfg_dyn_obj(table, object) }} -{% endfor %} -{% endif %} - -{% endfor %} - -def register(cli): -{%- for table in tables %} - cli_node = {{ table.name }} - if cli_node.name in cli.commands: - raise Exception(f"{cli_node.name} already exists in CLI") - cli.add_command({{ table.name }}) -{%- endfor %} diff --git a/sonic-utilities-data/templates/sonic-cli-gen/show.py.j2 b/sonic-utilities-data/templates/sonic-cli-gen/show.py.j2 deleted file mode 100644 index 6ee27f2013..0000000000 --- a/sonic-utilities-data/templates/sonic-cli-gen/show.py.j2 +++ /dev/null @@ -1,245 +0,0 @@ -{% from "common.j2" import cli_name -%} -""" -Auto-generated show CLI plugin. -{% if source_template is defined %} -Source template: {{ source_template }} -{% endif %} -{% if source_yang_module is defined %} -Source YANG module: {{ source_yang_module }} -{% endif %} -""" - -import click -import tabulate -import natsort -import utilities_common.cli as clicommon - - -{% macro column_name(name) -%} -{{ name|upper|replace("_", " ")|replace("-", " ") }} -{%- endmacro %} - - -def format_attr_value(entry, attr): - """ Helper that formats attribute to be presented in the table output. - - Args: - entry (Dict[str, str]): CONFIG DB entry configuration. - attr (Dict): Attribute metadata. - - Returns: - str: fomatted attribute value. - """ - - if attr["is-leaf-list"]: - return "\n".join(entry.get(attr["name"], [])) - return entry.get(attr["name"], "N/A") - - -def format_group_value(entry, attrs): - """ Helper that formats grouped attribute to be presented in the table output. - - Args: - entry (Dict[str, str]): CONFIG DB entry configuration. - attrs (List[Dict]): Attributes metadata that belongs to the same group. - - Returns: - str: fomatted group attributes. - """ - - data = [] - for attr in attrs: - if entry.get(attr["name"]): - data.append((attr["name"] + ":", format_attr_value(entry, attr))) - return tabulate.tabulate(data, tablefmt="plain") - - -{# Generates a python list that represents a row in the table view. 
-E.g: -Jinja2: -{{ - gen_row("entry", [ - {"name": "leaf1"}, - {"name": "leaf_1"}, - {"name": "leaf_2"}, - {"name": "leaf_3", "group": "group_0"} - ]) -}} -Result: -[ - format_attr_value( - entry, - {'name': 'leaf1'} - ), - format_attr_value( - entry, - {'name': 'leaf_1'} - ), - format_attr_value( - entry, - {'name': 'leaf_2'} - ), - format_group_value( - entry, - [{'name': 'leaf_3', 'group': 'group_0'}] - ), -] -#} -{% macro gen_row(entry, attrs) -%} -[ -{%- for attr in attrs|rejectattr("group", "defined") %} - format_attr_value( - {{ entry }}, - {{ attr }} - ), -{%- endfor %} -{%- for group, attrs in attrs|selectattr("group", "defined")|groupby("group") %} -{%- if group == "" %} -{%- for attr in attrs %} - format_attr_value( - {{ entry }}, - {{ attr }} - ), -{%- endfor %} -{%- else %} - format_group_value( - {{ entry }}, - {{ attrs }} - ), -{%- endif %} -{%- endfor %} -] -{% endmacro %} - -{# Generates a list that represents a header in table view. -E.g: -Jinja2: {{ - gen_header([ - {"name": "key"}, - {"name": "leaf_1"}, - {"name": "leaf_2"}, - {"name": "leaf_3", "group": "group_0"} - ]) - }} - -Result: -[ - "KEY", - "LEAF 1", - "LEAF 2", - "GROUP 0", -] - -#} -{% macro gen_header(attrs) -%} -[ -{% for attr in attrs|rejectattr("group", "defined") -%} - "{{ column_name(attr.name) }}", -{% endfor -%} -{% for group, attrs in attrs|selectattr("group", "defined")|groupby("group") -%} -{%- if group == "" %} -{% for attr in attrs -%} - "{{ column_name(attr.name) }}", -{% endfor -%} -{%- else %} - "{{ column_name(group) }}", -{%- endif %} -{% endfor -%} -] -{% endmacro %} - - -{% for table in tables %} -{% if "static-objects" in table %} -{# For static objects generate a command group called against table name. -E.g: -@click.group(name="table-name", - cls=clicommon.AliasedGroup) -def TABLE_NAME(): - """ TABLE DESCRIPTION """ - - pass -#} -@click.group(name="{{ cli_name(table.name) }}", - cls=clicommon.AliasedGroup) -def {{ table.name }}(): - """ {{ table.description }} """ - - pass - -{% for object in table["static-objects"] %} -{# For every object in static table generate a command -in the group to show individual object configuration. -CLI command is named against the object key in DB. -E.g: -@TABLE_NAME.command(name="object-name") -@clicommon.pass_db -def TABLE_NAME_object_name(db): - ... -#} -@{{ table.name }}.command(name="{{ cli_name(object.name) }}") -@clicommon.pass_db -def {{ table.name }}_{{ object.name }}(db): - """ {{ object.description }} """ - - header = {{ gen_header(object.attrs) }} - body = [] - - table = db.cfgdb.get_table("{{ table.name }}") - entry = table.get("{{ object.name }}", {}) - row = {{ gen_row("entry", object.attrs) }} - body.append(row) - click.echo(tabulate.tabulate(body, header)) - -{% endfor %} -{% elif "dynamic-objects" in table %} -{% if table["dynamic-objects"]|length > 1 %} -@click.group(name="{{ cli_name(table.name) }}", - cls=clicommon.AliasedGroup) -def {{ table.name }}(): - """ {{ table.description }} """ - - pass -{% endif %} -{% for object in table["dynamic-objects"] %} -{# Generate another nesting group in case table holds two types of objects #} -{% if table["dynamic-objects"]|length > 1 %} -{% set group = table.name %} -{% set name = object.name %} -{% else %} -{% set group = "click" %} -{% set name = table.name %} -{% endif %} - -{# Generate an implementation to display table. 
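Expanding gen_header and gen_row for a hypothetical static object with two plain leafs shows how little is left after rendering. A sketch of the generated show command (format_attr_value is the helper defined earlier in this template; names are placeholders):

@TABLE_1.command(name="object-1")
@clicommon.pass_db
def TABLE_1_OBJECT_1(db):
    """ OBJECT_1 description """

    header = ["LEAF 1", "LEAF 2"]   # column_name(): upper-cased, "_"/"-" become spaces
    body = []

    table = db.cfgdb.get_table("TABLE_1")
    entry = table.get("OBJECT_1", {})
    row = [
        format_attr_value(entry, {'name': 'leaf_1', 'is-leaf-list': False}),
        format_attr_value(entry, {'name': 'leaf_2', 'is-leaf-list': False}),
    ]
    body.append(row)
    click.echo(tabulate.tabulate(body, header))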
#} -@{{ group }}.group(name="{{ cli_name(name) }}", - cls=clicommon.AliasedGroup, - invoke_without_command=True) -@clicommon.pass_db -def {{ name }}(db): - """ {{ object.description }} [Callable command group] """ - - header = {{ gen_header(object["keys"] + object.attrs) }} - body = [] - - table = db.cfgdb.get_table("{{ table.name }}") - for key in natsort.natsorted(table): - entry = table[key] - if not isinstance(key, tuple): - key = (key,) - - row = [*key] + {{ gen_row("entry", object.attrs) }} - body.append(row) - - click.echo(tabulate.tabulate(body, header)) -{% endfor %} -{% endif %} -{% endfor %} - -def register(cli): -{%- for table in tables %} - cli_node = {{ table.name }} - if cli_node.name in cli.commands: - raise Exception(f"{cli_node.name} already exists in CLI") - cli.add_command({{ table.name }}) -{%- endfor %} diff --git a/sonic_cli_gen/__init__.py b/sonic_cli_gen/__init__.py deleted file mode 100644 index e7e775c0fb..0000000000 --- a/sonic_cli_gen/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env python - -from sonic_cli_gen.generator import CliGenerator - -__all__ = ['CliGenerator'] - diff --git a/sonic_cli_gen/generator.py b/sonic_cli_gen/generator.py deleted file mode 100644 index 4f48b0201a..0000000000 --- a/sonic_cli_gen/generator.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python - -import os -import pkgutil -import jinja2 - -from sonic_cli_gen.yang_parser import YangParser - -templates_path = '/usr/share/sonic/templates/sonic-cli-gen/' - - -class CliGenerator: - """ SONiC CLI generator. This class provides public API - for sonic-cli-gen python library. It can generate config, - show CLI plugins. - - Attributes: - loader: the loaded j2 templates - env: j2 central object - logger: logger - """ - - def __init__(self, logger): - """ Initialize CliGenerator. """ - - self.loader = jinja2.FileSystemLoader(templates_path) - self.env = jinja2.Environment(loader=self.loader) - self.logger = logger - - def generate_cli_plugin(self, cli_group, plugin_name): - """ Generate click CLI plugin and put it to: - /usr/local/lib//dist-packages//plugins/auto/ - """ - - parser = YangParser(yang_model_name=plugin_name, - config_db_path='configDB', - allow_tbl_without_yang=True, - debug=False) - # yang_dict will be used as an input for templates located in - # /usr/share/sonic/templates/sonic-cli-gen/ - yang_dict = parser.parse_yang_model() - plugin_path = get_cli_plugin_path(cli_group, plugin_name + '_yang.py') - template = self.env.get_template(cli_group + '.py.j2') - with open(plugin_path, 'w') as plugin_py: - plugin_py.write(template.render(yang_dict)) - self.logger.info(' Auto-generation successful! 
Location: {}'.format(plugin_path)) - - def remove_cli_plugin(self, cli_group, plugin_name): - """ Remove CLI plugin from directory: - /usr/local/lib//dist-packages//plugins/auto/ - """ - plugin_path = get_cli_plugin_path(cli_group, plugin_name + '_yang.py') - if os.path.exists(plugin_path): - os.remove(plugin_path) - self.logger.info(' {} was removed.'.format(plugin_path)) - else: - self.logger.info(' Path {} doest NOT exist!'.format(plugin_path)) - - -def get_cli_plugin_path(command, plugin_name): - pkg_loader = pkgutil.get_loader(f'{command}.plugins.auto') - if pkg_loader is None: - raise Exception(f'Failed to get plugins path for {command} CLI') - plugins_pkg_path = os.path.dirname(pkg_loader.path) - - return os.path.join(plugins_pkg_path, plugin_name) - diff --git a/sonic_cli_gen/main.py b/sonic_cli_gen/main.py deleted file mode 100644 index bfcd301aed..0000000000 --- a/sonic_cli_gen/main.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/bin/env python - -import sys -import click -import logging -from sonic_cli_gen.generator import CliGenerator - -logger = logging.getLogger('sonic-cli-gen') -logging.basicConfig(stream=sys.stdout, level=logging.INFO) - - -@click.group() -@click.pass_context -def cli(ctx): - """ SONiC CLI Auto-generator tool.\r - Generate click CLI plugin for 'config' or 'show' CLI groups.\r - CLI plugin will be generated from the YANG model, which should be in:\r\n - /usr/local/yang-models/ \n - Generated CLI plugin will be placed in: \r\n - /usr/local/lib/python3.7/dist-packages//plugins/auto/ - """ - - context = { - 'gen': CliGenerator(logger) - } - ctx.obj = context - - -@cli.command() -@click.argument('cli_group', type=click.Choice(['config', 'show'])) -@click.argument('yang_model_name', type=click.STRING) -@click.pass_context -def generate(ctx, cli_group, yang_model_name): - """ Generate click CLI plugin. """ - - ctx.obj['gen'].generate_cli_plugin(cli_group, yang_model_name) - - -@cli.command() -@click.argument('cli_group', type=click.Choice(['config', 'show'])) -@click.argument('yang_model_name', type=click.STRING) -@click.pass_context -def remove(ctx, cli_group, yang_model_name): - """ Remove generated click CLI plugin from. """ - - ctx.obj['gen'].remove_cli_plugin(cli_group, yang_model_name) - - -if __name__ == '__main__': - cli() - diff --git a/sonic_cli_gen/yang_parser.py b/sonic_cli_gen/yang_parser.py deleted file mode 100644 index df0382536f..0000000000 --- a/sonic_cli_gen/yang_parser.py +++ /dev/null @@ -1,679 +0,0 @@ -#!/usr/bin/env python - -from collections import OrderedDict -from config.config_mgmt import ConfigMgmt -from typing import List, Dict - -yang_guidelines_link = 'https://github.com/Azure/SONiC/blob/master/doc/mgmt/SONiC_YANG_Model_Guidelines.md' - - -class YangParser: - """ YANG model parser - - Attributes: - yang_model_name: Name of the YANG model file - conf_mgmt: Instance of Config Mgmt class to - help parse YANG models - y_module: Reference to 'module' entity - from YANG model file - y_top_level_container: Reference to top level 'container' - entity from YANG model file - y_table_containers: Reference to 'container' entities - from YANG model file that represent Config DB tables - yang_2_dict: dictionary created from YANG model file that - represent Config DB schema. 
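In practice the whole package reduces to two calls. A minimal usage sketch, assuming a sonic-vlan YANG model is installed under /usr/local/yang-models (the model name is an example):

import logging
from sonic_cli_gen import CliGenerator

gen = CliGenerator(logging.getLogger('sonic-cli-gen'))

# Render config.py.j2 and drop the result into <config>/plugins/auto/
gen.generate_cli_plugin('config', 'sonic-vlan')

# Delete the generated plugin again
gen.remove_cli_plugin('config', 'sonic-vlan')

These are the same two operations that the 'generate' and 'remove' sub-commands in main.py expose on the command line.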
- - Below the 'yang_2_dict' obj in case if YANG model has a 'list' entity: - { - 'tables': [{ - 'name': 'value', - 'description': 'value', - 'dynamic-objects': [ - 'name': 'value', - 'description': 'value, - 'attrs': [ - { - 'name': 'value', - 'description': 'value', - 'is-leaf-list': False, - 'is-mandatory': False, - 'group': 'value' - } - ... - ], - 'keys': [ - { - 'name': 'ACL_TABLE_NAME', - 'description': 'value' - } - ... - ] - ], - }] - } - In case if YANG model does NOT have a 'list' entity, - it has the same structure as above, but 'dynamic-objects' - changed to 'static-objects' and have no 'keys' - """ - - def __init__(self, - yang_model_name, - config_db_path, - allow_tbl_without_yang, - debug): - self.yang_model_name = yang_model_name - self.conf_mgmt = None - self.y_module = None - self.y_top_level_container = None - self.y_table_containers = None - self.yang_2_dict = dict() - - try: - self.conf_mgmt = ConfigMgmt(config_db_path, - debug, - allow_tbl_without_yang) - except Exception as e: - raise Exception("Failed to load the {} class".format(str(e))) - - def _init_yang_module_and_containers(self): - """ Initialize inner class variables: - self.y_module - self.y_top_level_container - self.y_table_containers - - Raises: - Exception: if YANG model is invalid or NOT exist - """ - - self.y_module = self._find_yang_model_in_yjson_obj() - - if self.y_module is None: - raise Exception('The YANG model {} is NOT exist'.format(self.yang_model_name)) - - if self.y_module.get('container') is None: - raise Exception('The YANG model {} does NOT have\ - "top level container" element\ - Please follow the SONiC YANG model guidelines:\ - \n{}'.format(self.yang_model_name, yang_guidelines_link)) - self.y_top_level_container = self.y_module.get('container') - - if self.y_top_level_container.get('container') is None: - raise Exception('The YANG model {} does NOT have "container"\ - element after "top level container"\ - Please follow the SONiC YANG model guidelines:\ - \n{}'.format(self.yang_model_name, yang_guidelines_link)) - self.y_table_containers = self.y_top_level_container.get('container') - - def _find_yang_model_in_yjson_obj(self) -> OrderedDict: - """ Find provided YANG model inside the yJson object, - the yJson object contain all yang-models - parsed from directory - /usr/local/yang-models - - Returns: - reference to yang_model_name - """ - - for yang_model in self.conf_mgmt.sy.yJson: - if yang_model.get('module').get('@name') == self.yang_model_name: - return yang_model.get('module') - - def parse_yang_model(self) -> dict: - """ Parse provided YANG model and save - the output to self.yang_2_dict object - - Returns: - parsed YANG model in dictionary format - """ - - self._init_yang_module_and_containers() - self.yang_2_dict['tables'] = list() - - # determine how many (1 or more) containers a YANG model - # has after the 'top level' container - # 'table' container goes after the 'top level' container - self.yang_2_dict['tables'] += list_handler(self.y_table_containers, - lambda e: on_table_container(self.y_module, e, self.conf_mgmt)) - - return self.yang_2_dict - - -# ------------------------------HANDLERS-------------------------------- # - -def list_handler(y_entity, callback) -> List[Dict]: - """ Determine if the type of entity is a list, - if so - call the callback for every list element - """ - - if isinstance(y_entity, list): - return [callback(e) for e in y_entity] - else: - return [callback(y_entity)] - - -def on_table_container(y_module: OrderedDict, - tbl_container: OrderedDict, 
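A quick sketch of the parse entry point, using the same constructor arguments that generator.py passes ('sonic-vlan' is an example model name):

parser = YangParser(yang_model_name='sonic-vlan',
                    config_db_path='configDB',
                    allow_tbl_without_yang=True,
                    debug=False)
yang_dict = parser.parse_yang_model()
# yang_dict == {'tables': [...]}: one element per table container, each
# holding either 'static-objects' or 'dynamic-objects' as described in
# the class docstring above.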
- conf_mgmt: ConfigMgmt) -> dict: - """ Parse 'table' container, - 'table' container goes after 'top level' container - - Args: - y_module: reference to 'module' - tbl_container: reference to 'table' container - conf_mgmt: reference to ConfigMgmt class instance, - it have yJson object which contain all parsed YANG models - Returns: - element for self.yang_2_dict['tables'] - """ - y2d_elem = { - 'name': tbl_container.get('@name'), - 'description': get_description(tbl_container) - } - - # determine if 'table container' has a 'list' entity - if tbl_container.get('list') is None: - y2d_elem['static-objects'] = list() - - # 'object' container goes after the 'table' container - # 'object' container have 2 types - list (like sonic-flex_counter.yang) - # and NOT list (like sonic-device_metadata.yang) - y2d_elem['static-objects'] += list_handler(tbl_container.get('container'), - lambda e: on_object_entity(y_module, e, conf_mgmt, is_list=False)) - else: - y2d_elem['dynamic-objects'] = list() - - # 'container' can have more than 1 'list' entity - y2d_elem['dynamic-objects'] += list_handler(tbl_container.get('list'), - lambda e: on_object_entity(y_module, e, conf_mgmt, is_list=True)) - - # move 'keys' elements from 'attrs' to 'keys' - change_dyn_obj_struct(y2d_elem['dynamic-objects']) - - return y2d_elem - - -def on_object_entity(y_module: OrderedDict, - y_entity: OrderedDict, - conf_mgmt: ConfigMgmt, - is_list: bool) -> dict: - """ Parse a 'object' entity, it could be a 'container' or a 'list' - 'Object' entity represent OBJECT in Config DB schema: - { - "TABLE": { - "OBJECT": { - "attr": "value" - } - } - } - - Args: - y_module: reference to 'module' - y_entity: reference to 'object' entity - conf_mgmt: reference to ConfigMgmt class instance, - it have yJson object which contain all parsed YANG models - is_list: boolean flag to determine if a 'list' was passed - Returns: - element for y2d_elem['static-objects'] OR y2d_elem['dynamic-objects'] - """ - - if y_entity is None: - return {} - - obj_elem = { - 'name': y_entity.get('@name'), - 'description': get_description(y_entity), - 'attrs': list() - } - - if is_list: - obj_elem['keys'] = get_list_keys(y_entity) - - attrs_list = list() - # grouping_name is empty because 'grouping' is not used so far - attrs_list.extend(get_leafs(y_entity, grouping_name='')) - attrs_list.extend(get_leaf_lists(y_entity, grouping_name='')) - attrs_list.extend(get_choices(y_module, y_entity, conf_mgmt, grouping_name='')) - attrs_list.extend(get_uses(y_module, y_entity, conf_mgmt)) - - obj_elem['attrs'] = attrs_list - - return obj_elem - - -def on_uses(y_module: OrderedDict, - y_uses, - conf_mgmt: ConfigMgmt) -> list: - """ Parse a YANG 'uses' entities - 'uses' referring to 'grouping' YANG entity - - Args: - y_module: reference to 'module' - y_uses: reference to 'uses' - conf_mgmt: reference to ConfigMgmt class instance, - it have yJson object which contain all parsed YANG model - Returns: - element for obj_elem['attrs'], 'attrs' contain a parsed 'leafs' - """ - - ret_attrs = list() - y_grouping = get_all_grouping(y_module, y_uses, conf_mgmt) - # trim prefixes in order to the next checks - trim_uses_prefixes(y_uses) - - # TODO: 'refine' support - for group in y_grouping: - if isinstance(y_uses, list): - for use in y_uses: - if group.get('@name') == use.get('@name'): - ret_attrs.extend(get_leafs(group, group.get('@name'))) - ret_attrs.extend(get_leaf_lists(group, group.get('@name'))) - ret_attrs.extend(get_choices(y_module, group, conf_mgmt, group.get('@name'))) - else: - if 
group.get('@name') == y_uses.get('@name'): - ret_attrs.extend(get_leafs(group, group.get('@name'))) - ret_attrs.extend(get_leaf_lists(group, group.get('@name'))) - ret_attrs.extend(get_choices(y_module, group, conf_mgmt, group.get('@name'))) - - return ret_attrs - - -def on_choices(y_module: OrderedDict, - y_choices, - conf_mgmt: ConfigMgmt, - grouping_name: str) -> list: - """ Parse a YANG 'choice' entities - - Args: - y_module: reference to 'module' - y_choices: reference to 'choice' element - conf_mgmt: reference to ConfigMgmt class instance, - it have yJson object which contain all parsed YANG model - grouping_name: if YANG entity contain 'uses', this arg represent 'grouping' name - Returns: - element for obj_elem['attrs'], 'attrs' contain a parsed 'leafs' - """ - - ret_attrs = list() - - # the YANG model can have multiple 'choice' entities - # inside a 'container' or 'list' - if isinstance(y_choices, list): - for choice in y_choices: - attrs = on_choice_cases(y_module, choice.get('case'), - conf_mgmt, grouping_name) - ret_attrs.extend(attrs) - else: - ret_attrs = on_choice_cases(y_module, y_choices.get('case'), - conf_mgmt, grouping_name) - - return ret_attrs - - -def on_choice_cases(y_module: OrderedDict, - y_cases, - conf_mgmt: ConfigMgmt, - grouping_name: str) -> list: - """ Parse a single YANG 'case' entity from the 'choice' entity. - The 'case' element can has inside - 'leaf', 'leaf-list', 'uses' - - Args: - y_module: reference to 'module' - y_cases: reference to 'case' - conf_mgmt: reference to ConfigMgmt class instance, - it have yJson object which contain all - parsed YANG model - grouping_name: if YANG entity contain 'uses', - this argument represent 'grouping' name - Returns: - element for the obj_elem['attrs'], the 'attrs' - contain a parsed 'leafs' - """ - - ret_attrs = list() - - if isinstance(y_cases, list): - for case in y_cases: - ret_attrs.extend(get_leafs(case, grouping_name)) - ret_attrs.extend(get_leaf_lists(case, grouping_name)) - ret_attrs.extend(get_uses(y_module, case, conf_mgmt)) - else: - ret_attrs.extend(get_leafs(y_cases, grouping_name)) - ret_attrs.extend(get_leaf_lists(y_cases, grouping_name)) - ret_attrs.extend(get_uses(y_module, y_cases, conf_mgmt)) - - return ret_attrs - - -def on_leafs(y_leafs, - grouping_name: str, - is_leaf_list: bool) -> list: - """ Parse all the 'leaf' or 'leaf-list' elements - - Args: - y_leafs: reference to all 'leaf' elements - grouping_name: if YANG entity contain 'uses', - this argument represent the 'grouping' name - is_leaf_list: boolean to determine if a 'leaf-list' - was passed as 'y_leafs' argument - Returns: - list of parsed 'leaf' elements - """ - - ret_attrs = list() - # The YANG 'container' entity may have only 1 'leaf' - # element OR a list of 'leaf' elements - ret_attrs += list_handler(y_leafs, lambda e: on_leaf(e, grouping_name, is_leaf_list)) - - return ret_attrs - - -def on_leaf(leaf: OrderedDict, - grouping_name: str, - is_leaf_list: bool) -> dict: - """ Parse a single 'leaf' element - - Args: - leaf: reference to a 'leaf' entity - grouping_name: if YANG entity contain 'uses', - this argument represent 'grouping' name - is_leaf_list: boolean to determine if 'leaf-list' - was passed in 'y_leafs' argument - Returns: - parsed 'leaf' element - """ - - attr = {'name': leaf.get('@name'), - 'description': get_description(leaf), - 'is-leaf-list': is_leaf_list, - 'is-mandatory': get_mandatory(leaf), - 'group': grouping_name} - - return attr - - -# ----------------------GETERS------------------------- # - -def 
get_mandatory(y_leaf: OrderedDict) -> bool: - """ Parse the 'mandatory' statement for a 'leaf' - - Args: - y_leaf: reference to a 'leaf' entity - Returns: - 'leaf' 'mandatory' value - """ - - if y_leaf.get('mandatory') is not None: - return True - - return False - - -def get_description(y_entity: OrderedDict) -> str: - """ Parse the 'description' entity from any YANG element - - Args: - y_entity: reference to YANG 'container' OR 'list' OR 'leaf' ... - Returns: - text of the 'description' - """ - - if y_entity.get('description') is not None: - return y_entity.get('description').get('text') - else: - return '' - - -def get_leafs(y_entity: OrderedDict, - grouping_name: str) -> list: - """ Check if the YANG entity have 'leafs', if so call handler - - Args: - y_entity: reference YANG 'container' or 'list' - or 'choice' or 'uses' - grouping_name: if YANG entity contain 'uses', - this argument represent 'grouping' name - Returns: - list of parsed 'leaf' elements - """ - - if y_entity.get('leaf') is not None: - return on_leafs(y_entity.get('leaf'), grouping_name, is_leaf_list=False) - - return [] - - -def get_leaf_lists(y_entity: OrderedDict, - grouping_name: str) -> list: - """ Check if the YANG entity have 'leaf-list', if so call handler - - Args: - y_entity: reference YANG 'container' or 'list' - or 'choice' or 'uses' - grouping_name: if YANG entity contain 'uses', - this argument represent 'grouping' name - Returns: - list of parsed 'leaf-list' elements - """ - - if y_entity.get('leaf-list') is not None: - return on_leafs(y_entity.get('leaf-list'), grouping_name, is_leaf_list=True) - - return [] - - -def get_choices(y_module: OrderedDict, - y_entity: OrderedDict, - conf_mgmt: ConfigMgmt, - grouping_name: str) -> list: - """ Check if the YANG entity have 'choice', if so call handler - - Args: - y_module: reference to 'module' - y_entity: reference YANG 'container' or 'list' - or 'choice' or 'uses' - conf_mgmt: reference to ConfigMgmt class instance, - it have yJson object which contain all parsed YANG model - grouping_name: if YANG entity contain 'uses', - this argument represent 'grouping' name - Returns: - list of parsed elements inside 'choice' - """ - - if y_entity.get('choice') is not None: - return on_choices(y_module, y_entity.get('choice'), conf_mgmt, grouping_name) - - return [] - - -def get_uses(y_module: OrderedDict, - y_entity: OrderedDict, - conf_mgmt: ConfigMgmt) -> list: - """ Check if the YANG entity have 'uses', if so call handler - - Args: - y_module: reference to 'module' - y_entity: reference YANG 'container' or 'list' - or 'choice' or 'uses' - conf_mgmt: reference to ConfigMgmt class instance, - it have yJson object which contain all parsed YANG model - Returns: - list of parsed elements inside 'grouping' - that referenced by 'uses' - """ - - if y_entity.get('uses') is not None: - return on_uses(y_module, y_entity.get('uses'), conf_mgmt) - - return [] - - -def get_all_grouping(y_module: OrderedDict, - y_uses: OrderedDict, - conf_mgmt: ConfigMgmt) -> list: - """ Get all the 'grouping' entities that was referenced - by 'uses' in current YANG model - - Args: - y_module: reference to 'module' - y_entity: reference to 'uses' - conf_mgmt: reference to ConfigMgmt class instance, - it have yJson object which contain all parsed YANG model - Returns: - list of 'grouping' elements - """ - - ret_grouping = list() - # prefix_list needed to find what YANG model was imported - prefix_list = get_import_prefixes(y_uses) - - # in case if 'grouping' located in the same YANG model - 
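The net result of the leaf handlers is a small plain dict per leaf. An illustrative run (the OrderedDict mirrors the shape produced by the YANG-to-JSON step; note that get_mandatory only checks that a 'mandatory' child exists, so its exact value does not matter here):

from collections import OrderedDict

leaf = OrderedDict([('@name', 'mtu'),
                    ('description', OrderedDict([('text', 'Port MTU')])),
                    ('mandatory', OrderedDict([('@value', 'true')]))])

on_leaf(leaf, grouping_name='', is_leaf_list=False)
# -> {'name': 'mtu', 'description': 'Port MTU',
#     'is-leaf-list': False, 'is-mandatory': True, 'group': ''}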
local_grouping = y_module.get('grouping') - if local_grouping is not None: - if isinstance(local_grouping, list): - ret_grouping.extend(local_grouping) - else: - ret_grouping.append(local_grouping) - - # if prefix_list is NOT empty it means that 'grouping' - # was imported from another YANG model - if prefix_list != []: - for prefix in prefix_list: - y_import = y_module.get('import') - if isinstance(y_import, list): - for _import in y_import: - if _import.get('prefix').get('@value') == prefix: - ret_grouping.extend(get_grouping_from_another_yang_model(_import.get('@module'), conf_mgmt)) - else: - if y_import.get('prefix').get('@value') == prefix: - ret_grouping.extend(get_grouping_from_another_yang_model(y_import.get('@module'), conf_mgmt)) - - return ret_grouping - - -def get_grouping_from_another_yang_model(yang_model_name: str, - conf_mgmt) -> list: - """ Get the YANG 'grouping' entity - - Args: - yang_model_name - YANG model to search - conf_mgmt - reference to ConfigMgmt class instance, - it have yJson object which contain all parsed YANG models - - Returns: - list of 'grouping' entities - """ - - ret_grouping = list() - - for yang_model in conf_mgmt.sy.yJson: - if (yang_model.get('module').get('@name') == yang_model_name): - grouping = yang_model.get('module').get('grouping') - if isinstance(grouping, list): - ret_grouping.extend(grouping) - else: - ret_grouping.append(grouping) - - return ret_grouping - - -def get_import_prefixes(y_uses: OrderedDict) -> list: - """ Parse 'import prefix' of YANG 'uses' entity - Example: - { - uses stypes:endpoint; - } - 'stypes' - prefix of imported YANG module. - 'endpoint' - YANG 'grouping' entity name - - Args: - y_uses: refrence to YANG 'uses' - Returns: - list of parsed prefixes - """ - - ret_prefixes = list() - - if isinstance(y_uses, list): - for use in y_uses: - prefix = use.get('@name').split(':')[0] - if prefix != use.get('@name'): - ret_prefixes.append(prefix) - else: - prefix = y_uses.get('@name').split(':')[0] - if prefix != y_uses.get('@name'): - ret_prefixes.append(prefix) - - return ret_prefixes - - -def trim_uses_prefixes(y_uses) -> list: - """ Trim prefixes from the 'uses' YANG entities. - If the YANG 'grouping' was imported from another - YANG file, it use the 'prefix' before the 'grouping' name: - { - uses sgrop:endpoint; - } - Where 'sgrop' = 'prefix'; 'endpoint' = 'grouping' name. - - Args: - y_uses: reference to 'uses' - - Returns: - list of 'uses' without 'prefixes' - """ - - prefixes = get_import_prefixes(y_uses) - - for prefix in prefixes: - if isinstance(y_uses, list): - for use in y_uses: - if prefix in use.get('@name'): - use['@name'] = use.get('@name').split(':')[1] - else: - if prefix in y_uses.get('@name'): - y_uses['@name'] = y_uses.get('@name').split(':')[1] - - -def get_list_keys(y_list: OrderedDict) -> list: - """ Parse YANG the 'key' entity. - If YANG model has a 'list' entity, inside the 'list' - there is 'key' entity. The 'key' - whitespace - separeted list of 'leafs' - - Args: - y_list: reference to the 'list' - Returns: - list of parsed keys - """ - - ret_list = list() - - keys = y_list.get('key').get('@value').split() - for k in keys: - key = {'name': k} - ret_list.append(key) - - return ret_list - - -def change_dyn_obj_struct(dynamic_objects: list): - """ Rearrange self.yang_2_dict['dynamic_objects'] structure. - If YANG model have a 'list' entity - inside the 'list' - it has 'key' entity. 
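get_import_prefixes and trim_uses_prefixes are easiest to see on a single 'uses' node (the input value is illustrative):

from collections import OrderedDict

y_uses = OrderedDict([('@name', 'stypes:endpoint')])

get_import_prefixes(y_uses)   # -> ['stypes']
trim_uses_prefixes(y_uses)    # strips the prefix in place
y_uses['@name']               # -> 'endpoint'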
The 'key' entity it is whitespace - separeted list of 'leafs', those 'leafs' was parsed by - 'on_leaf()' function and placed under 'attrs' in - self.yang_2_dict['dynamic_objects'] need to move 'leafs' - from 'attrs' and put them into 'keys' section of - self.yang_2_dict['dynamic_objects'] - - Args: - dynamic_objects: reference to self.yang_2_dict['dynamic_objects'] - """ - - for obj in dynamic_objects: - for key in obj.get('keys'): - for attr in obj.get('attrs'): - if key.get('name') == attr.get('name'): - key['description'] = attr.get('description') - obj['attrs'].remove(attr) - break - diff --git a/tests/cli_autogen_input/assert_dictionaries.py b/tests/cli_autogen_input/assert_dictionaries.py deleted file mode 100644 index 263e48366d..0000000000 --- a/tests/cli_autogen_input/assert_dictionaries.py +++ /dev/null @@ -1,625 +0,0 @@ -""" -Module holding correct dictionaries for test YANG models -""" - -one_table_container = { - "tables":[ - { - "description":"TABLE_1 description", - "name":"TABLE_1", - "static-objects":[ - { - } - ] - } - ] -} - -two_table_containers = { - "tables":[ - { - "description":"TABLE_1 description", - "name":"TABLE_1", - "static-objects":[ - { - - } - ] - }, - { - "description":"TABLE_2 description", - "name":"TABLE_2", - "static-objects":[ - { - - } - ] - } - ] -} - -one_object_container = { - "tables":[ - { - "description":"TABLE_1 description", - "name":"TABLE_1", - "static-objects":[ - { - "name":"OBJECT_1", - "description":"OBJECT_1 description", - "attrs":[ - ] - } - ] - } - ] -} - -two_object_containers = { - "tables":[ - { - "description":"FIRST_TABLE description", - "name":"TABLE_1", - "static-objects":[ - { - "name":"OBJECT_1", - "description":"OBJECT_1 description", - "attrs":[ - ] - }, - { - "name":"OBJECT_2", - "description":"OBJECT_2 description", - "attrs":[ - ] - } - ] - } - ] -} - -one_list = { - "tables":[ - { - "description":"TABLE_1 description", - "name":"TABLE_1", - "dynamic-objects":[ - { - "name":"TABLE_1_LIST", - "description":"TABLE_1_LIST description", - "keys":[ - { - "name": "key_name", - "description": "", - } - ], - "attrs":[ - ] - } - ] - } - ] -} - -two_lists = { - "tables":[ - { - "description":"TABLE_1 description", - "name":"TABLE_1", - "dynamic-objects":[ - { - "name":"TABLE_1_LIST_1", - "description":"TABLE_1_LIST_1 description", - "keys":[ - { - "name": "key_name1", - "description": "", - } - ], - "attrs":[ - ] - }, - { - "name":"TABLE_1_LIST_2", - "description":"TABLE_1_LIST_2 description", - "keys":[ - { - "name": "key_name2", - "description": "", - } - ], - "attrs":[ - ] - } - ] - } - ] -} - -static_object_complex_1 = { - "tables":[ - { - "description":"TABLE_1 description", - "name":"TABLE_1", - "static-objects":[ - { - "name":"OBJECT_1", - "description":"OBJECT_1 description", - "attrs":[ - { - "name":"OBJ_1_LEAF_1", - "description": "OBJ_1_LEAF_1 description", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"OBJ_1_LEAF_LIST_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": True, - "group": '', - }, - { - "name":"OBJ_1_CHOICE_1_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"OBJ_1_CHOICE_1_LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - } - ] - } - ] - } - ] -} - -static_object_complex_2 = { - "tables":[ - { - "description":"TABLE_1 description", - "name":"TABLE_1", - "static-objects":[ - { - "name":"OBJECT_1", - "description":"OBJECT_1 description", - 
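change_dyn_obj_struct, defined just above, is a small in-place rewrite. A worked example with invented names:

dyn_objs = [{
    'name': 'TABLE_1_LIST',
    'keys': [{'name': 'key_name'}],
    'attrs': [
        {'name': 'key_name', 'description': 'key description'},
        {'name': 'leaf_1', 'description': 'leaf_1 description'},
    ],
}]

change_dyn_obj_struct(dyn_objs)

# dyn_objs[0]['keys']  -> [{'name': 'key_name', 'description': 'key description'}]
# dyn_objs[0]['attrs'] -> [{'name': 'leaf_1', 'description': 'leaf_1 description'}]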
"attrs":[ - { - "name":"OBJ_1_LEAF_1", - "description": "OBJ_1_LEAF_1 description", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"OBJ_1_LEAF_2", - "description": "OBJ_1_LEAF_2 description", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"OBJ_1_LEAF_LIST_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": True, - "group": '', - }, - { - "name":"OBJ_1_LEAF_LIST_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": True, - "group": '', - }, - { - "name":"OBJ_1_CHOICE_1_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"OBJ_1_CHOICE_1_LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"OBJ_1_CHOICE_2_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"OBJ_1_CHOICE_2_LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - ] - } - ] - } - ] -} - -dynamic_object_complex_1 = { - "tables":[ - { - "description":"TABLE_1 description", - "name":"TABLE_1", - "dynamic-objects":[ - { - "name":"OBJECT_1_LIST", - "description":"OBJECT_1_LIST description", - "attrs":[ - { - "name":"OBJ_1_LEAF_1", - "description": "OBJ_1_LEAF_1 description", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"OBJ_1_LEAF_LIST_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": True, - "group": '', - }, - { - "name":"OBJ_1_CHOICE_1_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"OBJ_1_CHOICE_1_LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - } - ], - "keys":[ - { - "name": "KEY_LEAF_1", - "description": "KEY_LEAF_1 description", - } - ] - } - ] - } - ] -} - -dynamic_object_complex_2 = { - "tables":[ - { - "description":"TABLE_1 description", - "name":"TABLE_1", - "dynamic-objects":[ - { - "name":"OBJECT_1_LIST", - "description":"OBJECT_1_LIST description", - "attrs":[ - { - "name":"OBJ_1_LEAF_1", - "description": "OBJ_1_LEAF_1 description", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"OBJ_1_LEAF_2", - "description": "OBJ_1_LEAF_2 description", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"OBJ_1_LEAF_LIST_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": True, - "group": '', - }, - { - "name":"OBJ_1_LEAF_LIST_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": True, - "group": '', - }, - { - "name":"OBJ_1_CHOICE_1_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"OBJ_1_CHOICE_1_LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"OBJ_1_CHOICE_2_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"OBJ_1_CHOICE_2_LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - } - ], - "keys":[ - { - "name": "KEY_LEAF_1", - "description": "KEY_LEAF_1 description", - }, - { - "name": "KEY_LEAF_2", - "description": "KEY_LEAF_2 description", - } - ] - } - ] - } - ] -} - -choice_complex = { - "tables":[ - { - "description":"TABLE_1 description", - "name":"TABLE_1", - "static-objects":[ - { - "name":"OBJECT_1", - 
"description":"OBJECT_1 description", - "attrs":[ - { - "name":"LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"LEAF_LIST_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": True, - "group": '', - }, - { - "name":"GR_1_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": "GR_1", - }, - { - "name":"GR_1_LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": 'GR_1', - }, - { - "name":"LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"LEAF_3", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": '', - }, - { - "name":"LEAF_LIST_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": True, - "group": '', - }, - { - "name":"LEAF_LIST_3", - "description": "", - "is-mandatory": False, - "is-leaf-list": True, - "group": '', - }, - { - "name":"GR_5_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": 'GR_5', - }, - { - "name":"GR_5_LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": 'GR_5', - }, - { - "name":"GR_2_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": 'GR_2', - }, - { - "name":"GR_2_LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": 'GR_2', - }, - { - "name":"GR_3_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": 'GR_3', - }, - { - "name":"GR_3_LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": 'GR_3', - }, - ] - } - ] - } - ] -} - -grouping_complex = { - "tables":[ - { - "description":"TABLE_1 description", - "name":"TABLE_1", - "static-objects":[ - { - "name":"OBJECT_1", - "description":"OBJECT_1 description", - "attrs":[ - { - "name":"GR_1_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": "GR_1", - }, - { - "name":"GR_1_LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": 'GR_1', - }, - ] - }, - { - "name":"OBJECT_2", - "description":"OBJECT_2 description", - "attrs":[ - { - "name":"GR_5_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": "GR_5", - }, - { - "name":"GR_5_LEAF_LIST_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": True, - "group": "GR_5", - }, - { - "name":"GR_6_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": "GR_6", - }, - { - "name":"GR_6_LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": "GR_6", - }, - { - "name":"GR_6_CASE_1_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": "GR_6", - }, - { - "name":"GR_6_CASE_1_LEAF_LIST_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": True, - "group": "GR_6", - }, - { - "name":"GR_6_CASE_2_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": "GR_6", - }, - { - "name":"GR_6_CASE_2_LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": "GR_6", - }, - { - "name":"GR_6_CASE_2_LEAF_LIST_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": True, - "group": "GR_6", - }, - { - "name":"GR_6_CASE_2_LEAF_LIST_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": True, - 
"group": "GR_6", - }, - { - "name":"GR_4_LEAF_1", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": "GR_4", - }, - { - "name":"GR_4_LEAF_2", - "description": "", - "is-mandatory": False, - "is-leaf-list": False, - "group": "GR_4", - }, - ] - } - ] - } - ] -} \ No newline at end of file diff --git a/tests/cli_autogen_input/config_db.json b/tests/cli_autogen_input/config_db.json deleted file mode 100644 index 5473d6158a..0000000000 --- a/tests/cli_autogen_input/config_db.json +++ /dev/null @@ -1,544 +0,0 @@ -{ - "COPP_GROUP": { - "default": { - "cbs": "600", - "cir": "600", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "0", - "red_action": "drop" - }, - "queue1_group1": { - "cbs": "6000", - "cir": "6000", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "1", - "red_action": "drop", - "trap_action": "trap", - "trap_priority": "1" - }, - "queue1_group2": { - "cbs": "600", - "cir": "600", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "1", - "red_action": "drop", - "trap_action": "trap", - "trap_priority": "1" - }, - "queue2_group1": { - "cbs": "1000", - "cir": "1000", - "genetlink_mcgrp_name": "packets", - "genetlink_name": "psample", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "2", - "red_action": "drop", - "trap_action": "trap", - "trap_priority": "1" - }, - "queue4_group1": { - "cbs": "600", - "cir": "600", - "color": "blind", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "4", - "red_action": "drop", - "trap_action": "trap", - "trap_priority": "4" - }, - "queue4_group2": { - "cbs": "600", - "cir": "600", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "4", - "red_action": "drop", - "trap_action": "copy", - "trap_priority": "4" - }, - "queue4_group3": { - "cbs": "600", - "cir": "600", - "color": "blind", - "meter_type": "packets", - "mode": "sr_tcm", - "queue": "4", - "red_action": "drop", - "trap_action": "trap", - "trap_priority": "4" - } - }, - "COPP_TRAP": { - "arp": { - "trap_group": "queue4_group2", - "trap_ids": "arp_req,arp_resp,neigh_discovery" - }, - "bgp": { - "trap_group": "queue4_group1", - "trap_ids": "bgp,bgpv6" - }, - "dhcp": { - "trap_group": "queue4_group3", - "trap_ids": "dhcp,dhcpv6" - }, - "ip2me": { - "trap_group": "queue1_group1", - "trap_ids": "ip2me" - }, - "lacp": { - "trap_group": "queue4_group1", - "trap_ids": "lacp" - }, - "lldp": { - "trap_group": "queue4_group3", - "trap_ids": "lldp" - }, - "nat": { - "trap_group": "queue1_group2", - "trap_ids": "src_nat_miss,dest_nat_miss" - }, - "sflow": { - "trap_group": "queue2_group1", - "trap_ids": "sample_packet" - }, - "ssh": { - "trap_group": "queue4_group2", - "trap_ids": "ssh" - }, - "udld": { - "trap_group": "queue4_group3", - "trap_ids": "udld" - } - }, - "CRM": { - "Config": { - "acl_counter_high_threshold": "85", - "acl_counter_low_threshold": "70", - "acl_counter_threshold_type": "percentage", - "acl_entry_high_threshold": "85", - "acl_entry_low_threshold": "70", - "acl_entry_threshold_type": "percentage", - "acl_group_high_threshold": "85", - "acl_group_low_threshold": "70", - "acl_group_threshold_type": "percentage", - "acl_table_high_threshold": "85", - "acl_table_low_threshold": "70", - "acl_table_threshold_type": "percentage", - "dnat_entry_high_threshold": "85", - "dnat_entry_low_threshold": "70", - "dnat_entry_threshold_type": "percentage", - "fdb_entry_high_threshold": "85", - "fdb_entry_low_threshold": "70", - "fdb_entry_threshold_type": "percentage", - "ipmc_entry_high_threshold": "85", - 
"ipmc_entry_low_threshold": "70", - "ipmc_entry_threshold_type": "percentage", - "ipv4_neighbor_high_threshold": "85", - "ipv4_neighbor_low_threshold": "70", - "ipv4_neighbor_threshold_type": "percentage", - "ipv4_nexthop_high_threshold": "85", - "ipv4_nexthop_low_threshold": "70", - "ipv4_nexthop_threshold_type": "percentage", - "ipv4_route_high_threshold": "85", - "ipv4_route_low_threshold": "70", - "ipv4_route_threshold_type": "percentage", - "ipv6_neighbor_high_threshold": "85", - "ipv6_neighbor_low_threshold": "70", - "ipv6_neighbor_threshold_type": "percentage", - "ipv6_nexthop_high_threshold": "85", - "ipv6_nexthop_low_threshold": "70", - "ipv6_nexthop_threshold_type": "percentage", - "ipv6_route_high_threshold": "85", - "ipv6_route_low_threshold": "70", - "ipv6_route_threshold_type": "percentage", - "nexthop_group_high_threshold": "85", - "nexthop_group_low_threshold": "70", - "nexthop_group_member_high_threshold": "85", - "nexthop_group_member_low_threshold": "70", - "nexthop_group_member_threshold_type": "percentage", - "nexthop_group_threshold_type": "percentage", - "polling_interval": "300", - "snat_entry_high_threshold": "85", - "snat_entry_low_threshold": "70", - "snat_entry_threshold_type": "percentage" - } - }, - "DEVICE_METADATA": { - "localhost": { - "buffer_model": "traditional", - "default_bgp_status": "up", - "default_pfcwd_status": "disable", - "hostname": "r-bulldog-02", - "hwsku": "ACS-MSN2100", - "mac": "98:03:9b:f8:e7:c0", - "platform": "x86_64-mlnx_msn2100-r0", - "type": "ToRRouter" - } - }, - "FEATURE": { - "bgp": { - "auto_restart": "enabled", - "has_global_scope": "False", - "has_per_asic_scope": "True", - "has_timer": "False", - "high_mem_alert": "disabled", - "state": "enabled" - }, - "database": { - "auto_restart": "disabled", - "has_global_scope": "True", - "has_per_asic_scope": "True", - "has_timer": "False", - "high_mem_alert": "disabled", - "state": "enabled" - }, - "dhcp_relay": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "state": "enabled" - }, - "lldp": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "True", - "has_timer": "False", - "high_mem_alert": "disabled", - "state": "enabled", - "status": "enabled" - }, - "mgmt-framework": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "True", - "high_mem_alert": "disabled", - "state": "enabled" - }, - "nat": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "state": "disabled" - }, - "pmon": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "state": "enabled" - }, - "radv": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "state": "enabled" - }, - "sflow": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "False", - "high_mem_alert": "disabled", - "state": "disabled" - }, - "snmp": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "True", - "high_mem_alert": "disabled", - "state": "enabled" - }, - "swss": { - "auto_restart": "enabled", - "has_global_scope": "False", - 
"has_per_asic_scope": "True", - "has_timer": "False", - "high_mem_alert": "disabled", - "state": "enabled" - }, - "syncd": { - "auto_restart": "enabled", - "has_global_scope": "False", - "has_per_asic_scope": "True", - "has_timer": "False", - "high_mem_alert": "disabled", - "state": "enabled" - }, - "teamd": { - "auto_restart": "enabled", - "has_global_scope": "False", - "has_per_asic_scope": "True", - "has_timer": "False", - "high_mem_alert": "disabled", - "state": "enabled" - }, - "telemetry": { - "auto_restart": "enabled", - "has_global_scope": "True", - "has_per_asic_scope": "False", - "has_timer": "True", - "high_mem_alert": "disabled", - "state": "enabled" - }, - "what-just-happened": { - "auto_restart": "disabled", - "has_timer": "True", - "high_mem_alert": "disabled", - "state": "enabled" - } - }, - "FLEX_COUNTER_TABLE": { - "BUFFER_POOL_WATERMARK": { - "FLEX_COUNTER_STATUS": "enable" - }, - "PFCWD": { - "FLEX_COUNTER_STATUS": "enable" - }, - "PG_WATERMARK": { - "FLEX_COUNTER_STATUS": "enable" - }, - "PORT": { - "FLEX_COUNTER_STATUS": "enable" - }, - "PORT_BUFFER_DROP": { - "FLEX_COUNTER_STATUS": "enable" - }, - "QUEUE": { - "FLEX_COUNTER_STATUS": "enable" - }, - "QUEUE_WATERMARK": { - "FLEX_COUNTER_STATUS": "enable" - }, - "RIF": { - "FLEX_COUNTER_STATUS": "enable" - } - }, - "KDUMP": { - "config": { - "enabled": "false", - "num_dumps": "3", - "memory": "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" - } - }, - "MGMT_INTERFACE": { - "eth0|10.210.25.44/22": { - "gwaddr": "10.210.24.1" - } - }, - "PORT": { - "Ethernet0": { - "admin_status": "up", - "alias": "etp1", - "index": "1", - "lanes": "0,1,2,3", - "speed": "100000" - }, - "Ethernet12": { - "admin_status": "up", - "alias": "etp4a", - "index": "4", - "lanes": "12,13", - "speed": "50000" - }, - "Ethernet14": { - "admin_status": "up", - "alias": "etp4b", - "index": "4", - "lanes": "14,15", - "speed": "50000" - }, - "Ethernet16": { - "admin_status": "up", - "alias": "etp5a", - "index": "5", - "lanes": "16,17", - "speed": "50000" - }, - "Ethernet18": { - "admin_status": "up", - "alias": "etp5b", - "index": "5", - "lanes": "18,19", - "speed": "50000" - }, - "Ethernet20": { - "admin_status": "up", - "alias": "etp6a", - "index": "6", - "lanes": "20", - "speed": "25000" - }, - "Ethernet21": { - "admin_status": "up", - "alias": "etp6b", - "index": "6", - "lanes": "21", - "speed": "25000" - }, - "Ethernet22": { - "admin_status": "up", - "alias": "etp6c", - "index": "6", - "lanes": "22", - "speed": "25000" - }, - "Ethernet23": { - "admin_status": "up", - "alias": "etp6d", - "index": "6", - "lanes": "23", - "speed": "25000" - }, - "Ethernet24": { - "admin_status": "up", - "alias": "etp7a", - "index": "7", - "lanes": "24", - "speed": "25000" - }, - "Ethernet25": { - "admin_status": "up", - "alias": "etp7b", - "index": "7", - "lanes": "25", - "speed": "25000" - }, - "Ethernet26": { - "admin_status": "up", - "alias": "etp7c", - "index": "7", - "lanes": "26", - "speed": "25000" - }, - "Ethernet27": { - "admin_status": "up", - "alias": "etp7d", - "index": "7", - "lanes": "27", - "speed": "25000" - }, - "Ethernet28": { - "admin_status": "up", - "alias": "etp8", - "index": "8", - "lanes": "28,29,30,31", - "speed": "100000" - }, - "Ethernet32": { - "admin_status": "up", - "alias": "etp9", - "index": "9", - "lanes": "32,33,34,35", - "speed": "100000" - }, - "Ethernet36": { - "admin_status": "up", - "alias": "etp10", - "index": "10", - "lanes": "36,37,38,39", - "speed": "100000" - }, - "Ethernet4": { - "admin_status": "up", - "alias": "etp2", - "index": 
"2", - "lanes": "4,5,6,7", - "speed": "100000" - }, - "Ethernet40": { - "admin_status": "up", - "alias": "etp11", - "index": "11", - "lanes": "40,41,42,43", - "speed": "100000" - }, - "Ethernet44": { - "admin_status": "up", - "alias": "etp12", - "index": "12", - "lanes": "44,45,46,47", - "speed": "100000" - }, - "Ethernet48": { - "admin_status": "up", - "alias": "etp13", - "index": "13", - "lanes": "48,49,50,51", - "speed": "100000" - }, - "Ethernet52": { - "admin_status": "up", - "alias": "etp14", - "index": "14", - "lanes": "52,53,54,55", - "speed": "100000" - }, - "Ethernet56": { - "admin_status": "up", - "alias": "etp15", - "index": "15", - "lanes": "56,57,58,59", - "speed": "100000" - }, - "Ethernet60": { - "admin_status": "up", - "alias": "etp16", - "index": "16", - "lanes": "60,61,62,63", - "speed": "100000" - }, - "Ethernet8": { - "admin_status": "up", - "alias": "etp3", - "index": "3", - "lanes": "8,9,10,11", - "speed": "100000" - } - }, - "SNMP": { - "LOCATION": { - "Location": "public" - } - }, - "SNMP_COMMUNITY": { - "public": { - "TYPE": "RO" - } - }, - "VERSIONS": { - "DATABASE": { - "VERSION": "version_2_0_0" - } - }, - "WJH": { - "global": { - "mode": "debug", - "nice_level": "1", - "pci_bandwidth": "50" - } - }, - "WJH_CHANNEL": { - "forwarding": { - "drop_category_list": "L2,L3,Tunnel", - "type": "raw_and_aggregated" - }, - "layer-1": { - "drop_category_list": "L1", - "type": "raw_and_aggregated" - } - } -} diff --git a/tests/cli_autogen_input/sonic-1-list.yang b/tests/cli_autogen_input/sonic-1-list.yang deleted file mode 100644 index 79a6529b3d..0000000000 --- a/tests/cli_autogen_input/sonic-1-list.yang +++ /dev/null @@ -1,29 +0,0 @@ -module sonic-1-list { - - yang-version 1.1; - - namespace "http://github.com/Azure/s-1-list"; - prefix s-1-list; - - container sonic-1-list { - /* sonic-1-list - top level container */ - - container TABLE_1 { - /* TABLE_1 - table container */ - - description "TABLE_1 description"; - - list TABLE_1_LIST { - /* TABLE_1 - object container */ - - description "TABLE_1_LIST description"; - - key "key_name"; - - leaf key_name { - type string; - } - } - } - } -} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-1-object-container.yang b/tests/cli_autogen_input/sonic-1-object-container.yang deleted file mode 100644 index e28ef7f90a..0000000000 --- a/tests/cli_autogen_input/sonic-1-object-container.yang +++ /dev/null @@ -1,23 +0,0 @@ -module sonic-1-object-container { - - yang-version 1.1; - - namespace "http://github.com/Azure/s-1-object"; - prefix s-1-object; - - container sonic-1-object-container { - /* sonic-1-object-container - top level container */ - - container TABLE_1 { - /* TABLE_1 - table container */ - - description "TABLE_1 description"; - - container OBJECT_1 { - /* OBJECT_1 - object container */ - - description "OBJECT_1 description"; - } - } - } -} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-1-table-container.yang b/tests/cli_autogen_input/sonic-1-table-container.yang deleted file mode 100644 index 58e7293c0d..0000000000 --- a/tests/cli_autogen_input/sonic-1-table-container.yang +++ /dev/null @@ -1,17 +0,0 @@ -module sonic-1-table-container { - - yang-version 1.1; - - namespace "http://github.com/Azure/s-1-table"; - prefix s-1-table; - - container sonic-1-table-container { - /* sonic-1-table-container - top level container */ - - container TABLE_1 { - /* TABLE_1 - table container */ - - description "TABLE_1 description"; - } - } -} \ No newline at end of file diff --git 
a/tests/cli_autogen_input/sonic-2-lists.yang b/tests/cli_autogen_input/sonic-2-lists.yang deleted file mode 100644 index b20200415b..0000000000 --- a/tests/cli_autogen_input/sonic-2-lists.yang +++ /dev/null @@ -1,42 +0,0 @@ -module sonic-2-lists { - - yang-version 1.1; - - namespace "http://github.com/Azure/s-2-lists"; - prefix s-2-lists; - - container sonic-2-lists { - /* sonic-2-lists - top level container */ - - container TABLE_1 { - /* TALBE_1 - table container */ - - - description "TABLE_1 description"; - - list TABLE_1_LIST_1 { - /* TALBE_1_LIST_1 - object container */ - - description "TABLE_1_LIST_1 description"; - - key "key_name1"; - - leaf key_name1 { - type string; - } - } - - list TABLE_1_LIST_2 { - /* TALBE_1_LIST_2 - object container */ - - description "TABLE_1_LIST_2 description"; - - key "key_name2"; - - leaf key_name2 { - type string; - } - } - } - } -} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-2-object-containers.yang b/tests/cli_autogen_input/sonic-2-object-containers.yang deleted file mode 100644 index 249faf4c89..0000000000 --- a/tests/cli_autogen_input/sonic-2-object-containers.yang +++ /dev/null @@ -1,29 +0,0 @@ -module sonic-2-object-containers { - - yang-version 1.1; - - namespace "http://github.com/Azure/s-2-object"; - prefix s-2-object; - - container sonic-2-object-containers { - /* sonic-2-object-containers - top level container */ - - container TABLE_1 { - /* TABLE_1 - table container */ - - description "FIRST_TABLE description"; - - container OBJECT_1 { - /* OBJECT_1 - object container */ - - description "OBJECT_1 description"; - } - - container OBJECT_2 { - /* OBJECT_2 - object container */ - - description "OBJECT_2 description"; - } - } - } -} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-2-table-containers.yang b/tests/cli_autogen_input/sonic-2-table-containers.yang deleted file mode 100644 index 393512a313..0000000000 --- a/tests/cli_autogen_input/sonic-2-table-containers.yang +++ /dev/null @@ -1,23 +0,0 @@ -module sonic-2-table-containers { - - yang-version 1.1; - - namespace "http://github.com/Azure/s-2-table"; - prefix s-2-table; - - container sonic-2-table-containers { - /* sonic-2-table-containers - top level container */ - - container TABLE_1 { - /* TABLE_1 - table container */ - - description "TABLE_1 description"; - } - - container TABLE_2 { - /* TABLE_2 - table container */ - - description "TABLE_2 description"; - } - } -} diff --git a/tests/cli_autogen_input/sonic-choice-complex.yang b/tests/cli_autogen_input/sonic-choice-complex.yang deleted file mode 100644 index 7d6a66d89f..0000000000 --- a/tests/cli_autogen_input/sonic-choice-complex.yang +++ /dev/null @@ -1,91 +0,0 @@ -module sonic-choice-complex { - - yang-version 1.1; - - namespace "http://github.com/Azure/choice-complex"; - prefix choice-complex; - - import sonic-grouping-1 { - prefix sgroup1; - } - - import sonic-grouping-2 { - prefix sgroup2; - } - - grouping GR_5 { - leaf GR_5_LEAF_1 { - type string; - } - - leaf GR_5_LEAF_2 { - type string; - } - } - - grouping GR_6 { - leaf GR_6_LEAF_1 { - type string; - } - - leaf GR_6_LEAF_2 { - type string; - } - } - - container sonic-choice-complex { - /* sonic-choice-complex - top level container */ - - container TABLE_1 { - /* TABLE_1 - table container */ - - description "TABLE_1 description"; - - container OBJECT_1 { - /* OBJECT_1 - object container, it have - * 1 choice, which have 2 cases. 
- * first case have: 1 leaf, 1 leaf-list, 1 uses - * second case have: 2 leafs, 2 leaf-lists, 2 uses - */ - - description "OBJECT_1 description"; - - choice CHOICE_1 { - case CHOICE_1_CASE_1 { - leaf LEAF_1 { - type uint16; - } - - leaf-list LEAF_LIST_1 { - type string; - } - - uses sgroup1:GR_1; - } - - case CHOICE_1_CASE_2 { - leaf LEAF_2 { - type string; - } - - leaf LEAF_3 { - type string; - } - - leaf-list LEAF_LIST_2 { - type string; - } - - leaf-list LEAF_LIST_3 { - type string; - } - - uses GR_5; - uses sgroup1:GR_2; - uses sgroup2:GR_3; - } - } - } - } - } -} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-dynamic-object-complex-1.yang b/tests/cli_autogen_input/sonic-dynamic-object-complex-1.yang deleted file mode 100644 index 9beb98549d..0000000000 --- a/tests/cli_autogen_input/sonic-dynamic-object-complex-1.yang +++ /dev/null @@ -1,57 +0,0 @@ -module sonic-dynamic-object-complex-1 { - - yang-version 1.1; - - namespace "http://github.com/Azure/dynamic-complex-1"; - prefix dynamic-complex-1; - - container sonic-dynamic-object-complex-1 { - /* sonic-dynamic-object-complex-1 - top level container */ - - container TABLE_1 { - /* TABLE_1 - table container */ - - description "TABLE_1 description"; - - list OBJECT_1_LIST { - /* OBJECT_1_LIST - dynamic object container, it have: - * 1 key, - * 1 leaf, - * 1 leaf-list - * 1 choice - */ - - description "OBJECT_1_LIST description"; - - key "KEY_LEAF_1"; - - leaf KEY_LEAF_1 { - description "KEY_LEAF_1 description"; - type string; - } - - leaf OBJ_1_LEAF_1 { - description "OBJ_1_LEAF_1 description"; - type string; - } - - leaf-list OBJ_1_LEAF_LIST_1 { - type string; - } - - choice OBJ_1_CHOICE_1 { - case OBJ_1_CHOICE_1_CASE_1 { - leaf OBJ_1_CHOICE_1_LEAF_1 { - type uint16; - } - } - case OBJ_1_CHOICE_1_CASE_2 { - leaf OBJ_1_CHOICE_1_LEAF_2 { - type string; - } - } - } - } - } - } -} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-dynamic-object-complex-2.yang b/tests/cli_autogen_input/sonic-dynamic-object-complex-2.yang deleted file mode 100644 index 00e25c8135..0000000000 --- a/tests/cli_autogen_input/sonic-dynamic-object-complex-2.yang +++ /dev/null @@ -1,84 +0,0 @@ -module sonic-dynamic-object-complex-2 { - - yang-version 1.1; - - namespace "http://github.com/Azure/dynamic-complex-2"; - prefix dynamic-complex-2; - - container sonic-dynamic-object-complex-2 { - /* sonic-dynamic-object-complex-2 - top level container */ - - container TABLE_1 { - /* TABLE_1 - table container */ - - description "TABLE_1 description"; - - list OBJECT_1_LIST { - /* OBJECT_1_LIST - dynamic object container, it have: - * 2 keys - * 2 leaf, - * 2 leaf-list - * 2 choice - */ - - description "OBJECT_1_LIST description"; - - key "KEY_LEAF_1 KEY_LEAF_2"; - - leaf KEY_LEAF_1 { - description "KEY_LEAF_1 description"; - type string; - } - - leaf KEY_LEAF_2 { - description "KEY_LEAF_2 description"; - type string; - } - - leaf OBJ_1_LEAF_1 { - description "OBJ_1_LEAF_1 description"; - type string; - } - - leaf OBJ_1_LEAF_2 { - description "OBJ_1_LEAF_2 description"; - type string; - } - - leaf-list OBJ_1_LEAF_LIST_1 { - type string; - } - - leaf-list OBJ_1_LEAF_LIST_2 { - type string; - } - - choice OBJ_1_CHOICE_1 { - case OBJ_1_CHOICE_1_CASE_1 { - leaf OBJ_1_CHOICE_1_LEAF_1 { - type uint16; - } - } - case OBJ_1_CHOICE_1_CASE_2 { - leaf OBJ_1_CHOICE_1_LEAF_2 { - type string; - } - } - } - - choice OBJ_1_CHOICE_2 { - case OBJ_1_CHOICE_2_CASE_1 { - leaf OBJ_1_CHOICE_2_LEAF_1 { - type uint16; - } - } - case OBJ_1_CHOICE_2_CASE_2 { 
- leaf OBJ_1_CHOICE_2_LEAF_2 { - type string; - } - } - } - } - } - } -} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-grouping-1.yang b/tests/cli_autogen_input/sonic-grouping-1.yang deleted file mode 100644 index 831c3a4ad8..0000000000 --- a/tests/cli_autogen_input/sonic-grouping-1.yang +++ /dev/null @@ -1,25 +0,0 @@ -module sonic-grouping-1{ - - yang-version 1.1; - - namespace "http://github.com/Azure/s-grouping-1"; - prefix s-grouping-1; - - grouping GR_1 { - leaf GR_1_LEAF_1 { - type string; - } - leaf GR_1_LEAF_2 { - type string; - } - } - - grouping GR_2 { - leaf GR_2_LEAF_1 { - type string; - } - leaf GR_2_LEAF_2 { - type string; - } - } -} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-grouping-2.yang b/tests/cli_autogen_input/sonic-grouping-2.yang deleted file mode 100644 index bfaa13db15..0000000000 --- a/tests/cli_autogen_input/sonic-grouping-2.yang +++ /dev/null @@ -1,25 +0,0 @@ -module sonic-grouping-2 { - - yang-version 1.1; - - namespace "http://github.com/Azure/s-grouping-2"; - prefix s-grouping-2; - - grouping GR_3 { - leaf GR_3_LEAF_1 { - type string; - } - leaf GR_3_LEAF_2 { - type string; - } - } - - grouping GR_4 { - leaf GR_4_LEAF_1 { - type string; - } - leaf GR_4_LEAF_2 { - type string; - } - } -} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-grouping-complex.yang b/tests/cli_autogen_input/sonic-grouping-complex.yang deleted file mode 100644 index d6ed68563a..0000000000 --- a/tests/cli_autogen_input/sonic-grouping-complex.yang +++ /dev/null @@ -1,96 +0,0 @@ -module sonic-grouping-complex { - - yang-version 1.1; - - namespace "http://github.com/Azure/grouping-complex"; - prefix grouping-complex; - - import sonic-grouping-1 { - prefix sgroup1; - } - - import sonic-grouping-2 { - prefix sgroup2; - } - - grouping GR_5 { - leaf GR_5_LEAF_1 { - type string; - } - - leaf-list GR_5_LEAF_LIST_1 { - type string; - } - } - - grouping GR_6 { - leaf GR_6_LEAF_1 { - type string; - } - - leaf GR_6_LEAF_2 { - type string; - } - - choice GR_6_CHOICE_1 { - case CHOICE_1_CASE_1 { - leaf GR_6_CASE_1_LEAF_1 { - type uint16; - } - - leaf-list GR_6_CASE_1_LEAF_LIST_1 { - type string; - } - } - - case CHOICE_1_CASE_2 { - leaf GR_6_CASE_2_LEAF_1 { - type uint16; - } - - leaf GR_6_CASE_2_LEAF_2 { - type uint16; - } - - leaf-list GR_6_CASE_2_LEAF_LIST_1 { - type string; - } - - leaf-list GR_6_CASE_2_LEAF_LIST_2 { - type string; - } - } - } - } - - container sonic-grouping-complex { - /* sonic-grouping-complex - top level container */ - - container TABLE_1 { - /* TABLE_1 - table container */ - - description "TABLE_1 description"; - - container OBJECT_1 { - /* OBJECT_1 - object container, it have - * 1 choice, which have 2 cases. 
- * first case have: 1 leaf, 1 leaf-list, 1 uses - * second case have: 2 leafs, 2 leaf-lists, 2 uses - */ - - description "OBJECT_1 description"; - - uses sgroup1:GR_1; - } - - container OBJECT_2 { - - description "OBJECT_2 description"; - - uses GR_5; - uses GR_6; - uses sgroup2:GR_4; - } - } - } -} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-static-object-complex-1.yang b/tests/cli_autogen_input/sonic-static-object-complex-1.yang deleted file mode 100644 index a7dfee86ab..0000000000 --- a/tests/cli_autogen_input/sonic-static-object-complex-1.yang +++ /dev/null @@ -1,49 +0,0 @@ -module sonic-static-object-complex-1 { - - yang-version 1.1; - - namespace "http://github.com/Azure/static-complex-1"; - prefix static-complex-1; - - container sonic-static-object-complex-1 { - /* sonic-static-object-complex-1 - top level container */ - - container TABLE_1 { - /* TABLE_1 - table container */ - - description "TABLE_1 description"; - - container OBJECT_1 { - /* OBJECT_1 - object container, it have: - * 1 leaf, - * 1 leaf-list - * 1 choice - */ - - description "OBJECT_1 description"; - - leaf OBJ_1_LEAF_1 { - description "OBJ_1_LEAF_1 description"; - type string; - } - - leaf-list OBJ_1_LEAF_LIST_1 { - type string; - } - - choice OBJ_1_CHOICE_1 { - case OBJ_1_CHOICE_1_CASE_1 { - leaf OBJ_1_CHOICE_1_LEAF_1 { - type uint16; - } - } - case OBJ_1_CHOICE_1_CASE_2 { - leaf OBJ_1_CHOICE_1_LEAF_2 { - type string; - } - } - } - } - } - } -} \ No newline at end of file diff --git a/tests/cli_autogen_input/sonic-static-object-complex-2.yang b/tests/cli_autogen_input/sonic-static-object-complex-2.yang deleted file mode 100644 index 451a445ce6..0000000000 --- a/tests/cli_autogen_input/sonic-static-object-complex-2.yang +++ /dev/null @@ -1,71 +0,0 @@ -module sonic-static-object-complex-2 { - - yang-version 1.1; - - namespace "http://github.com/Azure/static-complex-2"; - prefix static-complex-2; - - container sonic-static-object-complex-2 { - /* sonic-static-object-complex-2 - top level container */ - - container TABLE_1 { - /* TABLE_1 - table container */ - - description "TABLE_1 description"; - - container OBJECT_1 { - /* OBJECT_1 - object container, it have: - * 2 leafs, - * 2 leaf-lists, - * 2 choices - */ - - description "OBJECT_1 description"; - - leaf OBJ_1_LEAF_1 { - description "OBJ_1_LEAF_1 description"; - type string; - } - - leaf OBJ_1_LEAF_2 { - description "OBJ_1_LEAF_2 description"; - type string; - } - - leaf-list OBJ_1_LEAF_LIST_1 { - type string; - } - - leaf-list OBJ_1_LEAF_LIST_2 { - type string; - } - - choice OBJ_1_CHOICE_1 { - case OBJ_1_CHOICE_1_CASE_1 { - leaf OBJ_1_CHOICE_1_LEAF_1 { - type uint16; - } - } - case OBJ_1_CHOICE_1_CASE_2 { - leaf OBJ_1_CHOICE_1_LEAF_2 { - type string; - } - } - } - - choice OBJ_1_CHOICE_2 { - case OBJ_1_CHOICE_2_CASE_1 { - leaf OBJ_1_CHOICE_2_LEAF_1 { - type uint16; - } - } - case OBJ_1_CHOICE_2_CASE_2 { - leaf OBJ_1_CHOICE_2_LEAF_2 { - type string; - } - } - } - } - } - } -} \ No newline at end of file diff --git a/tests/cli_autogen_yang_parser_test.py b/tests/cli_autogen_yang_parser_test.py deleted file mode 100644 index 9ed915c69b..0000000000 --- a/tests/cli_autogen_yang_parser_test.py +++ /dev/null @@ -1,196 +0,0 @@ -import os -import logging -import pprint - -from sonic_cli_gen.yang_parser import YangParser -from .cli_autogen_input import assert_dictionaries - -logger = logging.getLogger(__name__) - -test_path = os.path.dirname(os.path.abspath(__file__)) -yang_models_path = '/usr/local/yang-models' -test_yang_models = [ - 
'sonic-1-table-container', - 'sonic-2-table-containers', - 'sonic-1-object-container', - 'sonic-2-object-containers', - 'sonic-1-list', - 'sonic-2-lists', - 'sonic-static-object-complex-1', - 'sonic-static-object-complex-2', - 'sonic-dynamic-object-complex-1', - 'sonic-dynamic-object-complex-2', - 'sonic-choice-complex', - 'sonic-grouping-complex', - 'sonic-grouping-1', - 'sonic-grouping-2', -] - - -class TestYangParser: - @classmethod - def setup_class(cls): - logger.info("SETUP") - os.environ['UTILITIES_UNIT_TESTING'] = "1" - move_yang_models() - - @classmethod - def teardown_class(cls): - logger.info("TEARDOWN") - os.environ['UTILITIES_UNIT_TESTING'] = "0" - remove_yang_models() - - def test_1_table_container(self): - """ Test for 1 'table' container - 'table' container represent TABLE in Config DB schema: - { - "TABLE": { - "OBJECT": { - "attr": "value" - ... - } - } - } - """ - - base_test('sonic-1-table-container', - assert_dictionaries.one_table_container) - - def test_2_table_containers(self): - """ Test for 2 'table' containers """ - - base_test('sonic-2-table-containers', - assert_dictionaries.two_table_containers) - - def test_1_object_container(self): - """ Test for 1 'object' container - 'object' container represent OBJECT in Config DB schema: - { - "TABLE": { - "OBJECT": { - "attr": "value" - ... - } - } - } - """ - - base_test('sonic-1-object-container', - assert_dictionaries.one_object_container) - - def test_2_object_containers(self): - """ Test for 2 'object' containers """ - - base_test('sonic-2-object-containers', - assert_dictionaries.two_object_containers) - - def test_1_list(self): - """ Test for 1 container that has inside - the YANG 'list' entity - """ - - base_test('sonic-1-list', assert_dictionaries.one_list) - - def test_2_lists(self): - """ Test for 2 containers that have inside - the YANG 'list' entity - """ - - base_test('sonic-2-lists', assert_dictionaries.two_lists) - - def test_static_object_complex_1(self): - """ Test for the object container with: - 1 leaf, 1 leaf-list, 1 choice. - """ - - base_test('sonic-static-object-complex-1', - assert_dictionaries.static_object_complex_1) - - def test_static_object_complex_2(self): - """ Test for object container with: - 2 leafs, 2 leaf-lists, 2 choices. - """ - - base_test('sonic-static-object-complex-2', - assert_dictionaries.static_object_complex_2) - - def test_dynamic_object_complex_1(self): - """ Test for object container with: - 1 key, 1 leaf, 1 leaf-list, 1 choice. - """ - - base_test('sonic-dynamic-object-complex-1', - assert_dictionaries.dynamic_object_complex_1) - - def test_dynamic_object_complex_2(self): - """ Test for object container with: - 2 keys, 2 leafs, 2 leaf-list, 2 choice. - """ - - base_test('sonic-dynamic-object-complex-2', - assert_dictionaries.dynamic_object_complex_2) - - def test_choice_complex(self): - """ Test for object container with the 'choice' - that have complex strucutre: - leafs, leaf-lists, multiple 'uses' from different files - """ - - base_test('sonic-choice-complex', - assert_dictionaries.choice_complex) - - def test_grouping_complex(self): - """ Test for object container with multitple 'uses' that using 'grouping' - from different files. 
The used 'grouping' have a complex structure: - leafs, leaf-lists, choices - """ - - base_test('sonic-grouping-complex', - assert_dictionaries.grouping_complex) - - -def base_test(yang_model_name, correct_dict): - """ General logic for each test case """ - - config_db_path = os.path.join(test_path, - 'cli_autogen_input/config_db.json') - parser = YangParser(yang_model_name=yang_model_name, - config_db_path=config_db_path, - allow_tbl_without_yang=True, - debug=False) - yang_dict = parser.parse_yang_model() - pretty_log_debug(yang_dict) - assert yang_dict == correct_dict - - -def move_yang_models(): - """ Move a test YANG models to known location - in order to be parsed by YangParser class - """ - - for yang_model in test_yang_models: - src_path = os.path.join(test_path, - 'cli_autogen_input', - yang_model + '.yang') - cmd = 'sudo cp {} {}'.format(src_path, yang_models_path) - os.system(cmd) - - -def remove_yang_models(): - """ Remove a test YANG models to known location - in order to be parsed by YangParser class - """ - - for yang_model in test_yang_models: - yang_model_path = os.path.join(yang_models_path, - yang_model + '.yang') - cmd = 'sudo rm {}'.format(yang_model_path) - os.system(cmd) - - -def pretty_log_debug(dictionary): - """ Pretty print of parsed dictionary """ - - for line in pprint.pformat(dictionary).split('\n'): - logging.debug(line) - diff --git a/utilities_common/util_base.py b/utilities_common/util_base.py index 9bea158b59..ff5570735c 100644 --- a/utilities_common/util_base.py +++ b/utilities_common/util_base.py @@ -24,7 +24,6 @@ def iter_namespace(ns_pkg): for _, module_name, ispkg in iter_namespace(plugins_namespace): if ispkg: - yield from self.load_plugins(importlib.import_module(module_name)) continue log.log_debug('importing plugin: {}'.format(module_name)) try: From a708f06097c7f8d96e11836cdbcf07577413781b Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Thu, 12 Aug 2021 05:44:16 +0000 Subject: [PATCH 15/60] config feature added Signed-off-by: Vivek Reddy Karri --- config/feature.py | 50 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/config/feature.py b/config/feature.py index 950671a3d7..c41ae09e3e 100644 --- a/config/feature.py +++ b/config/feature.py @@ -100,3 +100,53 @@ def feature_autorestart(db, name, autorestart): for ns, cfgdb in db.cfgdb_clients.items(): cfgdb.mod_entry('FEATURE', name, {'auto_restart': autorestart}) + +# +# 'auto_techsupport' command ('config feature auto_techsupport ...') +# +@feature.command(name='auto_techsupport', short_help="Enable/disable auto_techsupport capability for the processes running inside of this feature") +@click.argument('name', metavar='', required=True) +@click.argument('auto_techsupport', metavar='', required=True, type=click.Choice(["enabled", "disabled"])) +@pass_db +def feature_auto_techsupport(db, name, auto_techsupport): + """Enable/disable auto_techsupport capability for the processes running inside of this feature""" + entry_data_set = set() + + for ns, cfgdb in db.cfgdb_clients.items(): + entry_data = cfgdb.get_entry('FEATURE', name) + if not entry_data: + click.echo("Feature '{}' doesn't exist".format(name)) + sys.exit(1) + entry_data_set.add(entry_data['auto_techsupport']) + + if len(entry_data_set) > 1: + click.echo("Feature '{}' auto_techsupport is not consistent across namespaces".format(name)) + sys.exit(1) + + for ns, cfgdb in db.cfgdb_clients.items(): + cfgdb.mod_entry('FEATURE', name, {'auto_techsupport': auto_techsupport}) + +# +# 'cooloff' 
command ('config feature cooloff ...')
+#
+@feature.command(name='cooloff', short_help="Set the cooloff period in seconds for the auto_techsupport capability")
+@click.argument('name', metavar='', required=True)
+@click.argument('cooloff', metavar='', required=True, type=int)
+@pass_db
+def feature_cooloff(db, name, cooloff):
+    """Set the cooloff period in seconds for the auto_techsupport capability"""
+    entry_data_set = set()
+
+    for ns, cfgdb in db.cfgdb_clients.items():
+        entry_data = cfgdb.get_entry('FEATURE', name)
+        if not entry_data:
+            click.echo("Feature '{}' doesn't exist".format(name))
+            sys.exit(1)
+        entry_data_set.add(entry_data['cooloff'])
+
+    if len(entry_data_set) > 1:
+        click.echo("Feature '{}' cooloff is not consistent across namespaces".format(name))
+        sys.exit(1)
+
+    for ns, cfgdb in db.cfgdb_clients.items():
+        cfgdb.mod_entry('FEATURE', name, {'cooloff': cooloff})
\ No newline at end of file

From e6122e9b27ebc0dfcd148824c069b59522833253 Mon Sep 17 00:00:00 2001
From: Vivek Reddy Karri
Date: Fri, 13 Aug 2021 04:53:26 +0000
Subject: [PATCH 16/60] Revert "[config][generic-update] Implementing patch sorting (#1599)"

This reverts commit 920bb878fd9be37d62d3237ba3ea97d4cfe5c2d8.
---
 generic_config_updater/generic_updater.py     |    8 +-
 generic_config_updater/gu_common.py           |  539 +----
 generic_config_updater/patch_sorter.py        | 1010 ----------
 setup.py                                      |    1 -
 .../files/any_config_db.json                  |    2 -
 .../files/any_other_config_db.json            |    4 -
 .../files/config_db_after_multi_patch.json    |    2 +-
 .../config_db_after_single_operation.json     |   83 -
 .../files/config_db_choice.json               |   17 -
 .../files/config_db_no_dependencies.json      |   39 -
 .../files/config_db_with_crm.json             |    9 -
 .../files/config_db_with_device_metadata.json |   16 -
 .../files/config_db_with_interface.json       |   20 -
 .../config_db_with_portchannel_and_acl.json   |   25 -
 .../config_db_with_portchannel_interface.json |   10 -
 .../contrainer_with_container_config_db.json  |    7 -
 .../files/dpb_1_split_full_config.json        |   35 -
 .../files/dpb_1_to_4.json-patch               |   88 -
 .../files/dpb_4_splits_full_config.json       |   65 -
 .../files/dpb_4_to_1.json-patch               |   58 -
 .../files/empty_config_db.json                |    2 -
 .../files/simple_config_db_inc_deps.json      |   20 -
 tests/generic_config_updater/gu_common_test.py | 310 +--
 .../patch_sorter_test.py                      | 1730 -----------------
 24 files changed, 23 insertions(+), 4077 deletions(-)
 delete mode 100644 generic_config_updater/patch_sorter.py
 delete mode 100644 tests/generic_config_updater/files/any_config_db.json
 delete mode 100644 tests/generic_config_updater/files/any_other_config_db.json
 delete mode 100644 tests/generic_config_updater/files/config_db_after_single_operation.json
 delete mode 100644 tests/generic_config_updater/files/config_db_choice.json
 delete mode 100644 tests/generic_config_updater/files/config_db_no_dependencies.json
 delete mode 100644 tests/generic_config_updater/files/config_db_with_crm.json
 delete mode 100644 tests/generic_config_updater/files/config_db_with_device_metadata.json
 delete mode 100644 tests/generic_config_updater/files/config_db_with_interface.json
 delete mode 100644 tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json
 delete mode 100644 tests/generic_config_updater/files/config_db_with_portchannel_interface.json
 delete mode 100644 tests/generic_config_updater/files/contrainer_with_container_config_db.json
 delete mode 100644 tests/generic_config_updater/files/dpb_1_split_full_config.json
 delete mode 100644 tests/generic_config_updater/files/dpb_1_to_4.json-patch
 delete mode 100644 
tests/generic_config_updater/files/dpb_4_splits_full_config.json delete mode 100644 tests/generic_config_updater/files/dpb_4_to_1.json-patch delete mode 100644 tests/generic_config_updater/files/empty_config_db.json delete mode 100644 tests/generic_config_updater/files/simple_config_db_inc_deps.json delete mode 100644 tests/generic_config_updater/patch_sorter_test.py diff --git a/generic_config_updater/generic_updater.py b/generic_config_updater/generic_updater.py index 061376b032..079d7ab742 100644 --- a/generic_config_updater/generic_updater.py +++ b/generic_config_updater/generic_updater.py @@ -3,7 +3,6 @@ from enum import Enum from .gu_common import GenericConfigUpdaterError, ConfigWrapper, \ DryRunConfigWrapper, PatchWrapper -from .patch_sorter import PatchSorter CHECKPOINTS_DIR = "/etc/sonic/checkpoints" CHECKPOINT_EXT = ".cp.json" @@ -17,6 +16,11 @@ def release_lock(self): # TODO: Implement ConfigLock pass +class PatchSorter: + def sort(self, patch): + # TODO: Implement patch sorter + raise NotImplementedError("PatchSorter.sort(patch) is not implemented yet") + class ChangeApplier: def apply(self, change): # TODO: Implement change applier @@ -32,7 +36,7 @@ def __init__(self, changeapplier=None, config_wrapper=None, patch_wrapper=None): - self.patchsorter = patchsorter if patchsorter is not None else PatchSorter(config_wrapper, patch_wrapper) + self.patchsorter = patchsorter if patchsorter is not None else PatchSorter() self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier() self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper() self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper() diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index 66d9b0d7d9..2aa6a36d8a 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -1,12 +1,8 @@ import json import jsonpatch -from jsonpointer import JsonPointer import sonic_yang import subprocess -import yang as ly import copy -import re -from enum import Enum YANG_DIR = "/usr/local/yang-models" @@ -14,26 +10,8 @@ class GenericConfigUpdaterError(Exception): pass class JsonChange: - """ - A class that describes a partial change to a JSON object. - It is is similar to JsonPatch, but the order of updating the configs is unknown. - Only the final outcome of the update can be retrieved. - It provides a single function to apply the change to a given JSON object. - """ - def __init__(self, patch): - self.patch = patch - - def apply(self, config): - return self.patch.apply(config) - - def __str__(self): - return f'{self.patch}' - - def __eq__(self, other): - """Overrides the default implementation""" - if isinstance(other, JsonChange): - return self.patch == other.patch - return False + # TODO: Implement JsonChange + pass class ConfigWrapper: def __init__(self, yang_dir = YANG_DIR): @@ -132,6 +110,14 @@ def crop_tables_without_yang(self, config_db_as_json): return sy.jIn + def _create_and_connect_config_db(self): + if self.default_config_db_connector != None: + return self.default_config_db_connector + + config_db = ConfigDBConnector() + config_db.connect() + return config_db + class DryRunConfigWrapper(ConfigWrapper): # TODO: implement DryRunConfigWrapper # This class will simulate all read/write operations to ConfigDB on a virtual storage unit. 
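For reference, a minimal sketch of how the two subcommands added in PATCH 15/60 could be
driven from a unit test, following the click CliRunner pattern used elsewhere in the
sonic-utilities tests. The module path config.main, the Db helper, the 'swss' feature
name, and the 300-second value are illustrative assumptions, not part of this series:

    # Hypothetical test sketch, not code from this patch series.
    from click.testing import CliRunner

    import config.main as config          # assumed entry point that registers the 'feature' group
    from utilities_common.db import Db    # assumed Db wrapper consumed by @pass_db

    runner = CliRunner()
    db = Db()

    # Enable auto_techsupport for an assumed 'swss' entry in the FEATURE table ...
    result = runner.invoke(config.config.commands["feature"].commands["auto_techsupport"],
                           ["swss", "enabled"], obj=db)
    assert result.exit_code == 0

    # ... then give it a 300-second cooloff between techsupport invocations.
    result = runner.invoke(config.config.commands["feature"].commands["cooloff"],
                           ["swss", "300"], obj=db)
    assert result.exit_code == 0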
@@ -140,12 +126,11 @@ class DryRunConfigWrapper(ConfigWrapper): class PatchWrapper: def __init__(self, config_wrapper=None): self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper() - self.path_addressing = PathAddressing() def validate_config_db_patch_has_yang_models(self, patch): config_db = {} for operation in patch: - tokens = self.path_addressing.get_path_tokens(operation[OperationWrapper.PATH_KEYWORD]) + tokens = operation['path'].split('/')[1:] if len(tokens) == 0: # Modifying whole config_db tables_dict = {table_name: {} for table_name in operation['value']} config_db.update(tables_dict) @@ -189,505 +174,3 @@ def convert_sonic_yang_patch_to_config_db_patch(self, patch): target_config_db = self.config_wrapper.convert_sonic_yang_to_config_db(target_yang) return self.generate_patch(current_config_db, target_config_db) - -class OperationType(Enum): - ADD = 1 - REMOVE = 2 - REPLACE = 3 - -class OperationWrapper: - OP_KEYWORD = "op" - PATH_KEYWORD = "path" - VALUE_KEYWORD = "value" - - def create(self, operation_type, path, value=None): - op_type = operation_type.name.lower() - - operation = {OperationWrapper.OP_KEYWORD: op_type, OperationWrapper.PATH_KEYWORD: path} - - if operation_type in [OperationType.ADD, OperationType.REPLACE]: - operation[OperationWrapper.VALUE_KEYWORD] = value - - return operation - -class PathAddressing: - """ - Path refers to the 'path' in JsonPatch operations: https://tools.ietf.org/html/rfc6902 - The path corresponds to JsonPointer: https://tools.ietf.org/html/rfc6901 - - All xpath operations in this class are only relevent to ConfigDb and the conversion to YANG xpath. - It is not meant to support all the xpath functionalities, just the ones relevent to ConfigDb/YANG. - """ - PATH_SEPARATOR = "/" - XPATH_SEPARATOR = "/" - def get_path_tokens(self, path): - return JsonPointer(path).parts - - def create_path(self, tokens): - return JsonPointer.from_parts(tokens).path - - def get_xpath_tokens(self, xpath): - """ - Splits the given xpath into tokens by '/'. 
- - Example: - xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode - tokens: sonic-vlan:sonic-vlan, VLAN_MEMBER, VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8'], tagging_mode - """ - if xpath == "": - raise ValueError("xpath cannot be empty") - - if xpath == "/": - return [] - - idx = 0 - tokens = [] - while idx < len(xpath): - end = self._get_xpath_token_end(idx+1, xpath) - token = xpath[idx+1:end] - tokens.append(token) - idx = end - - return tokens - - def _get_xpath_token_end(self, start, xpath): - idx = start - while idx < len(xpath): - if xpath[idx] == PathAddressing.XPATH_SEPARATOR: - break - elif xpath[idx] == "[": - idx = self._get_xpath_predicate_end(idx, xpath) - idx = idx+1 - - return idx - - def _get_xpath_predicate_end(self, start, xpath): - idx = start - while idx < len(xpath): - if xpath[idx] == "]": - break - elif xpath[idx] == "'": - idx = self._get_xpath_single_quote_str_end(idx, xpath) - elif xpath[idx] == '"': - idx = self._get_xpath_double_quote_str_end(idx, xpath) - - idx = idx+1 - - return idx - - def _get_xpath_single_quote_str_end(self, start, xpath): - idx = start+1 # skip first single quote - while idx < len(xpath): - if xpath[idx] == "'": - break - # libyang implements XPATH 1.0 which does not escape single quotes - # libyang src: https://netopeer.liberouter.org/doc/libyang/master/html/howtoxpath.html - # XPATH 1.0 src: https://www.w3.org/TR/1999/REC-xpath-19991116/#NT-Literal - idx = idx+1 - - return idx - - def _get_xpath_double_quote_str_end(self, start, xpath): - idx = start+1 # skip first single quote - while idx < len(xpath): - if xpath[idx] == '"': - break - # libyang implements XPATH 1.0 which does not escape double quotes - # libyang src: https://netopeer.liberouter.org/doc/libyang/master/html/howtoxpath.html - # XPATH 1.0 src: https://www.w3.org/TR/1999/REC-xpath-19991116/#NT-Literal - idx = idx+1 - - return idx - - def create_xpath(self, tokens): - """ - Creates an xpath by combining the given tokens using '/' - Example: - tokens: module, container, list[key='value'], leaf - xpath: /module/container/list[key='value']/leaf - """ - if len(tokens) == 0: - return "/" - - return f"{PathAddressing.XPATH_SEPARATOR}{PathAddressing.XPATH_SEPARATOR.join(str(t) for t in tokens)}" - - def find_ref_paths(self, path, config): - """ - Finds the paths referencing any line under the given 'path' within the given 'config'. 
- Example: - path: /PORT - config: - { - "VLAN_MEMBER": { - "Vlan1000|Ethernet0": {}, - "Vlan1000|Ethernet4": {} - }, - "ACL_TABLE": { - "EVERFLOW": { - "ports": [ - "Ethernet4" - ], - }, - "EVERFLOWV6": { - "ports": [ - "Ethernet4", - "Ethernet8" - ] - } - }, - "PORT": { - "Ethernet0": {}, - "Ethernet4": {}, - "Ethernet8": {} - } - } - return: - /VLAN_MEMBER/Vlan1000|Ethernet0 - /VLAN_MEMBER/Vlan1000|Ethernet4 - /ACL_TABLE/EVERFLOW/ports/0 - /ACL_TABLE/EVERFLOW6/ports/0 - /ACL_TABLE/EVERFLOW6/ports/1 - """ - # TODO: Also fetch references by must statement (check similar statements) - return self._find_leafref_paths(path, config) - - def _find_leafref_paths(self, path, config): - sy = sonic_yang.SonicYang(YANG_DIR) - sy.loadYangModel() - - sy.loadData(config) - - xpath = self.convert_path_to_xpath(path, config, sy) - - leaf_xpaths = self._get_inner_leaf_xpaths(xpath, sy) - - ref_xpaths = [] - for xpath in leaf_xpaths: - ref_xpaths.extend(sy.find_data_dependencies(xpath)) - - ref_paths = [] - for ref_xpath in ref_xpaths: - ref_path = self.convert_xpath_to_path(ref_xpath, config, sy) - ref_paths.append(ref_path) - - return set(ref_paths) - - def _get_inner_leaf_xpaths(self, xpath, sy): - if xpath == "/": # Point to Root element which contains all xpaths - nodes = sy.root.tree_for() - else: # Otherwise get all nodes that match xpath - nodes = sy.root.find_path(xpath).data() - - for node in nodes: - for inner_node in node.tree_dfs(): - # TODO: leaflist also can be used as the 'path' argument in 'leafref' so add support to leaflist - if self._is_leaf_node(inner_node): - yield inner_node.path() - - def _is_leaf_node(self, node): - schema = node.schema() - return ly.LYS_LEAF == schema.nodetype() - - def convert_path_to_xpath(self, path, config, sy): - """ - Converts the given JsonPatch path (i.e. JsonPointer) to XPATH. 
- Example: - path: /VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode - xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode - """ - self.convert_xpath_to_path - tokens = self.get_path_tokens(path) - if len(tokens) == 0: - return self.create_xpath(tokens) - - xpath_tokens = [] - table = tokens[0] - - cmap = sy.confDbYangMap[table] - - # getting the top level element : - xpath_tokens.append(cmap['module']+":"+cmap['topLevelContainer']) - - xpath_tokens.extend(self._get_xpath_tokens_from_container(cmap['container'], 0, tokens, config)) - - return self.create_xpath(xpath_tokens) - - def _get_xpath_tokens_from_container(self, model, token_index, path_tokens, config): - token = path_tokens[token_index] - xpath_tokens = [token] - - if len(path_tokens)-1 == token_index: - return xpath_tokens - - # check if the configdb token is referring to a list - list_model = self._get_list_model(model, token_index, path_tokens) - if list_model: - new_xpath_tokens = self._get_xpath_tokens_from_list(list_model, token_index+1, path_tokens, config[path_tokens[token_index]]) - xpath_tokens.extend(new_xpath_tokens) - return xpath_tokens - - # check if it is targetting a child container - child_container_model = self._get_model(model.get('container'), path_tokens[token_index+1]) - if child_container_model: - new_xpath_tokens = self._get_xpath_tokens_from_container(child_container_model, token_index+1, path_tokens, config[path_tokens[token_index]]) - xpath_tokens.extend(new_xpath_tokens) - return xpath_tokens - - new_xpath_tokens = self._get_xpath_tokens_from_leaf(model, token_index+1, path_tokens, config[path_tokens[token_index]]) - xpath_tokens.extend(new_xpath_tokens) - - return xpath_tokens - - def _get_xpath_tokens_from_list(self, model, token_index, path_tokens, config): - list_name = model['@name'] - - tableKey = path_tokens[token_index] - listKeys = model['key']['@value'] - keyDict = self._extractKey(tableKey, listKeys) - keyTokens = [f"[{key}='{keyDict[key]}']" for key in keyDict] - item_token = f"{list_name}{''.join(keyTokens)}" - - xpath_tokens = [item_token] - - # if whole list-item is needed i.e. if in the path is not referencing child leaf items - # Example: - # path: /VLAN/Vlan1000 - # xpath: /sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000'] - if len(path_tokens)-1 == token_index: - return xpath_tokens - - new_xpath_tokens = self._get_xpath_tokens_from_leaf(model, token_index+1, path_tokens,config[path_tokens[token_index]]) - xpath_tokens.extend(new_xpath_tokens) - return xpath_tokens - - def _get_xpath_tokens_from_leaf(self, model, token_index, path_tokens, config): - token = path_tokens[token_index] - - # checking all leaves - leaf_model = self._get_model(model.get('leaf'), token) - if leaf_model: - return [token] - - # checking choice - choices = model.get('choice') - if choices: - for choice in choices: - cases = choice['case'] - for case in cases: - leaf_model = self._get_model(case.get('leaf'), token) - if leaf_model: - return [token] - - # checking leaf-list (i.e. 
arrays of string, number or bool) - leaf_list_model = self._get_model(model.get('leaf-list'), token) - if leaf_list_model: - # if whole-list is to be returned, just return the token without checking the list items - # Example: - # path: /VLAN/Vlan1000/dhcp_servers - # xpath: /sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers - if len(path_tokens)-1 == token_index: - return [token] - list_config = config[token] - value = list_config[int(path_tokens[token_index+1])] - # To get a leaf-list instance with the value 'val' - # /module-name:container/leaf-list[.='val'] - # Source: Check examples in https://netopeer.liberouter.org/doc/libyang/master/html/howto_x_path.html - return [f"{token}[.='{value}']"] - - raise ValueError("Token not found") - - def _extractKey(self, tableKey, keys): - keyList = keys.split() - # get the value groups - value = tableKey.split("|") - # match lens - if len(keyList) != len(value): - raise ValueError("Value not found for {} in {}".format(keys, tableKey)) - # create the keyDict - keyDict = dict() - for i in range(len(keyList)): - keyDict[keyList[i]] = value[i].strip() - - return keyDict - - def _get_list_model(self, model, token_index, path_tokens): - parent_container_name = path_tokens[token_index] - clist = model.get('list') - # Container contains a single list, just return it - # TODO: check if matching also by name is necessary - if isinstance(clist, dict): - return clist - - if isinstance(clist, list): - configdb_values_str = path_tokens[token_index+1] - # Format: "value1|value2|value|..." - configdb_values = configdb_values_str.split("|") - for list_model in clist: - yang_keys_str = list_model['key']['@value'] - # Format: "key1 key2 key3 ..." - yang_keys = yang_keys_str.split() - # if same number of values and keys, this is the intended list-model - # TODO: Match also on types and not only the length of the keys/values - if len(yang_keys) == len(configdb_values): - return list_model - raise GenericConfigUpdaterError(f"Container {parent_container_name} has multiple lists, " - f"but none of them match the config_db value {configdb_values_str}") - - return None - - def convert_xpath_to_path(self, xpath, config, sy): - """ - Converts the given XPATH to JsonPatch path (i.e. JsonPointer). 
- Example: - xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode - path: /VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode - """ - tokens = self.get_xpath_tokens(xpath) - if len(tokens) == 0: - return self.create_path([]) - - if len(tokens) == 1: - raise GenericConfigUpdaterError("xpath cannot be just the module-name, there is no mapping to path") - - table = tokens[1] - cmap = sy.confDbYangMap[table] - - path_tokens = self._get_path_tokens_from_container(cmap['container'], 1, tokens, config) - return self.create_path(path_tokens) - - def _get_path_tokens_from_container(self, model, token_index, xpath_tokens, config): - token = xpath_tokens[token_index] - path_tokens = [token] - - if len(xpath_tokens)-1 == token_index: - return path_tokens - - # check child list - list_name = xpath_tokens[token_index+1].split("[")[0] - list_model = self._get_model(model.get('list'), list_name) - if list_model: - new_path_tokens = self._get_path_tokens_from_list(list_model, token_index+1, xpath_tokens, config[token]) - path_tokens.extend(new_path_tokens) - return path_tokens - - container_name = xpath_tokens[token_index+1] - container_model = self._get_model(model.get('container'), container_name) - if container_model: - new_path_tokens = self._get_path_tokens_from_container(container_model, token_index+1, xpath_tokens, config[token]) - path_tokens.extend(new_path_tokens) - return path_tokens - - new_path_tokens = self._get_path_tokens_from_leaf(model, token_index+1, xpath_tokens, config[token]) - path_tokens.extend(new_path_tokens) - - return path_tokens - - def _get_path_tokens_from_list(self, model, token_index, xpath_tokens, config): - token = xpath_tokens[token_index] - key_dict = self._extract_key_dict(token) - - # If no keys specified return empty tokens, as we are already inside the correct table. - # Also note that the list name in SonicYang has no correspondence in ConfigDb and is ignored. - # Example where VLAN_MEMBER_LIST has no specific key/value: - # xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST - # path: /VLAN_MEMBER - if not(key_dict): - return [] - - listKeys = model['key']['@value'] - key_list = listKeys.split() - - if len(key_list) != len(key_dict): - raise GenericConfigUpdaterError(f"Keys in configDb not matching keys in SonicYang. ConfigDb keys: {key_dict.keys()}. SonicYang keys: {key_list}") - - values = [key_dict[k] for k in key_list] - path_token = '|'.join(values) - path_tokens = [path_token] - - if len(xpath_tokens)-1 == token_index: - return path_tokens - - next_token = xpath_tokens[token_index+1] - # if the target node is a key, then it does not have a correspondene to path. - # Just return the current 'key1|key2|..' 
token as it already refers to the keys - # Example where the target node is 'name' which is a key in VLAN_MEMBER_LIST: - # xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/name - # path: /VLAN_MEMBER/Vlan1000|Ethernet8 - if next_token in key_dict: - return path_tokens - - new_path_tokens = self._get_path_tokens_from_leaf(model, token_index+1, xpath_tokens, config[path_token]) - path_tokens.extend(new_path_tokens) - return path_tokens - - def _get_path_tokens_from_leaf(self, model, token_index, xpath_tokens, config): - token = xpath_tokens[token_index] - - # checking all leaves - leaf_model = self._get_model(model.get('leaf'), token) - if leaf_model: - return [token] - - # checking choices - choices = model.get('choice') - if choices: - for choice in choices: - cases = choice['case'] - for case in cases: - leaf_model = self._get_model(case.get('leaf'), token) - if leaf_model: - return [token] - - # checking leaf-list - leaf_list_tokens = token.split("[", 1) # split once on the first '[', a regex is used later to fetch keys/values - leaf_list_name = leaf_list_tokens[0] - leaf_list_model = self._get_model(model.get('leaf-list'), leaf_list_name) - if leaf_list_model: - # if whole-list is to be returned, just return the list-name without checking the list items - # Example: - # xpath: /sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers - # path: /VLAN/Vlan1000/dhcp_servers - if len(leaf_list_tokens) == 1: - return [leaf_list_name] - leaf_list_pattern = "^[^\[]+(?:\[\.='([^']*)'\])?$" - leaf_list_regex = re.compile(leaf_list_pattern) - match = leaf_list_regex.match(token) - # leaf_list_name = match.group(1) - leaf_list_value = match.group(1) - list_config = config[leaf_list_name] - list_idx = list_config.index(leaf_list_value) - return [leaf_list_name, list_idx] - - raise Exception("no leaf") - - def _extract_key_dict(self, list_token): - # Example: VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8'] - # the groups would be ('VLAN_MEMBER'), ("[name='Vlan1000'][port='Ethernet8']") - table_keys_pattern = "^([^\[]+)(.*)$" - text = list_token - table_keys_regex = re.compile(table_keys_pattern) - match = table_keys_regex.match(text) - # list_name = match.group(1) - all_key_value = match.group(2) - - # Example: [name='Vlan1000'][port='Ethernet8'] - # the findall groups would be ('name', 'Vlan1000'), ('port', 'Ethernet8') - key_value_pattern = "\[([^=]+)='([^']*)'\]" - matches = re.findall(key_value_pattern, all_key_value) - key_dict = {} - for item in matches: - key = item[0] - value = item[1] - key_dict[key] = value - - return key_dict - - def _get_model(self, model, name): - if isinstance(model, dict) and model['@name'] == name: - return model - if isinstance(model, list): - for submodel in model: - if submodel['@name'] == name: - return submodel - - return None diff --git a/generic_config_updater/patch_sorter.py b/generic_config_updater/patch_sorter.py deleted file mode 100644 index 8bf99ba004..0000000000 --- a/generic_config_updater/patch_sorter.py +++ /dev/null @@ -1,1010 +0,0 @@ -import copy -import json -import jsonpatch -from collections import deque -from enum import Enum -from .gu_common import OperationWrapper, OperationType, GenericConfigUpdaterError, JsonChange, PathAddressing - -class Diff: - """ - A class that contains the diff info between current and target configs. 
- """ - def __init__(self, current_config, target_config): - self.current_config = current_config - self.target_config = target_config - - def __hash__(self): - cc = json.dumps(self.current_config, sort_keys=True) - tc = json.dumps(self.target_config, sort_keys=True) - return hash((cc,tc)) - - def __eq__(self, other): - """Overrides the default implementation""" - if isinstance(other, Diff): - return self.current_config == other.current_config and self.target_config == other.target_config - - return False - - # TODO: Can be optimized to apply the move in place. JsonPatch supports that using the option 'in_place=True' - # Check: https://python-json-patch.readthedocs.io/en/latest/tutorial.html#applying-a-patch - # NOTE: in case move is applied in place, we will need to support `undo_move` as well. - def apply_move(self, move): - new_current_config = move.apply(self.current_config) - return Diff(new_current_config, self.target_config) - - def has_no_diff(self): - return self.current_config == self.target_config - -class JsonMove: - """ - A class similar to JsonPatch operation, but it allows the path to refer to non-existing middle elements. - - JsonPatch operation fails to update json if the path in the patch refers to element that do not exist. - For example, assume json to be: - {} - The following path will be rejected: - /elem1/key1 - The reason is 'elem1' does not exist in the json - - JsonMove on the other hand allows that given the target_config_tokens i.e. the target_config path, - and current_config_tokens i.e. current_config path where the update needs to happen. - """ - def __init__(self, diff, op_type, current_config_tokens, target_config_tokens=None): - operation = JsonMove._to_jsonpatch_operation(diff, op_type, current_config_tokens, target_config_tokens) - self.patch = jsonpatch.JsonPatch([operation]) - self.op_type = operation[OperationWrapper.OP_KEYWORD] - self.path = operation[OperationWrapper.PATH_KEYWORD] - self.value = operation.get(OperationWrapper.VALUE_KEYWORD, None) - - self.op_type = op_type - self.current_config_tokens = current_config_tokens - self.target_config_tokens = target_config_tokens - - @staticmethod - def _to_jsonpatch_operation(diff, op_type, current_config_tokens, target_config_tokens): - operation_wrapper = OperationWrapper() - path_addressing = PathAddressing() - - if op_type == OperationType.REMOVE: - path = path_addressing.create_path(current_config_tokens) - return operation_wrapper.create(op_type, path) - - if op_type == OperationType.REPLACE: - path = path_addressing.create_path(current_config_tokens) - value = JsonMove._get_value(diff.target_config, target_config_tokens) - return operation_wrapper.create(op_type, path, value) - - if op_type == OperationType.ADD: - return JsonMove._to_jsonpatch_add_operation(diff, current_config_tokens, target_config_tokens) - - raise ValueError(f"OperationType {op_type} is not supported") - - @staticmethod - def _get_value(config, tokens): - for token in tokens: - config = config[token] - - return copy.deepcopy(config) - - @staticmethod - def _to_jsonpatch_add_operation(diff, current_config_tokens, target_config_tokens): - """ - Check description of JsonMove class first. - - ADD operation path can refer to elements that do not exist, so to convert JsonMove to JsonPatch operation - We need to remove the non-existing tokens from the current_config path and move them to the value. 
- - Example: - Assume Target Config: - { - "dict1":{ - "key11": "value11" - } - } - Assume Current Config: - { - } - Assume JsonMove: - op_type=add, current_config_tokens=[dict1, key11], target_config_tokens=[dict1, key11] - - Converting this to operation directly would result in: - {"op":"add", "path":"/dict1/key11", "value":"value11"} - BUT this is not correct since 'dict1' which does not exist in Current Config. - Instead we convert to: - {"op":"add", "path":"/dict1", "value":{"key11": "value11"}} - """ - operation_wrapper = OperationWrapper() - path_addressing = PathAddressing() - - # if path refers to whole config i.e. no tokens, then just create the operation - if not current_config_tokens: - path = path_addressing.create_path(current_config_tokens) - value = JsonMove._get_value(diff.target_config, target_config_tokens) - return operation_wrapper.create(OperationType.ADD, path, value) - - # Start with getting target-config that match the path all the way to the value in json format - # Example: - # Assume target-config: - # { - # "dict1":{ - # "key11": "value11", - # "list12": [ - # "value121", - # "value122" - # ] - # }, - # "dict2":{ - # "key21": "value21" - # } - # } - # Assume target config tokens: - # dict1, list12, 1 - # filtered_config will be - # { - # "dict1":{ - # "list12": [ - # "value122" - # ] - # } - # } - target_ptr = diff.target_config - filtered_config = {} - filtered_config_ptr = filtered_config - for token_index in range(len(target_config_tokens)): - token = target_config_tokens[token_index] - - # Tokens are expected to be of the correct data-type i.e. string, int (list-index) - # So not checking the type of the token before consuming it - target_ptr = target_ptr[token] - - # if it is the last item, then just return the last target_ptr - if token_index == len(target_config_tokens)-1: - filtered_value = target_ptr - elif isinstance(target_ptr, list): - filtered_value = [] - else: - filtered_value = {} - - if isinstance(filtered_config_ptr, list): - filtered_config_ptr.append(filtered_value) # filtered_config list will contain only 1 value - else: # otherwise it is a dict - filtered_config_ptr[token] = filtered_value - - filtered_config_ptr = filtered_value - - # Then from the filtered_config get the all the tokens that exist in current_config - # This will be the new path, and the new value will be the corresponding filtered_config - # Example: - # Assume filtered_config - # { - # "dict1":{ - # "key11": "value11" - # } - # } - # Assume current-config - # { - # "dict1":{ - # "list12": [ - # "value122" - # ] - # } - # } - # Then the JsonPatch path would be: - # /dict1/list12 - # And JsonPatch value would be: - # [ "value122" ] - current_ptr = diff.current_config - new_tokens = [] - for token in current_config_tokens: - new_tokens.append(token) - was_list = isinstance(filtered_config, list) - if was_list: - # filtered_config list can only have 1 item - filtered_config = filtered_config[0] - else: - filtered_config = filtered_config[token] - - if was_list and token >= len(current_ptr): - break - if not(was_list) and token not in current_ptr: - break - current_ptr = current_ptr[token] - - op_type = OperationType.ADD - new_path = path_addressing.create_path(new_tokens) - new_value = copy.deepcopy(filtered_config) - - return operation_wrapper.create(op_type, new_path, new_value) - - @staticmethod - def from_patch(patch): - ops = list(patch) - if len(ops) != 1: - raise GenericConfigUpdaterError( - f"Only a patch of a single operation be converted to JsonMove. 
Patch has {len(ops)} operation/s") - - return JsonMove.from_operation(ops[0]) - - @staticmethod - def from_operation(operation): - path_addressing = PathAddressing() - op_type = OperationType[operation[OperationWrapper.OP_KEYWORD].upper()] - path = operation[OperationWrapper.PATH_KEYWORD] - if op_type in [OperationType.ADD, OperationType.REPLACE]: - value = operation[OperationWrapper.VALUE_KEYWORD] - else: - value = None - - tokens = path_addressing.get_path_tokens(path) - - target_config = {} - target_config_ptr = target_config - current_config = {} - current_config_ptr = current_config - for token in tokens[:-1]: - target_config_ptr[token] = {} - current_config_ptr[token] = {} - target_config_ptr = target_config_ptr[token] - current_config_ptr = current_config_ptr[token] - - if tokens: - target_config_ptr[tokens[-1]] = value - else: - # whole-config, just use value - target_config = value - - current_config_tokens = tokens - if op_type in [OperationType.ADD, OperationType.REPLACE]: - target_config_tokens = tokens - else: - target_config_tokens = None - - diff = Diff(current_config, target_config) - - return JsonMove(diff, op_type, current_config_tokens, target_config_tokens) - - def apply(self, config): - return self.patch.apply(config) - - def __str__(self): - return str(self.patch) - - def __repr__(self): - return str(self.patch) - - def __eq__(self, other): - """Overrides the default implementation""" - if isinstance(other, JsonMove): - return self.patch == other.patch - return False - - def __hash__(self): - return hash((self.op_type, self.path, json.dumps(self.value))) - -class MoveWrapper: - def __init__(self, move_generators, move_extenders, move_validators): - self.move_generators = move_generators - self.move_extenders = move_extenders - self.move_validators = move_validators - - def generate(self, diff): - processed_moves = set() - moves = deque([]) - - for move in self._generate_moves(diff): - if move in processed_moves: - continue - processed_moves.add(move) - yield move - moves.extend(self._extend_moves(move, diff)) - - while moves: - move = moves.popleft() - if move in processed_moves: - continue - processed_moves.add(move) - yield move - moves.extend(self._extend_moves(move, diff)) - - def validate(self, move, diff): - for validator in self.move_validators: - if not validator.validate(move, diff): - return False - return True - - def simulate(self, move, diff): - return diff.apply_move(move) - - def _generate_moves(self, diff): - for generator in self.move_generators: - for move in generator.generate(diff): - yield move - - def _extend_moves(self, move, diff): - for extender in self.move_extenders: - for newmove in extender.extend(move, diff): - yield newmove - -class DeleteWholeConfigMoveValidator: - """ - A class to validate not deleting whole config as it is not supported by JsonPatch lib. - """ - def validate(self, move, diff): - if move.op_type == OperationType.REMOVE and move.path == "": - return False - return True - -class FullConfigMoveValidator: - """ - A class to validate that full config is valid according to YANG models after applying the move. - """ - def __init__(self, config_wrapper): - self.config_wrapper = config_wrapper - - def validate(self, move, diff): - simulated_config = move.apply(diff.current_config) - return self.config_wrapper.validate_config_db_config(simulated_config) - -# TODO: Add this validation to YANG models instead -class UniqueLanesMoveValidator: - """ - A class to validate lanes and any port are unique between all ports. 
- """ - def validate(self, move, diff): - simulated_config = move.apply(diff.current_config) - - if "PORT" not in simulated_config: - return True - - ports = simulated_config["PORT"] - existing = set() - for port in ports: - attrs = ports[port] - if "lanes" in attrs: - lanes_str = attrs["lanes"] - lanes = lanes_str.split(", ") - for lane in lanes: - if lane in existing: - return False - existing.add(lane) - return True - -class CreateOnlyMoveValidator: - """ - A class to validate create-only fields are only added/removed but never replaced. - Parents of create-only fields are also only added/removed but never replaced when they contain - a modified create-only field. - """ - def __init__(self, path_addressing): - self.path_addressing = path_addressing - - def validate(self, move, diff): - if move.op_type != OperationType.REPLACE: - return True - - # The 'create-only' field needs to be common between current and simulated anyway but different. - # This means it is enough to just get the paths from current_config, paths that are not common can be ignored. - paths = self._get_create_only_paths(diff.current_config) - simulated_config = move.apply(diff.current_config) - - for path in paths: - tokens = self.path_addressing.get_path_tokens(path) - if self._value_exist_but_different(tokens, diff.current_config, simulated_config): - return False - - return True - - # TODO: create-only fields are hard-coded for now, it should be moved to YANG models - def _get_create_only_paths(self, config): - if "PORT" not in config: - return - - ports = config["PORT"] - - for port in ports: - attrs = ports[port] - if "lanes" in attrs: - yield f"/PORT/{port}/lanes" - - def _value_exist_but_different(self, tokens, current_config_ptr, simulated_config_ptr): - for token in tokens: - mod_token = int(token) if isinstance(current_config_ptr, list) else token - - if mod_token not in current_config_ptr: - return False - - if mod_token not in simulated_config_ptr: - return False - - current_config_ptr = current_config_ptr[mod_token] - simulated_config_ptr = simulated_config_ptr[mod_token] - - return current_config_ptr != simulated_config_ptr - -class NoDependencyMoveValidator: - """ - A class to validate that the modified configs do not have dependency on each other. This should prevent - moves that update whole config in a single step where multiple changed nodes are dependent on each. This - way dependent configs are never updated together. - """ - def __init__(self, path_addressing, config_wrapper): - self.path_addressing = path_addressing - self.config_wrapper = config_wrapper - - def validate(self, move, diff): - operation_type = move.op_type - path = move.path - - if operation_type == OperationType.ADD: - simulated_config = move.apply(diff.current_config) - # For add operation, we check the simulated config has no dependencies between nodes under the added path - if not self._validate_paths_config([path], simulated_config): - return False - elif operation_type == OperationType.REMOVE: - # For remove operation, we check the current config has no dependencies between nodes under the removed path - if not self._validate_paths_config([path], diff.current_config): - return False - elif operation_type == OperationType.REPLACE: - if not self._validate_replace(move, diff): - return False - - return True - - # NOTE: this function can be used for validating JsonChange as well which might have more than one move. 
- def _validate_replace(self, move, diff): - """ - The table below shows how mixed deletion/addition within replace affect this validation. - - The table is answring the question whether the change is valid: - Y = Yes - N = No - n/a = not applicable as the change itself is not valid - - symbols meaning; - +A, -A: adding, removing config A - +refA, -refA: adding, removing a reference to A config - - - +refA|-refA|refA - --|-----|-----|---- - +A| N | n/a | n/a - -A| n/a | N | n/a - A| Y | Y | Y - - The conclusion is that: - +A, +refA is invalid because there is a dependency and a single move should not have dependency - -A, -refA is invalid because there is a dependency and a single move should not have dependency - A kept unchanged can be ignored, as it is always OK regardless of what happens to its reference - Other states are all non applicable since they are invalid to begin with - - So verification would be: - if A is deleted and refA is deleted: return False - if A is added and refA is added: return False - return True - """ - simulated_config = move.apply(diff.current_config) - deleted_paths, added_paths = self._get_paths(diff.current_config, simulated_config, []) - - if not self._validate_paths_config(deleted_paths, diff.current_config): - return False - - if not self._validate_paths_config(added_paths, diff.target_config): - return False - - return True - - def _get_paths(self, current_ptr, target_ptr, tokens): - deleted_paths = [] - added_paths = [] - - if isinstance(current_ptr, list) or isinstance(target_ptr, list): - tmp_deleted_paths, tmp_added_paths = self._get_list_paths(current_ptr, target_ptr, tokens) - deleted_paths.extend(tmp_deleted_paths) - added_paths.extend(tmp_added_paths) - return deleted_paths, added_paths - - if isinstance(current_ptr, dict): - for token in current_ptr: - tokens.append(token) - if token not in target_ptr: - deleted_paths.append(self.path_addressing.create_path(tokens)) - else: - tmp_deleted_paths, tmp_added_paths = self._get_paths(current_ptr[token], target_ptr[token], tokens) - deleted_paths.extend(tmp_deleted_paths) - added_paths.extend(tmp_added_paths) - tokens.pop() - - for token in target_ptr: - tokens.append(token) - if token not in current_ptr: - added_paths.append(self.path_addressing.create_path(tokens)) - tokens.pop() - - return deleted_paths, added_paths - - # current/target configs are not dict nor list, so handle them as string, int, bool, float - if current_ptr != target_ptr: - # tokens.append(token) - deleted_paths.append(self.path_addressing.create_path(tokens)) - added_paths.append(self.path_addressing.create_path(tokens)) - # tokens.pop() - - return deleted_paths, added_paths - - def _get_list_paths(self, current_list, target_list, tokens): - """ - Gets all paths within the given list, assume list items are unique - """ - deleted_paths = [] - added_paths = [] - - hashed_target = set(target_list) - for index, value in enumerate(current_list): - if value not in hashed_target: - tokens.append(index) - deleted_paths.append(self.path_addressing.create_path(tokens)) - tokens.pop() - - hashed_current = set(current_list) - for index, value in enumerate(target_list): - if value not in hashed_current: - tokens.append(index) - # added_paths refer to paths in the target config and not necessarily the current config - added_paths.append(self.path_addressing.create_path(tokens)) - tokens.pop() - - return deleted_paths, added_paths - - def _validate_paths_config(self, paths, config): - """ - validates all config under paths do not have config 
and its references - """ - refs = self._find_ref_paths(paths, config) - for ref in refs: - for path in paths: - if ref.startswith(path): - return False - - return True - - def _find_ref_paths(self, paths, config): - refs = [] - for path in paths: - refs.extend(self.path_addressing.find_ref_paths(path, config)) - return refs - -class LowLevelMoveGenerator: - """ - A class to generate the low level moves i.e. moves corresponding to differences between current/target config - where the path of the move does not have children. - """ - def __init__(self, path_addressing): - self.path_addressing = path_addressing - def generate(self, diff): - single_run_generator = SingleRunLowLevelMoveGenerator(diff, self.path_addressing) - for move in single_run_generator.generate(): - yield move - -class SingleRunLowLevelMoveGenerator: - """ - A class that can only run once to assist LowLevelMoveGenerator with generating the moves. - """ - def __init__(self, diff, path_addressing): - self.diff = diff - self.path_addressing = path_addressing - - def generate(self): - current_ptr = self.diff.current_config - target_ptr = self.diff.target_config - current_tokens = [] - target_tokens = [] - - for move in self._traverse(current_ptr, target_ptr, current_tokens, target_tokens): - yield move - - def _traverse(self, current_ptr, target_ptr, current_tokens, target_tokens): - """ - Traverses the current/target config trees. - The given ptrs can be: - dict - list of string, number, boolean, int - string, number, boolean, int - - list of dict is not allowed - """ - if isinstance(current_ptr, list) or isinstance(target_ptr, list): - for move in self._traverse_list(current_ptr, target_ptr, current_tokens, target_tokens): - yield move - return - - if isinstance(current_ptr, dict) or isinstance(target_ptr, dict): - for key in current_ptr: - current_tokens.append(key) - if key in target_ptr: - target_tokens.append(key) - for move in self._traverse(current_ptr[key], target_ptr[key], current_tokens, target_tokens): - yield move - target_tokens.pop() - else: - for move in self._traverse_current(current_ptr[key], current_tokens): - yield move - - current_tokens.pop() - - for key in target_ptr: - if key in current_ptr: - continue # Already tried in the previous loop - - target_tokens.append(key) - current_tokens.append(key) - for move in self._traverse_target(target_ptr[key], current_tokens, target_tokens): - yield move - current_tokens.pop() - target_tokens.pop() - - return - - # The current/target ptr are neither dict nor list, so they might be string, int, float, bool - for move in self._traverse_value(current_ptr, target_ptr, current_tokens, target_tokens): - yield move - - def _traverse_list(self, current_ptr, target_ptr, current_tokens, target_tokens): - # if same elements different order, just sort by replacing whole list - # Example: - # current: [1, 2, 3, 4] - # target: [4, 3, 2, 1] - # returned move: REPLACE, current, target - current_dict_cnts = self._list_to_dict_with_count(current_ptr) - target_dict_cnts = self._list_to_dict_with_count(target_ptr) - if current_dict_cnts == target_dict_cnts: - for move in self._traverse_value(current_ptr, target_ptr, current_tokens, target_tokens): - yield move - return - - # Otherwise try add missing and remove additional elements - # Try remove - if current_ptr is not None: - for current_index, current_item in enumerate(current_ptr): - if current_dict_cnts[current_item] > target_dict_cnts.get(current_item, 0): - current_tokens.append(current_index) - for move in 
self._traverse_current_value(current_item, current_tokens): - yield move - current_tokens.pop() - # Try add - if target_ptr is not None: - current_cnt = len(current_ptr) if current_ptr is not None else 0 - for target_index, target_item in enumerate(target_ptr): - if target_dict_cnts[target_item] > current_dict_cnts.get(target_item, 0): - index = min(current_cnt, target_index) - current_tokens.append(index) - target_tokens.append(target_index) - for move in self._traverse_target_value(target_item, current_tokens, target_tokens): - yield move - target_tokens.pop() - current_tokens.pop() - - # Try replace - if current_ptr is not None and target_ptr is not None: - for current_index, current_item in enumerate(current_ptr): - for target_index, target_item in enumerate(target_ptr): - if current_dict_cnts[current_item] > target_dict_cnts.get(current_item, 0) and \ - target_dict_cnts[target_item] > current_dict_cnts.get(target_item, 0): - current_tokens.append(current_index) - target_tokens.append(target_index) - for move in self._traverse_value(current_item, target_item, current_tokens, target_tokens): - yield move - target_tokens.pop() - current_tokens.pop() - - def _traverse_value(self, current_value, target_value, current_tokens, target_tokens): - if current_value == target_value: - return - - yield JsonMove(self.diff, OperationType.REPLACE, current_tokens, target_tokens) - - def _traverse_current(self, ptr, current_tokens): - if isinstance(ptr, list): - for move in self._traverse_current_list(ptr, current_tokens): - yield move - return - - if isinstance(ptr, dict): - if len(ptr) == 0: - yield JsonMove(self.diff, OperationType.REMOVE, current_tokens) - return - - for key in ptr: - current_tokens.append(key) - for move in self._traverse_current(ptr[key], current_tokens): - yield move - current_tokens.pop() - - return - - # ptr is not a dict nor a list, it can be string, int, float, bool - for move in self._traverse_current_value(ptr, current_tokens): - yield move - - def _traverse_current_list(self, ptr, current_tokens): - if len(ptr) == 0: - yield JsonMove(self.diff, OperationType.REMOVE, current_tokens) - return - - for index, val in enumerate(ptr): - current_tokens.append(index) - for move in self._traverse_current_value(val, current_tokens): - yield move - current_tokens.pop() - - def _traverse_current_value(self, val, current_tokens): - yield JsonMove(self.diff, OperationType.REMOVE, current_tokens) - - def _traverse_target(self, ptr, current_tokens, target_tokens): - if isinstance(ptr, list): - for move in self._traverse_target_list(ptr, current_tokens, target_tokens): - yield move - return - - if isinstance(ptr, dict): - if len(ptr) == 0: - yield JsonMove(self.diff, OperationType.ADD, current_tokens, target_tokens) - return - - for key in ptr: - current_tokens.append(key) - target_tokens.append(key) - for move in self._traverse_target(ptr[key], current_tokens, target_tokens): - yield move - target_tokens.pop() - current_tokens.pop() - - return - - # target configs are not dict nor list, so handle them as string, int, bool, float - for move in self._traverse_target_value(ptr, current_tokens, target_tokens): - yield move - - def _traverse_target_list(self, ptr, current_tokens, target_tokens): - if len(ptr) == 0: - yield JsonMove(self.diff, OperationType.ADD, current_tokens, target_tokens) - return - - for index, val in enumerate(ptr): - # _traverse_target_list is called when the whole list is missing - # in such case any item should be added at first location i.e. 
0 - current_tokens.append(0) - target_tokens.append(index) - for move in self._traverse_target_value(val, current_tokens, target_tokens): - yield move - target_tokens.pop() - current_tokens.pop() - - def _traverse_target_value(self, val, current_tokens, target_tokens): - yield JsonMove(self.diff, OperationType.ADD, current_tokens, target_tokens) - - def _list_to_dict_with_count(self, items): - counts = dict() - - if items is None: - return counts - - for item in items: - counts[item] = counts.get(item, 0) + 1 - - return counts - -class UpperLevelMoveExtender: - """ - A class to extend the given move by including its parent. It has 3 cases: - 1) If parent was in current and target, then replace the parent - 2) If parent was in current but not target, then delete the parent - 3) If parent was in target but not current, then add the parent - """ - def extend(self, move, diff): - # if there are no tokens, i.e. the move targets the whole config - if not move.current_config_tokens: - return - - upper_current_tokens = move.current_config_tokens[:-1] - operation_type = self._get_upper_operation(upper_current_tokens, diff) - - upper_target_tokens = None - if operation_type in [OperationType.ADD, OperationType.REPLACE]: - upper_target_tokens = upper_current_tokens - - yield JsonMove(diff, operation_type, upper_current_tokens, upper_target_tokens) - - # _get_upper_operation assumes ConfigDB does not have lists of objects, only lists of values - def _get_upper_operation(self, tokens, diff): - current_ptr = diff.current_config - target_ptr = diff.target_config - - for token in tokens: - if token not in current_ptr: - return OperationType.ADD - current_ptr = current_ptr[token] - if token not in target_ptr: - return OperationType.REMOVE - target_ptr = target_ptr[token] - - return OperationType.REPLACE - -class DeleteInsteadOfReplaceMoveExtender: - """ - A class to extend the given REPLACE move by adding a REMOVE move. - """ - def extend(self, move, diff): - operation_type = move.op_type - - if operation_type != OperationType.REPLACE: - return - - new_move = JsonMove(diff, OperationType.REMOVE, move.current_config_tokens) - - yield new_move -
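A rough sketch of how these extenders compose (the paths below are invented for illustration; they are not from the original code or tests):

    # Seed move from LowLevelMoveGenerator:   REMOVE /PORT/Ethernet0
    # UpperLevelMoveExtender also proposes:   REPLACE or REMOVE of the parent /PORT
    # DeleteInsteadOfReplaceMoveExtender turns a REPLACE proposal into a REMOVE as well.
    # DeleteRefsMoveExtender (below) adds:    REMOVE /ACL_TABLE/T1/ports/0
    #                                         (assuming that entry references Ethernet0)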
-class DeleteRefsMoveExtender: - """ - A class to extend the given DELETE move by adding DELETE moves to configs referring to the path in the move. - """ - def __init__(self, path_addressing): - self.path_addressing = path_addressing - - def extend(self, move, diff): - operation_type = move.op_type - - if operation_type != OperationType.REMOVE: - return - - for ref_path in self.path_addressing.find_ref_paths(move.path, diff.current_config): - yield JsonMove(diff, OperationType.REMOVE, self.path_addressing.get_path_tokens(ref_path)) - -class DfsSorter: - def __init__(self, move_wrapper): - self.visited = {} - self.move_wrapper = move_wrapper - - def sort(self, diff): - if diff.has_no_diff(): - return [] - - diff_hash = hash(diff) - if diff_hash in self.visited: - return None - self.visited[diff_hash] = True - - moves = self.move_wrapper.generate(diff) - - for move in moves: - if self.move_wrapper.validate(move, diff): - new_diff = self.move_wrapper.simulate(move, diff) - new_moves = self.sort(new_diff) - if new_moves is not None: - return [move] + new_moves - - return None - -class BfsSorter: - def __init__(self, move_wrapper): - self.visited = {} - self.move_wrapper = move_wrapper - - def sort(self, diff): - diff_queue = deque([]) - prv_moves_queue = deque([]) - - diff_queue.append(diff) - prv_moves_queue.append([]) - - while len(diff_queue): - diff = diff_queue.popleft() - prv_moves = prv_moves_queue.popleft() - - diff_hash = hash(diff) - if diff_hash in self.visited: - continue - self.visited[diff_hash] = True - - if diff.has_no_diff(): - return prv_moves - - moves = self.move_wrapper.generate(diff) - for move in moves: - if self.move_wrapper.validate(move, diff): - new_diff = self.move_wrapper.simulate(move, diff) - new_prv_moves = prv_moves + [move] - - diff_queue.append(new_diff) - prv_moves_queue.append(new_prv_moves) - - return None - -class MemoizationSorter: - def __init__(self, move_wrapper): - self.visited = {} - self.move_wrapper = move_wrapper - self.mem = {} - - def sort(self, diff): - if diff.has_no_diff(): - return [] - - diff_hash = hash(diff) - if diff_hash in self.mem: - return self.mem[diff_hash] - if diff_hash in self.visited: - return None - self.visited[diff_hash] = True - - moves = self.move_wrapper.generate(diff) - - bst_moves = None - for move in moves: - if self.move_wrapper.validate(move, diff): - new_diff = self.move_wrapper.simulate(move, diff) - new_moves = self.sort(new_diff) - if new_moves is not None and (bst_moves is None or len(bst_moves) > len(new_moves)+1): - bst_moves = [move] + new_moves - - self.mem[diff_hash] = bst_moves - return bst_moves - -class Algorithm(Enum): - DFS = 1 - BFS = 2 - MEMOIZATION = 3 - -class SortAlgorithmFactory: - def __init__(self, operation_wrapper, config_wrapper, path_addressing): - self.operation_wrapper = operation_wrapper - self.config_wrapper = config_wrapper - self.path_addressing = path_addressing - - def create(self, algorithm=Algorithm.DFS): - move_generators = [LowLevelMoveGenerator(self.path_addressing)] - move_extenders = [UpperLevelMoveExtender(), - DeleteInsteadOfReplaceMoveExtender(), - DeleteRefsMoveExtender(self.path_addressing)] - move_validators = [DeleteWholeConfigMoveValidator(), - FullConfigMoveValidator(self.config_wrapper), - NoDependencyMoveValidator(self.path_addressing, self.config_wrapper), - UniqueLanesMoveValidator(), - CreateOnlyMoveValidator(self.path_addressing)] - - move_wrapper = MoveWrapper(move_generators, move_extenders, move_validators) - - if algorithm == Algorithm.DFS: - sorter = DfsSorter(move_wrapper) - elif algorithm == Algorithm.BFS: - sorter = BfsSorter(move_wrapper) - elif algorithm == Algorithm.MEMOIZATION: - sorter =
MemoizationSorter(move_wrapper) - else: - raise ValueError(f"Algorithm {algorithm} is not supported") - - return sorter - -class PatchSorter: - def __init__(self, config_wrapper, patch_wrapper, sort_algorithm_factory=None): - self.config_wrapper = config_wrapper - self.patch_wrapper = patch_wrapper - self.operation_wrapper = OperationWrapper() - self.path_addressing = PathAddressing() - self.sort_algorithm_factory = sort_algorithm_factory if sort_algorithm_factory else \ - SortAlgorithmFactory(self.operation_wrapper, config_wrapper, self.path_addressing) - - def sort(self, patch, algorithm=Algorithm.DFS): - current_config = self.config_wrapper.get_config_db_as_json() - target_config = self.patch_wrapper.simulate_patch(patch, current_config) - - diff = Diff(current_config, target_config) - - sort_algorithm = self.sort_algorithm_factory.create(algorithm) - moves = sort_algorithm.sort(diff) - - if moves is None: - raise GenericConfigUpdaterError("There is no possible sorting") - - changes = [JsonChange(move.patch) for move in moves] - - return changes diff --git a/setup.py b/setup.py index c80e11e65b..216328726c 100644 --- a/setup.py +++ b/setup.py @@ -189,7 +189,6 @@ def run_tests(self): 'jinja2>=2.11.3', 'jsondiff>=1.2.0', 'jsonpatch>=1.32.0', - 'jsonpointer>=1.9', 'm2crypto>=0.31.0', 'natsort>=6.2.1', # 6.2.1 is the last version which supports Python 2. Can update once we no longer support Python 2 'netaddr>=0.8.0', diff --git a/tests/generic_config_updater/files/any_config_db.json b/tests/generic_config_updater/files/any_config_db.json deleted file mode 100644 index 2c63c08510..0000000000 --- a/tests/generic_config_updater/files/any_config_db.json +++ /dev/null @@ -1,2 +0,0 @@ -{ -} diff --git a/tests/generic_config_updater/files/any_other_config_db.json b/tests/generic_config_updater/files/any_other_config_db.json deleted file mode 100644 index c258f768cf..0000000000 --- a/tests/generic_config_updater/files/any_other_config_db.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "VLAN": { - } -} diff --git a/tests/generic_config_updater/files/config_db_after_multi_patch.json b/tests/generic_config_updater/files/config_db_after_multi_patch.json index 39dff7d688..042bf1d51b 100644 --- a/tests/generic_config_updater/files/config_db_after_multi_patch.json +++ b/tests/generic_config_updater/files/config_db_after_multi_patch.json @@ -119,4 +119,4 @@ "key12": "value12" } } -} +} \ No newline at end of file diff --git a/tests/generic_config_updater/files/config_db_after_single_operation.json b/tests/generic_config_updater/files/config_db_after_single_operation.json deleted file mode 100644 index 0f2f447537..0000000000 --- a/tests/generic_config_updater/files/config_db_after_single_operation.json +++ /dev/null @@ -1,83 +0,0 @@ -{ - "VLAN_MEMBER": { - "Vlan1000|Ethernet0": { - "tagging_mode": "untagged" - }, - "Vlan1000|Ethernet4": { - "tagging_mode": "untagged" - } - }, - "VLAN": { - "Vlan1000": { - "vlanid": "1000", - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - }, - "ACL_TABLE": { - "NO-NSW-PACL-V4": { - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0" - ] - }, - "DATAACL": { - "policy_desc": "DATAACL", - "ports": [ - "Ethernet4" - ], - "stage": "ingress", - "type": "L3" - }, - "EVERFLOW": { - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }, - "EVERFLOWV6": { - "policy_desc": "EVERFLOWV6", - "ports": [ - "Ethernet4", - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRRORV6" - } - 
}, - "PORT": { - "Ethernet0": { - "alias": "Eth1", - "lanes": "65, 66, 67, 68", - "description": "Ethernet0 100G link", - "speed": "100000" - }, - "Ethernet4": { - "admin_status": "up", - "alias": "fortyGigE0/4", - "description": "Servers0:eth0", - "index": "1", - "lanes": "29,30,31,32", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000" - }, - "Ethernet8": { - "admin_status": "up", - "alias": "fortyGigE0/8", - "description": "Servers1:eth0", - "index": "2", - "lanes": "33,34,35,36", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000" - } - } -} diff --git a/tests/generic_config_updater/files/config_db_choice.json b/tests/generic_config_updater/files/config_db_choice.json deleted file mode 100644 index eaece3248f..0000000000 --- a/tests/generic_config_updater/files/config_db_choice.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "ACL_RULE": { - "SSH_ONLY|RULE1": { - "L4_SRC_PORT":"65174-6530" - } - }, - "ACL_TABLE": { - "SSH_ONLY": { - "policy_desc": "SSH_ONLY", - "type": "CTRLPLANE", - "stage": "ingress", - "services": [ - "SSH" - ] - } - } -} diff --git a/tests/generic_config_updater/files/config_db_no_dependencies.json b/tests/generic_config_updater/files/config_db_no_dependencies.json deleted file mode 100644 index 12bdd464a5..0000000000 --- a/tests/generic_config_updater/files/config_db_no_dependencies.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "VLAN": { - "Vlan1000": { - "vlanid": "1000", - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - }, - "ACL_TABLE": { - "EVERFLOW": { - "policy_desc": "EVERFLOW", - "ports": [ - "" - ], - "stage": "ingress", - "type": "MIRROR" - }, - "EVERFLOWV6": { - "policy_desc": "EVERFLOWV6", - "ports": [ - "" - ], - "stage": "ingress", - "type": "MIRRORV6" - } - }, - "PORT": { - "Ethernet0": { - "alias": "Eth1", - "lanes": "65, 66, 67, 68", - "description": "Ethernet0 100G link", - "speed": "100000" - } - } -} diff --git a/tests/generic_config_updater/files/config_db_with_crm.json b/tests/generic_config_updater/files/config_db_with_crm.json deleted file mode 100644 index 5fd324d988..0000000000 --- a/tests/generic_config_updater/files/config_db_with_crm.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "CRM": { - "Config": { - "acl_counter_high_threshold": "90", - "acl_counter_low_threshold": "70", - "acl_counter_threshold_type": "free" - } - } -} \ No newline at end of file diff --git a/tests/generic_config_updater/files/config_db_with_device_metadata.json b/tests/generic_config_updater/files/config_db_with_device_metadata.json deleted file mode 100644 index 34def579f6..0000000000 --- a/tests/generic_config_updater/files/config_db_with_device_metadata.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "DEVICE_METADATA": { - "localhost": { - "default_bgp_status": "up", - "default_pfcwd_status": "disable", - "bgp_asn": "65100", - "deployment_id": "1", - "docker_routing_config_mode": "separated", - "hostname": "vlab-01", - "hwsku": "Force10-S6000", - "type": "ToRRouter", - "platform": "x86_64-kvm_x86_64-r0", - "mac": "52:54:00:99:7e:85" - } - } -} \ No newline at end of file diff --git a/tests/generic_config_updater/files/config_db_with_interface.json b/tests/generic_config_updater/files/config_db_with_interface.json deleted file mode 100644 index 2e1c488a4a..0000000000 --- a/tests/generic_config_updater/files/config_db_with_interface.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "INTERFACE": { - "Ethernet8": {}, - "Ethernet8|10.0.0.1/30": { - "family": "IPv4", - "scope": "global" - } - }, - "PORT": { - "Ethernet8": { - "admin_status": "up", - 
"alias": "eth8", - "description": "Ethernet8", - "fec": "rs", - "lanes": "65", - "mtu": "9000", - "speed": "25000" - } - } -} diff --git a/tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json b/tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json deleted file mode 100644 index 23d33890f3..0000000000 --- a/tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "PORT": { - "Ethernet0": { - "alias": "Eth1/1", - "lanes": "65", - "description": "", - "speed": "10000" - } - }, - "PORTCHANNEL": { - "PortChannel0001": { - "admin_status": "up" - } - }, - "ACL_TABLE": { - "NO-NSW-PACL-V4": { - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0", - "PortChannel0001" - ] - } - } -} diff --git a/tests/generic_config_updater/files/config_db_with_portchannel_interface.json b/tests/generic_config_updater/files/config_db_with_portchannel_interface.json deleted file mode 100644 index 4e05639dc5..0000000000 --- a/tests/generic_config_updater/files/config_db_with_portchannel_interface.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "PORTCHANNEL": { - "PortChannel0001": { - "admin_status": "up" - } - }, - "PORTCHANNEL_INTERFACE": { - "PortChannel0001|1.1.1.1/24": {} - } -} diff --git a/tests/generic_config_updater/files/contrainer_with_container_config_db.json b/tests/generic_config_updater/files/contrainer_with_container_config_db.json deleted file mode 100644 index b0680b22b5..0000000000 --- a/tests/generic_config_updater/files/contrainer_with_container_config_db.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "FLEX_COUNTER_TABLE": { - "BUFFER_POOL_WATERMARK": { - "FLEX_COUNTER_STATUS": "enable" - } - } -} diff --git a/tests/generic_config_updater/files/dpb_1_split_full_config.json b/tests/generic_config_updater/files/dpb_1_split_full_config.json deleted file mode 100644 index 2097289606..0000000000 --- a/tests/generic_config_updater/files/dpb_1_split_full_config.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "PORT": { - "Ethernet0": { - "alias": "Eth1", - "lanes": "65, 66, 67, 68", - "description": "Ethernet0 100G link", - "speed": "100000" - } - }, - "ACL_TABLE": { - "NO-NSW-PACL-V4": { - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0" - ] - } - }, - "VLAN_MEMBER": { - "Vlan100|Ethernet0": { - "tagging_mode": "untagged" - } - }, - "VLAN": { - "Vlan100": { - "vlanid": "100", - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - } -} diff --git a/tests/generic_config_updater/files/dpb_1_to_4.json-patch b/tests/generic_config_updater/files/dpb_1_to_4.json-patch deleted file mode 100644 index 8eddd7a19d..0000000000 --- a/tests/generic_config_updater/files/dpb_1_to_4.json-patch +++ /dev/null @@ -1,88 +0,0 @@ -[ - { - "op": "add", - "path": "/PORT/Ethernet3", - "value": { - "alias": "Eth1/4", - "lanes": "68", - "description": "", - "speed": "10000" - } - }, - { - "op": "add", - "path": "/PORT/Ethernet1", - "value": { - "alias": "Eth1/2", - "lanes": "66", - "description": "", - "speed": "10000" - } - }, - { - "op": "add", - "path": "/PORT/Ethernet2", - "value": { - "alias": "Eth1/3", - "lanes": "67", - "description": "", - "speed": "10000" - } - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/lanes", - "value": "65" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/alias", - "value": "Eth1/1" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/description", - "value": "" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/speed", - 
"value": "10000" - }, - { - "op": "add", - "path": "/VLAN_MEMBER/Vlan100|Ethernet2", - "value": { - "tagging_mode": "untagged" - } - }, - { - "op": "add", - "path": "/VLAN_MEMBER/Vlan100|Ethernet3", - "value": { - "tagging_mode": "untagged" - } - }, - { - "op": "add", - "path": "/VLAN_MEMBER/Vlan100|Ethernet1", - "value": { - "tagging_mode": "untagged" - } - }, - { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1", - "value": "Ethernet1" - }, - { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/2", - "value": "Ethernet2" - }, - { - "op": "add", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/3", - "value": "Ethernet3" - } -] diff --git a/tests/generic_config_updater/files/dpb_4_splits_full_config.json b/tests/generic_config_updater/files/dpb_4_splits_full_config.json deleted file mode 100644 index 23d1b9ecfc..0000000000 --- a/tests/generic_config_updater/files/dpb_4_splits_full_config.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "PORT": { - "Ethernet0": { - "alias": "Eth1/1", - "lanes": "65", - "description": "", - "speed": "10000" - }, - "Ethernet1": { - "alias": "Eth1/2", - "lanes": "66", - "description": "", - "speed": "10000" - }, - "Ethernet2": { - "alias": "Eth1/3", - "lanes": "67", - "description": "", - "speed": "10000" - }, - "Ethernet3": { - "alias": "Eth1/4", - "lanes": "68", - "description": "", - "speed": "10000" - } - }, - "ACL_TABLE": { - "NO-NSW-PACL-V4": { - "type": "L3", - "policy_desc": "NO-NSW-PACL-V4", - "ports": [ - "Ethernet0", - "Ethernet1", - "Ethernet2", - "Ethernet3" - ] - } - }, - "VLAN_MEMBER": { - "Vlan100|Ethernet0": { - "tagging_mode": "untagged" - }, - "Vlan100|Ethernet1": { - "tagging_mode": "untagged" - }, - "Vlan100|Ethernet2": { - "tagging_mode": "untagged" - }, - "Vlan100|Ethernet3": { - "tagging_mode": "untagged" - } - }, - "VLAN": { - "Vlan100": { - "vlanid": "100", - "dhcp_servers": [ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ] - } - } -} diff --git a/tests/generic_config_updater/files/dpb_4_to_1.json-patch b/tests/generic_config_updater/files/dpb_4_to_1.json-patch deleted file mode 100644 index 33addd290d..0000000000 --- a/tests/generic_config_updater/files/dpb_4_to_1.json-patch +++ /dev/null @@ -1,58 +0,0 @@ -[ - { - "op": "remove", - "path": "/PORT/Ethernet2" - }, - { - "op": "remove", - "path": "/PORT/Ethernet1" - }, - { - "op": "remove", - "path": "/PORT/Ethernet3" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/alias", - "value": "Eth1" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/lanes", - "value": "65, 66, 67, 68" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/description", - "value": "Ethernet0 100G link" - }, - { - "op": "replace", - "path": "/PORT/Ethernet0/speed", - "value": "100000" - }, - { - "op": "remove", - "path": "/VLAN_MEMBER/Vlan100|Ethernet1" - }, - { - "op": "remove", - "path": "/VLAN_MEMBER/Vlan100|Ethernet3" - }, - { - "op": "remove", - "path": "/VLAN_MEMBER/Vlan100|Ethernet2" - }, - { - "op": "remove", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" - }, - { - "op": "remove", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" - }, - { - "op": "remove", - "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" - } -] diff --git a/tests/generic_config_updater/files/empty_config_db.json b/tests/generic_config_updater/files/empty_config_db.json deleted file mode 100644 index 2c63c08510..0000000000 --- a/tests/generic_config_updater/files/empty_config_db.json +++ /dev/null @@ -1,2 +0,0 @@ -{ -} diff --git a/tests/generic_config_updater/files/simple_config_db_inc_deps.json 
b/tests/generic_config_updater/files/simple_config_db_inc_deps.json deleted file mode 100644 index 4554582103..0000000000 --- a/tests/generic_config_updater/files/simple_config_db_inc_deps.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "ACL_TABLE": { - "EVERFLOW": { - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet0" - ], - "stage": "ingress", - "type": "MIRROR" - } - }, - "PORT": { - "Ethernet0": { - "alias": "Eth1", - "lanes": "65, 66, 67, 68", - "description": "Ethernet0 100G link", - "speed": "100000" - } - } -} diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index f69ec08030..f18ad45799 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -1,12 +1,15 @@ import json import jsonpatch -import sonic_yang import unittest from unittest.mock import MagicMock, Mock - from .gutest_helpers import create_side_effect_dict, Files + import generic_config_updater.gu_common as gu_common +# import sys +# sys.path.insert(0,'../../generic_config_updater') +# import gu_common + class TestConfigWrapper(unittest.TestCase): def setUp(self): self.config_wrapper_mock = gu_common.ConfigWrapper() @@ -330,306 +333,3 @@ def __assert_same_patch(self, config_db_patch, sonic_yang_patch, config_wrapper, config_wrapper.convert_sonic_yang_to_config_db(after_update_sonic_yang) self.assertTrue(patch_wrapper.verify_same_json(after_update_config_db_cropped, after_update_sonic_yang_as_config_db)) - -class TestPathAddressing(unittest.TestCase): - def setUp(self): - self.path_addressing = gu_common.PathAddressing() - self.sy_only_models = sonic_yang.SonicYang(gu_common.YANG_DIR) - self.sy_only_models.loadYangModel() - - def test_get_path_tokens(self): - def check(path, tokens): - expected=tokens - actual=self.path_addressing.get_path_tokens(path) - self.assertEqual(expected, actual) - - check("", []) - check("/", [""]) - check("/token", ["token"]) - check("/more/than/one/token", ["more", "than", "one", "token"]) - check("/has/numbers/0/and/symbols/^", ["has", "numbers", "0", "and", "symbols", "^"]) - check("/~0/this/is/telda", ["~", "this", "is", "telda"]) - check("/~1/this/is/forward-slash", ["/", "this", "is", "forward-slash"]) - check("/\\\\/no-escaping", ["\\\\", "no-escaping"]) - check("////empty/tokens/are/ok", ["", "", "", "empty", "tokens", "are", "ok"]) - - def test_create_path(self): - def check(tokens, path): - expected=path - actual=self.path_addressing.create_path(tokens) - self.assertEqual(expected, actual) - - check([], "",) - check([""], "/",) - check(["token"], "/token") - check(["more", "than", "one", "token"], "/more/than/one/token") - check(["has", "numbers", "0", "and", "symbols", "^"], "/has/numbers/0/and/symbols/^") - check(["~", "this", "is", "telda"], "/~0/this/is/telda") - check(["/", "this", "is", "forward-slash"], "/~1/this/is/forward-slash") - check(["\\\\", "no-escaping"], "/\\\\/no-escaping") - check(["", "", "", "empty", "tokens", "are", "ok"], "////empty/tokens/are/ok") - check(["~token", "telda-not-followed-by-0-or-1"], "/~0token/telda-not-followed-by-0-or-1") - - def test_get_xpath_tokens(self): - def check(path, tokens): - expected=tokens - actual=self.path_addressing.get_xpath_tokens(path) - self.assertEqual(expected, actual) - - self.assertRaises(ValueError, check, "", []) - check("/", []) - check("/token", ["token"]) - check("/more/than/one/token", ["more", "than", "one", "token"]) - check("/multi/tokens/with/empty/last/token/", ["multi", "tokens", "with", "empty", 
"last", "token", ""]) - check("/has/numbers/0/and/symbols/^", ["has", "numbers", "0", "and", "symbols", "^"]) - check("/has[a='predicate']/in/the/beginning", ["has[a='predicate']", "in", "the", "beginning"]) - check("/ha/s[a='predicate']/in/the/middle", ["ha", "s[a='predicate']", "in", "the", "middle"]) - check("/ha/s[a='predicate-in-the-end']", ["ha", "s[a='predicate-in-the-end']"]) - check("/it/has[more='than'][one='predicate']/somewhere", ["it", "has[more='than'][one='predicate']", "somewhere"]) - check("/ha/s[a='predicate\"with']/double-quotes/inside", ["ha", "s[a='predicate\"with']", "double-quotes", "inside"]) - check('/a/predicate[with="double"]/quotes', ["a", 'predicate[with="double"]', "quotes"]) - check('/multiple["predicate"][with="double"]/quotes', ['multiple["predicate"][with="double"]', "quotes"]) - check('/multiple["predicate"][with="double"]/quotes', ['multiple["predicate"][with="double"]', "quotes"]) - check('/ha/s[a="predicate\'with"]/single-quote/inside', ["ha", 's[a="predicate\'with"]', "single-quote", "inside"]) - # XPATH 1.0 does not support single-quote within single-quoted string. str literal can be '[^']*' - # Not validating no single-quote within single-quoted string - check("/a/mix['of''quotes\"does']/not/work/well", ["a", "mix['of''quotes\"does']", "not", "work", "well"]) - # XPATH 1.0 does not support double-quotes within double-quoted string. str literal can be "[^"]*" - # Not validating no double-quotes within double-quoted string - check('/a/mix["of""quotes\'does"]/not/work/well', ["a", 'mix["of""quotes\'does"]', "not", "work", "well"]) - - def test_create_xpath(self): - def check(tokens, xpath): - expected=xpath - actual=self.path_addressing.create_xpath(tokens) - self.assertEqual(expected, actual) - - check([], "/") - check(["token"], "/token") - check(["more", "than", "one", "token"], "/more/than/one/token") - check(["multi", "tokens", "with", "empty", "last", "token", ""], "/multi/tokens/with/empty/last/token/") - check(["has", "numbers", "0", "and", "symbols", "^"], "/has/numbers/0/and/symbols/^") - check(["has[a='predicate']", "in", "the", "beginning"], "/has[a='predicate']/in/the/beginning") - check(["ha", "s[a='predicate']", "in", "the", "middle"], "/ha/s[a='predicate']/in/the/middle") - check(["ha", "s[a='predicate-in-the-end']"], "/ha/s[a='predicate-in-the-end']") - check(["it", "has[more='than'][one='predicate']", "somewhere"], "/it/has[more='than'][one='predicate']/somewhere") - check(["ha", "s[a='predicate\"with']", "double-quotes", "inside"], "/ha/s[a='predicate\"with']/double-quotes/inside") - check(["a", 'predicate[with="double"]', "quotes"], '/a/predicate[with="double"]/quotes') - check(['multiple["predicate"][with="double"]', "quotes"], '/multiple["predicate"][with="double"]/quotes') - check(['multiple["predicate"][with="double"]', "quotes"], '/multiple["predicate"][with="double"]/quotes') - check(["ha", 's[a="predicate\'with"]', "single-quote", "inside"], '/ha/s[a="predicate\'with"]/single-quote/inside') - # XPATH 1.0 does not support single-quote within single-quoted string. str literal can be '[^']*' - # Not validating no single-quote within single-quoted string - check(["a", "mix['of''quotes\"does']", "not", "work", "well"], "/a/mix['of''quotes\"does']/not/work/well", ) - # XPATH 1.0 does not support double-quotes within double-quoted string. 
str literal can be "[^"]*" - # Not validating no double-quotes within double-quoted string - check(["a", 'mix["of""quotes\'does"]', "not", "work", "well"], '/a/mix["of""quotes\'does"]/not/work/well') - - def test_find_ref_paths__ref_is_the_whole_key__returns_ref_paths(self): - # Arrange - path = "/PORT/Ethernet0" - expected = [ - "/ACL_TABLE/NO-NSW-PACL-V4/ports/0", - "/VLAN_MEMBER/Vlan1000|Ethernet0", - ] - - # Act - actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) - - # Assert - self.assertCountEqual(expected, actual) - - def test_find_ref_paths__ref_is_a_part_of_key__returns_ref_paths(self): - # Arrange - path = "/VLAN/Vlan1000" - expected = [ - "/VLAN_MEMBER/Vlan1000|Ethernet0", - "/VLAN_MEMBER/Vlan1000|Ethernet4", - "/VLAN_MEMBER/Vlan1000|Ethernet8", - ] - - # Act - actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) - - # Assert - self.assertCountEqual(expected, actual) - - def test_find_ref_paths__ref_is_in_multilist__returns_ref_paths(self): - # Arrange - path = "/PORT/Ethernet8" - expected = [ - "/INTERFACE/Ethernet8", - "/INTERFACE/Ethernet8|10.0.0.1~130", - ] - - # Act - actual = self.path_addressing.find_ref_paths(path, Files.CONFIG_DB_WITH_INTERFACE) - - # Assert - self.assertCountEqual(expected, actual) - - def test_find_ref_paths__ref_is_in_leafref_union__returns_ref_paths(self): - # Arrange - path = "/PORTCHANNEL/PortChannel0001" - expected = [ - "/ACL_TABLE/NO-NSW-PACL-V4/ports/1", - ] - - # Act - actual = self.path_addressing.find_ref_paths(path, Files.CONFIG_DB_WITH_PORTCHANNEL_AND_ACL) - - # Assert - self.assertCountEqual(expected, actual) - - def test_find_ref_paths__path_is_table__returns_ref_paths(self): - # Arrange - path = "/PORT" - expected = [ - "/ACL_TABLE/DATAACL/ports/0", - "/ACL_TABLE/EVERFLOW/ports/0", - "/ACL_TABLE/EVERFLOWV6/ports/0", - "/ACL_TABLE/EVERFLOWV6/ports/1", - "/ACL_TABLE/NO-NSW-PACL-V4/ports/0", - "/VLAN_MEMBER/Vlan1000|Ethernet0", - "/VLAN_MEMBER/Vlan1000|Ethernet4", - "/VLAN_MEMBER/Vlan1000|Ethernet8", - ] - - # Act - actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) - - # Assert - self.assertCountEqual(expected, actual) - - def test_find_ref_paths__whole_config_path__returns_all_refs(self): - # Arrange - path = "" - expected = [ - "/ACL_TABLE/DATAACL/ports/0", - "/ACL_TABLE/EVERFLOW/ports/0", - "/ACL_TABLE/EVERFLOWV6/ports/0", - "/ACL_TABLE/EVERFLOWV6/ports/1", - "/ACL_TABLE/NO-NSW-PACL-V4/ports/0", - "/VLAN_MEMBER/Vlan1000|Ethernet0", - "/VLAN_MEMBER/Vlan1000|Ethernet4", - "/VLAN_MEMBER/Vlan1000|Ethernet8", - ] - - # Act - actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) - - # Assert - self.assertCountEqual(expected, actual) - - def test_convert_path_to_xpath(self): - def check(path, xpath, config=None): - if not config: - config = Files.CROPPED_CONFIG_DB_AS_JSON - - expected=xpath - actual=self.path_addressing.convert_path_to_xpath(path, config, self.sy_only_models) - self.assertEqual(expected, actual) - - check(path="", xpath="/") - check(path="/VLAN_MEMBER", xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER") - check(path="/VLAN/Vlan1000/dhcp_servers", - xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers") - check(path="/VLAN/Vlan1000/dhcp_servers/0", - xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers[.='192.0.0.1']") - check(path="/PORT/Ethernet0/lanes", xpath="/sonic-port:sonic-port/PORT/PORT_LIST[name='Ethernet0']/lanes") - 
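The checks above and below pin down the ConfigDB-path-to-YANG-xpath correspondence; as a reading aid, the rule they exercise can be sketched as follows (placeholders are hypothetical and not part of the original tests):

    # /TABLE                    -> /<module>:<module>/TABLE
    # /TABLE/<key>              -> /<module>:<module>/TABLE/TABLE_LIST[name='<key>']
    # /TABLE/<key1>|<key2>      -> .../TABLE_LIST[<k1>='<key1>'][<k2>='<key2>']
    # /TABLE/<key>/<leaf>       -> .../TABLE_LIST[name='<key>']/<leaf>
    # /TABLE/<key>/<leaflist>/0 -> .../TABLE_LIST[name='<key>']/<leaflist>[.='<value at index 0>']
    #                              (YANG leaf-lists are addressed by value, not by index)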
check(path="/ACL_TABLE/NO-NSW-PACL-V4/ports/0", - xpath="/sonic-acl:sonic-acl/ACL_TABLE/ACL_TABLE_LIST[ACL_TABLE_NAME='NO-NSW-PACL-V4']/ports[.='Ethernet0']") - check(path="/ACL_TABLE/NO-NSW-PACL-V4/ports/0", - xpath="/sonic-acl:sonic-acl/ACL_TABLE/ACL_TABLE_LIST[ACL_TABLE_NAME='NO-NSW-PACL-V4']/ports[.='Ethernet0']") - check(path="/VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode", - xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode") - check(path="/VLAN_MEMBER/Vlan1000|Ethernet8", - xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']") - check(path="/DEVICE_METADATA/localhost/hwsku", - xpath="/sonic-device_metadata:sonic-device_metadata/DEVICE_METADATA/localhost/hwsku", - config=Files.CONFIG_DB_WITH_DEVICE_METADATA) - check(path="/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", - xpath="/sonic-flex_counter:sonic-flex_counter/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", - config=Files.CONTRAINER_WITH_CONTAINER_CONFIG_DB) - check(path="/ACL_RULE/SSH_ONLY|RULE1/L4_SRC_PORT", - xpath="/sonic-acl:sonic-acl/ACL_RULE/ACL_RULE_LIST[ACL_TABLE_NAME='SSH_ONLY'][RULE_NAME='RULE1']/L4_SRC_PORT", - config=Files.CONFIG_DB_CHOICE) - check(path="/INTERFACE/Ethernet8", - xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_LIST[name='Ethernet8']", - config=Files.CONFIG_DB_WITH_INTERFACE) - check(path="/INTERFACE/Ethernet8|10.0.0.1~130", - xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']", - config=Files.CONFIG_DB_WITH_INTERFACE) - check(path="/INTERFACE/Ethernet8|10.0.0.1~130/scope", - xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']/scope", - config=Files.CONFIG_DB_WITH_INTERFACE) - check(path="/PORTCHANNEL_INTERFACE", - xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE", - config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) - check(path="/PORTCHANNEL_INTERFACE/PortChannel0001|1.1.1.1~124", - xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE/PORTCHANNEL_INTERFACE_IPPREFIX_LIST[name='PortChannel0001'][ip_prefix='1.1.1.1/24']", - config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) - - def test_convert_xpath_to_path(self): - def check(xpath, path, config=None): - if not config: - config = Files.CROPPED_CONFIG_DB_AS_JSON - - expected=path - actual=self.path_addressing.convert_xpath_to_path(xpath, config, self.sy_only_models) - self.assertEqual(expected, actual) - - check(xpath="/",path="") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER", path="/VLAN_MEMBER") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST",path="/VLAN_MEMBER") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']", - path="/VLAN_MEMBER/Vlan1000|Ethernet8") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/name", - path="/VLAN_MEMBER/Vlan1000|Ethernet8") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/port", - path="/VLAN_MEMBER/Vlan1000|Ethernet8") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode", - path="/VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode") - check(xpath="/sonic-vlan:sonic-acl/ACL_RULE", path="/ACL_RULE") - 
check(xpath="/sonic-vlan:sonic-acl/ACL_RULE/ACL_RULE_LIST[ACL_TABLE_NAME='SSH_ONLY'][RULE_NAME='RULE1']", - path="/ACL_RULE/SSH_ONLY|RULE1", - config=Files.CONFIG_DB_CHOICE) - check(xpath="/sonic-acl:sonic-acl/ACL_RULE/ACL_RULE_LIST[ACL_TABLE_NAME='SSH_ONLY'][RULE_NAME='RULE1']/L4_SRC_PORT", - path="/ACL_RULE/SSH_ONLY|RULE1/L4_SRC_PORT", - config=Files.CONFIG_DB_CHOICE) - check(xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers", - path="/VLAN/Vlan1000/dhcp_servers") - check(xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers[.='192.0.0.1']", - path="/VLAN/Vlan1000/dhcp_servers/0") - check(xpath="/sonic-port:sonic-port/PORT/PORT_LIST[name='Ethernet0']/lanes", path="/PORT/Ethernet0/lanes") - check(xpath="/sonic-acl:sonic-acl/ACL_TABLE/ACL_TABLE_LIST[ACL_TABLE_NAME='NO-NSW-PACL-V4']/ports[.='Ethernet0']", - path="/ACL_TABLE/NO-NSW-PACL-V4/ports/0") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode", - path="/VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode") - check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']", - path="/VLAN_MEMBER/Vlan1000|Ethernet8") - check(xpath="/sonic-device_metadata:sonic-device_metadata/DEVICE_METADATA/localhost/hwsku", - path="/DEVICE_METADATA/localhost/hwsku", - config=Files.CONFIG_DB_WITH_DEVICE_METADATA) - check(xpath="/sonic-flex_counter:sonic-flex_counter/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK", - path="/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK", - config=Files.CONTRAINER_WITH_CONTAINER_CONFIG_DB) - check(xpath="/sonic-flex_counter:sonic-flex_counter/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", - path="/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", - config=Files.CONTRAINER_WITH_CONTAINER_CONFIG_DB) - check(xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_LIST[name='Ethernet8']", - path="/INTERFACE/Ethernet8", - config=Files.CONFIG_DB_WITH_INTERFACE) - check(xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']", - path="/INTERFACE/Ethernet8|10.0.0.1~130", - config=Files.CONFIG_DB_WITH_INTERFACE) - check(xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']/scope", - path="/INTERFACE/Ethernet8|10.0.0.1~130/scope", - config=Files.CONFIG_DB_WITH_INTERFACE) - check(xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE", - path="/PORTCHANNEL_INTERFACE", - config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) - check(xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE/PORTCHANNEL_INTERFACE_IPPREFIX_LIST[name='PortChannel0001'][ip_prefix='1.1.1.1/24']", - path="/PORTCHANNEL_INTERFACE/PortChannel0001|1.1.1.1~124", - config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) - diff --git a/tests/generic_config_updater/patch_sorter_test.py b/tests/generic_config_updater/patch_sorter_test.py deleted file mode 100644 index 4da9fb901b..0000000000 --- a/tests/generic_config_updater/patch_sorter_test.py +++ /dev/null @@ -1,1730 +0,0 @@ -import jsonpatch -import unittest -from unittest.mock import MagicMock, Mock - -import generic_config_updater.patch_sorter as ps -from .gutest_helpers import Files, create_side_effect_dict -from generic_config_updater.gu_common import ConfigWrapper, PatchWrapper, OperationWrapper, \ - GenericConfigUpdaterError, OperationType, JsonChange, PathAddressing - -class TestDiff(unittest.TestCase): - def 
test_apply_move__updates_current_config(self): - # Arrange - diff = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, target_config=Files.ANY_CONFIG_DB) - move = ps.JsonMove.from_patch(Files.SINGLE_OPERATION_CONFIG_DB_PATCH) - - expected = ps.Diff(current_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION, target_config=Files.ANY_CONFIG_DB) - - # Act - actual = diff.apply_move(move) - - # Assert - self.assertEqual(expected.current_config, actual.current_config) - self.assertEqual(expected.target_config, actual.target_config) - - def test_has_no_diff__diff_exists__returns_false(self): - # Arrange - diff = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, - target_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION) - - # Act and Assert - self.assertFalse(diff.has_no_diff()) - - def test_has_no_diff__no_diff__returns_true(self): - # Arrange - diff = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, - target_config=Files.CROPPED_CONFIG_DB_AS_JSON) - - # Act and Assert - self.assertTrue(diff.has_no_diff()) - - def test_hash__different_current_config__different_hashes(self): - # Arrange - diff1 = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, target_config=Files.ANY_CONFIG_DB) - diff2 = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, target_config=Files.ANY_CONFIG_DB) - diff3 = ps.Diff(current_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION, target_config=Files.ANY_CONFIG_DB) - - # Act - hash1 = hash(diff1) - hash2 = hash(diff2) - hash3 = hash(diff3) - - # Assert - self.assertEqual(hash1, hash2) # same current config - self.assertNotEqual(hash1, hash3) - - def test_hash__different_target_config__different_hashes(self): - # Arrange - diff1 = ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.CROPPED_CONFIG_DB_AS_JSON) - diff2 = ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.CROPPED_CONFIG_DB_AS_JSON) - diff3 = ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION) - - # Act - hash1 = hash(diff1) - hash2 = hash(diff2) - hash3 = hash(diff3) - - # Assert - self.assertEqual(hash1, hash2) # same target config - self.assertNotEqual(hash1, hash3) - - def test_hash__swapped_current_and_target_configs__different_hashes(self): - # Arrange - diff1 = ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.ANY_OTHER_CONFIG_DB) - diff2 = ps.Diff(current_config=Files.ANY_OTHER_CONFIG_DB, target_config=Files.ANY_CONFIG_DB) - - # Act - hash1 = hash(diff1) - hash2 = hash(diff2) - - # Assert - self.assertNotEqual(hash1, hash2) - - def test_eq__different_current_config__returns_false(self): - # Arrange - diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) - other_diff = ps.Diff(Files.ANY_OTHER_CONFIG_DB, Files.ANY_CONFIG_DB) - - # Act and assert - self.assertNotEqual(diff, other_diff) - self.assertFalse(diff == other_diff) - - def test_eq__different_target_config__returns_false(self): - # Arrange - diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) - other_diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_OTHER_CONFIG_DB) - - # Act and assert - self.assertNotEqual(diff, other_diff) - self.assertFalse(diff == other_diff) - - def test_eq__same_configs__returns_true(self): - # Arrange - diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) - other_diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) - - # Act and assert - self.assertEqual(diff, other_diff) - self.assertTrue(diff == other_diff) - -class TestJsonMove(unittest.TestCase): - def setUp(self): - self.operation_wrapper =
OperationWrapper() - self.any_op_type = OperationType.REPLACE - self.any_tokens = ["table1", "key11"] - self.any_path = "/table1/key11" - self.any_config = { - "table1": { - "key11": "value11" - } - } - self.any_value = "value11" - self.any_operation = self.operation_wrapper.create(self.any_op_type, self.any_path, self.any_value) - self.any_diff = ps.Diff(self.any_config, self.any_config) - - def test_ctor__delete_op_whole_config__none_value_and_empty_path(self): - # Arrange - path = "" - diff = ps.Diff(current_config={}, target_config=self.any_config) - - # Act - jsonmove = ps.JsonMove(diff, OperationType.REMOVE, []) - - # Assert - self.verify_jsonmove(self.operation_wrapper.create(OperationType.REMOVE, path), - OperationType.REMOVE, - [], - None, - jsonmove) - - def test_ctor__remove_op__operation_created_directly(self): - # Arrange and Act - jsonmove = ps.JsonMove(self.any_diff, OperationType.REMOVE, self.any_tokens) - - # Assert - self.verify_jsonmove(self.operation_wrapper.create(OperationType.REMOVE, self.any_path), - OperationType.REMOVE, - self.any_tokens, - None, - jsonmove) - - def test_ctor__replace_op_whole_config__whole_config_value_and_empty_path(self): - # Arrange - path = "" - diff = ps.Diff(current_config={}, target_config=self.any_config) - - # Act - jsonmove = ps.JsonMove(diff, OperationType.REPLACE, [], []) - - # Assert - self.verify_jsonmove(self.operation_wrapper.create(OperationType.REPLACE, path, self.any_config), - OperationType.REPLACE, - [], - [], - jsonmove) - - def test_ctor__replace_op__operation_created_directly(self): - # Arrange and Act - jsonmove = ps.JsonMove(self.any_diff, OperationType.REPLACE, self.any_tokens, self.any_tokens) - - # Assert - self.verify_jsonmove(self.operation_wrapper.create(OperationType.REPLACE, self.any_path, self.any_value), - OperationType.REPLACE, - self.any_tokens, - self.any_tokens, - jsonmove) - - def test_ctor__add_op_whole_config__whole_config_value_and_empty_path(self): - # Arrange - path = "" - diff = ps.Diff(current_config={}, target_config=self.any_config) - - # Act - jsonmove = ps.JsonMove(diff, OperationType.ADD, [], []) - - # Assert - self.verify_jsonmove(self.operation_wrapper.create(OperationType.ADD, path, self.any_config), - OperationType.ADD, - [], - [], - jsonmove) - - def test_ctor__add_op_path_exist__same_value_and_path(self): - # Arrange and Act - jsonmove = ps.JsonMove(self.any_diff, OperationType.ADD, self.any_tokens, self.any_tokens) - - # Assert - self.verify_jsonmove(self.operation_wrapper.create(OperationType.ADD, self.any_path, self.any_value), - OperationType.ADD, - self.any_tokens, - self.any_tokens, - jsonmove) - - def test_ctor__add_op_path_exist_include_list__same_value_and_path(self): - # Arrange - current_config = { - "table1": { - "list1": ["value11", "value13"] - } - } - target_config = { - "table1": { - "list1": ["value11", "value12", "value13", "value14"] - } - } - diff = ps.Diff(current_config, target_config) - op_type = OperationType.ADD - current_config_tokens = ["table1", "list1", 1] # Insertion position in the current list - target_config_tokens = ["table1", "list1", 1] # Index 1 in the target list holds "value12" - expected_jsonpatch_path = "/table1/list1/1" - expected_jsonpatch_value = "value12" - # NOTE: the target config can contain more diff than the given move.
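The expected patch above uses plain RFC 6902 list semantics; a minimal standalone sketch with the jsonpatch library (illustrative only, not part of the original test):

    import jsonpatch

    # ADD at a list index inserts before the existing element and shifts the rest right.
    doc = {"table1": {"list1": ["value11", "value13"]}}
    patch = jsonpatch.JsonPatch([{"op": "add", "path": "/table1/list1/1", "value": "value12"}])
    print(patch.apply(doc))  # {'table1': {'list1': ['value11', 'value12', 'value13']}}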
- - # Act - jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens) - - # Assert - self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value), - op_type, - current_config_tokens, - target_config_tokens, - jsonmove) - - def test_ctor__add_op_path_exist_list_index_doesnot_exist_in_target___same_value_and_path(self): - # Arrange - current_config = { - "table1": { - "list1": ["value11"] - } - } - target_config = { - "table1": { - "list1": ["value12"] - } - } - diff = ps.Diff(current_config, target_config) - op_type = OperationType.ADD - current_config_tokens = ["table1", "list1", 1] # Index 1 does not exist in the current list, so the value is appended - target_config_tokens = ["table1", "list1", 0] - expected_jsonpatch_path = "/table1/list1/1" - expected_jsonpatch_value = "value12" - # NOTE: the target config can contain more diff than the given move. - - # Act - jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens) - - # Assert - self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value), - op_type, - current_config_tokens, - target_config_tokens, - jsonmove) - - def test_ctor__add_op_path_doesnot_exist__value_and_path_of_parent(self): - # Arrange - current_config = { - } - target_config = { - "table1": { - "key11": { - "key111": "value111" - } - } - } - diff = ps.Diff(current_config, target_config) - op_type = OperationType.ADD - current_config_tokens = ["table1", "key11", "key111"] - target_config_tokens = ["table1", "key11", "key111"] - expected_jsonpatch_path = "/table1" - expected_jsonpatch_value = { - "key11": { - "key111": "value111" - } - } - # NOTE: the target config can contain more diff than the given move. - - # Act - jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens) - - # Assert - self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value), - op_type, - current_config_tokens, - target_config_tokens, - jsonmove) - - def test_ctor__add_op_path_doesnot_exist_include_list__value_and_path_of_parent(self): - # Arrange - current_config = { - } - target_config = { - "table1": { - "list1": ["value11", "value12", "value13", "value14"] - } - } - diff = ps.Diff(current_config, target_config) - op_type = OperationType.ADD - current_config_tokens = ["table1", "list1", 0] - target_config_tokens = ["table1", "list1", 1] - expected_jsonpatch_path = "/table1" - expected_jsonpatch_value = { - "list1": ["value12"] - } - # NOTE: the target config can contain more diff than the given move.
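When the path's parent does not exist yet, the operation is anchored at the deepest missing ancestor, as the expected values above show; a minimal standalone sketch with jsonpatch (illustrative only, not part of the original test):

    import jsonpatch

    # /table1 is absent, so the move adds the parent carrying just the single moved item.
    doc = {}
    patch = jsonpatch.JsonPatch([{"op": "add", "path": "/table1", "value": {"list1": ["value12"]}}])
    print(patch.apply(doc))  # {'table1': {'list1': ['value12']}}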
- - # Act - jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens) - - # Assert - self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value), - op_type, - current_config_tokens, - target_config_tokens, - jsonmove) - - def test_from_patch__more_than_1_op__failure(self): - # Arrange - patch = jsonpatch.JsonPatch([self.any_operation, self.any_operation]) - - # Act and Assert - self.assertRaises(GenericConfigUpdaterError, ps.JsonMove.from_patch, patch) - - def test_from_patch__delete_op__delete_jsonmove(self): - # Arrange - operation = self.operation_wrapper.create(OperationType.REMOVE, self.any_path) - patch = jsonpatch.JsonPatch([operation]) - - # Act - jsonmove = ps.JsonMove.from_patch(patch) - - # Assert - self.verify_jsonmove(operation, - OperationType.REMOVE, - self.any_tokens, - None, - jsonmove) - - def test_from_patch__replace_op__replace_jsonmove(self): - # Arrange - operation = self.operation_wrapper.create(OperationType.REPLACE, self.any_path, self.any_value) - patch = jsonpatch.JsonPatch([operation]) - - # Act - jsonmove = ps.JsonMove.from_patch(patch) - - # Assert - self.verify_jsonmove(operation, - OperationType.REPLACE, - self.any_tokens, - self.any_tokens, - jsonmove) - - def test_from_patch__add_op__add_jsonmove(self): - # Arrange - operation = self.operation_wrapper.create(OperationType.ADD, self.any_path, self.any_value) - patch = jsonpatch.JsonPatch([operation]) - - # Act - jsonmove = ps.JsonMove.from_patch(patch) - - # Assert - self.verify_jsonmove(operation, - OperationType.ADD, - self.any_tokens, - self.any_tokens, - jsonmove) - - def test_from_patch__add_op_with_list_indexes__add_jsonmove(self): - # Arrange - path = "/table1/key11/list1111/3" - value = "value11111" - # From a JsonPatch it is not possible to figure out if the '3' is an item in a list or a dictionary, - # will assume by default a dictionary for simplicity. 
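The ambiguity the comment above describes is easy to reproduce: the same JSON pointer resolves against both a dict with key "3" and a list with index 3, so the token type cannot be recovered from the patch alone. A standalone sketch using the jsonpointer library that jsonpatch builds on (illustrative only, not part of the original test):

    from jsonpointer import resolve_pointer

    as_dict = {"table1": {"key11": {"list1111": {"3": "value11111"}}}}
    as_list = {"table1": {"key11": {"list1111": ["a", "b", "c", "value11111"]}}}

    # Both resolve to "value11111"; '3' may be a dict key or a list index.
    print(resolve_pointer(as_dict, "/table1/key11/list1111/3"))
    print(resolve_pointer(as_list, "/table1/key11/list1111/3"))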
- tokens = ["table1", "key11", "list1111", "3"] - operation = self.operation_wrapper.create(OperationType.ADD, path, value) - patch = jsonpatch.JsonPatch([operation]) - - # Act - jsonmove = ps.JsonMove.from_patch(patch) - - # Assert - self.verify_jsonmove(operation, - OperationType.ADD, - tokens, - tokens, - jsonmove) - - def test_from_patch__replace_whole_config__whole_config_jsonmove(self): - # Arrange - tokens = [] - path = "" - value = {"table1": {"key1": "value1"} } - operation = self.operation_wrapper.create(OperationType.REPLACE, path, value) - patch = jsonpatch.JsonPatch([operation]) - - # Act - jsonmove = ps.JsonMove.from_patch(patch) - - # Assert - self.verify_jsonmove(operation, - OperationType.REPLACE, - tokens, - tokens, - jsonmove) - - def verify_jsonmove(self, - expected_operation, - expected_op_type, - expected_current_config_tokens, - expected_target_config_tokens, - jsonmove): - expected_patch = jsonpatch.JsonPatch([expected_operation]) - self.assertEqual(expected_patch, jsonmove.patch) - self.assertEqual(expected_op_type, jsonmove.op_type) - self.assertListEqual(expected_current_config_tokens, jsonmove.current_config_tokens) - self.assertEqual(expected_target_config_tokens, jsonmove.target_config_tokens) - -class TestMoveWrapper(unittest.TestCase): - def setUp(self): - self.any_current_config = {} - self.any_target_config = {} - self.any_diff = ps.Diff(self.any_current_config, self.any_target_config) - self.any_move = Mock() - self.any_other_move1 = Mock() - self.any_other_move2 = Mock() - self.any_extended_move = Mock() - self.any_other_extended_move1 = Mock() - self.any_other_extended_move2 = Mock() - - self.single_move_generator = Mock() - self.single_move_generator.generate.side_effect = \ - create_side_effect_dict({(str(self.any_diff),): [self.any_move]}) - - self.another_single_move_generator = Mock() - self.another_single_move_generator.generate.side_effect = \ - create_side_effect_dict({(str(self.any_diff),): [self.any_other_move1]}) - - self.multiple_move_generator = Mock() - self.multiple_move_generator.generate.side_effect = create_side_effect_dict( - {(str(self.any_diff),): [self.any_move, self.any_other_move1, self.any_other_move2]}) - - self.single_move_extender = Mock() - self.single_move_extender.extend.side_effect = create_side_effect_dict( - { - (str(self.any_move), str(self.any_diff)): [self.any_extended_move], - (str(self.any_extended_move), str(self.any_diff)): [], # As first extended move will be extended - (str(self.any_other_extended_move1), str(self.any_diff)): [] # Needed when mixed with other extenders - }) - - self.another_single_move_extender = Mock() - self.another_single_move_extender.extend.side_effect = create_side_effect_dict( - { - (str(self.any_move), str(self.any_diff)): [self.any_other_extended_move1], - (str(self.any_other_extended_move1), str(self.any_diff)): [], # As first extended move will be extended - (str(self.any_extended_move), str(self.any_diff)): [] # Needed when mixed with other extenders - }) - - self.multiple_move_extender = Mock() - self.multiple_move_extender.extend.side_effect = create_side_effect_dict( - { - (str(self.any_move), str(self.any_diff)): \ - [self.any_extended_move, self.any_other_extended_move1, self.any_other_extended_move2], - # All extended moves will be extended - (str(self.any_extended_move), str(self.any_diff)): [], - (str(self.any_other_extended_move1), str(self.any_diff)): [], - (str(self.any_other_extended_move2), str(self.any_diff)): [], - }) - - self.mixed_move_extender = Mock() - 
self.mixed_move_extender.extend.side_effect = create_side_effect_dict( - { - (str(self.any_move), str(self.any_diff)): [self.any_extended_move], - (str(self.any_other_move1), str(self.any_diff)): [self.any_other_extended_move1], - (str(self.any_extended_move), str(self.any_diff)): \ - [self.any_other_extended_move1, self.any_other_extended_move2], - # All extended moves will be extended - (str(self.any_other_extended_move1), str(self.any_diff)): [], - (str(self.any_other_extended_move2), str(self.any_diff)): [], - }) - - self.fail_move_validator = Mock() - self.fail_move_validator.validate.side_effect = create_side_effect_dict( - {(str(self.any_move), str(self.any_diff)): False}) - - self.success_move_validator = Mock() - self.success_move_validator.validate.side_effect = create_side_effect_dict( - {(str(self.any_move), str(self.any_diff)): True}) - - def test_ctor__assigns_values_correctly(self): - # Arrange - move_generators = Mock() - move_extenders = Mock() - move_validators = Mock() - - # Act - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, move_validators) - - # Assert - self.assertIs(move_generators, move_wrapper.move_generators) - self.assertIs(move_extenders, move_wrapper.move_extenders) - self.assertIs(move_validators, move_wrapper.move_validators) - - def test_generate__single_move_generator__single_move_returned(self): - # Arrange - move_generators = [self.single_move_generator] - move_wrapper = ps.MoveWrapper(move_generators, [], []) - expected = [self.any_move] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__multiple_move_generator__multiple_move_returned(self): - # Arrange - move_generators = [self.multiple_move_generator] - move_wrapper = ps.MoveWrapper(move_generators, [], []) - expected = [self.any_move, self.any_other_move1, self.any_other_move2] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__different_move_generators__different_moves_returned(self): - # Arrange - move_generators = [self.single_move_generator, self.another_single_move_generator] - move_wrapper = ps.MoveWrapper(move_generators, [], []) - expected = [self.any_move, self.any_other_move1] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__duplicate_generated_moves__unique_moves_returned(self): - # Arrange - move_generators = [self.single_move_generator, self.single_move_generator] - move_wrapper = ps.MoveWrapper(move_generators, [], []) - expected = [self.any_move] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__single_move_extender__one_extended_move_returned(self): - # Arrange - move_generators = [self.single_move_generator] - move_extenders = [self.single_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) - expected = [self.any_move, self.any_extended_move] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__multiple_move_extender__multiple_extended_move_returned(self): - # Arrange - move_generators = [self.single_move_generator] - move_extenders = [self.multiple_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) - expected = [self.any_move, self.any_extended_move, 
self.any_other_extended_move1, self.any_other_extended_move2] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__different_move_extenders__different_extended_moves_returned(self): - # Arrange - move_generators = [self.single_move_generator] - move_extenders = [self.single_move_extender, self.another_single_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) - expected = [self.any_move, self.any_extended_move, self.any_other_extended_move1] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__duplicate_extended_moves__unique_moves_returned(self): - # Arrange - move_generators = [self.single_move_generator] - move_extenders = [self.single_move_extender, self.single_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) - expected = [self.any_move, self.any_extended_move] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_generate__mixed_extended_moves__unique_moves_returned(self): - # Arrange - move_generators = [self.single_move_generator, self.another_single_move_generator] - move_extenders = [self.mixed_move_extender] - move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) - expected = [self.any_move, - self.any_other_move1, - self.any_extended_move, - self.any_other_extended_move1, - self.any_other_extended_move2] - - # Act - actual = list(move_wrapper.generate(self.any_diff)) - - # Assert - self.assertListEqual(expected, actual) - - def test_validate__validation_fail__false_returned(self): - # Arrange - move_validators = [self.fail_move_validator] - move_wrapper = ps.MoveWrapper([], [], move_validators) - - # Act and assert - self.assertFalse(move_wrapper.validate(self.any_move, self.any_diff)) - - def test_validate__validation_succeed__true_returned(self): - # Arrange - move_validators = [self.success_move_validator] - move_wrapper = ps.MoveWrapper([], [], move_validators) - - # Act and assert - self.assertTrue(move_wrapper.validate(self.any_move, self.any_diff)) - - def test_validate__multiple_validators_last_fail___false_returned(self): - # Arrange - move_validators = [self.success_move_validator, self.success_move_validator, self.fail_move_validator] - move_wrapper = ps.MoveWrapper([], [], move_validators) - - # Act and assert - self.assertFalse(move_wrapper.validate(self.any_move, self.any_diff)) - - def test_validate__multiple_validators_succeed___true_returned(self): - # Arrange - move_validators = [self.success_move_validator, self.success_move_validator, self.success_move_validator] - move_wrapper = ps.MoveWrapper([], [], move_validators) - - # Act and assert - self.assertTrue(move_wrapper.validate(self.any_move, self.any_diff)) - - def test_simulate__applies_move(self): - # Arrange - diff = Mock() - diff.apply_move.side_effect = create_side_effect_dict({(str(self.any_move), ): self.any_diff}) - move_wrapper = ps.MoveWrapper(None, None, None) - - # Act - actual = move_wrapper.simulate(self.any_move, diff) - - # Assert - self.assertIs(self.any_diff, actual) - -class TestDeleteWholeConfigMoveValidator(unittest.TestCase): - def setUp(self): - self.operation_wrapper = OperationWrapper() - self.validator = ps.DeleteWholeConfigMoveValidator() - self.any_diff = Mock() - self.any_non_whole_config_path = "/table1" - self.whole_config_path = "" - - def 
test_validate__non_remove_op_non_whole_config__success(self): - self.verify(OperationType.REPLACE, self.any_non_whole_config_path, True) - self.verify(OperationType.ADD, self.any_non_whole_config_path, True) - - def test_validate__remove_op_non_whole_config__success(self): - self.verify(OperationType.REMOVE, self.any_non_whole_config_path, True) - - def test_validate__non_remove_op_whole_config__success(self): - self.verify(OperationType.REPLACE, self.whole_config_path, True) - self.verify(OperationType.ADD, self.whole_config_path, True) - - def test_validate__remove_op_whole_config__failure(self): - self.verify(OperationType.REMOVE, self.whole_config_path, False) - - def verify(self, operation_type, path, expected): - # Arrange - value = None - if operation_type in [OperationType.ADD, OperationType.REPLACE]: - value = Mock() - - operation = self.operation_wrapper.create(operation_type, path, value) - move = ps.JsonMove.from_operation(operation) - - # Act - actual = self.validator.validate(move, self.any_diff) - - # Assert - self.assertEqual(expected, actual) - -class TestUniqueLanesMoveValidator(unittest.TestCase): - def setUp(self): - self.validator = ps.UniqueLanesMoveValidator() - - def test_validate__no_port_table__success(self): - config = {"ACL_TABLE": {}} - self.validate_target_config(config) - - def test_validate__empty_port_table__success(self): - config = {"PORT": {}} - self.validate_target_config(config) - - def test_validate__single_lane__success(self): - config = {"PORT": {"Ethernet0": {"lanes": "66", "speed":"10000"}}} - self.validate_target_config(config) - - def test_validate__different_lanes_single_port___success(self): - config = {"PORT": {"Ethernet0": {"lanes": "66, 67, 68", "speed":"10000"}}} - self.validate_target_config(config) - - def test_validate__different_lanes_multi_ports___success(self): - config = {"PORT": { - "Ethernet0": {"lanes": "64, 65", "speed":"10000"}, - "Ethernet1": {"lanes": "66, 67, 68", "speed":"10000"}, - }} - self.validate_target_config(config) - - def test_validate__same_lanes_single_port___success(self): - config = {"PORT": {"Ethernet0": {"lanes": "65, 65", "speed":"10000"}}} - self.validate_target_config(config, False) - - def validate_target_config(self, target_config, expected=True): - # Arrange - current_config = {} - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) - - # Act - actual = self.validator.validate(move, diff) - - # Assert - self.assertEqual(expected, actual) - -class TestFullConfigMoveValidator(unittest.TestCase): - def setUp(self): - self.any_current_config = Mock() - self.any_target_config = Mock() - self.any_simulated_config = Mock() - self.any_diff = ps.Diff(self.any_current_config, self.any_target_config) - self.any_move = Mock() - self.any_move.apply.side_effect = \ - create_side_effect_dict({(str(self.any_current_config),): self.any_simulated_config}) - - def test_validate__invalid_config_db_after_applying_move__failure(self): - # Arrange - config_wrapper = Mock() - config_wrapper.validate_config_db_config.side_effect = \ - create_side_effect_dict({(str(self.any_simulated_config),): False}) - validator = ps.FullConfigMoveValidator(config_wrapper) - - # Act and assert - self.assertFalse(validator.validate(self.any_move, self.any_diff)) - - def test_validate__valid_config_db_after_applying_move__success(self): - # Arrange - config_wrapper = Mock() - config_wrapper.validate_config_db_config.side_effect = \ - create_side_effect_dict({(str(self.any_simulated_config),): 
True}) - validator = ps.FullConfigMoveValidator(config_wrapper) - - # Act and assert - self.assertTrue(validator.validate(self.any_move, self.any_diff)) - -class TestCreateOnlyMoveValidator(unittest.TestCase): - def setUp(self): - self.validator = ps.CreateOnlyMoveValidator(ps.PathAddressing()) - self.any_diff = ps.Diff({}, {}) - - def test_validate__non_replace_operation__success(self): - # Assert - self.assertTrue(self.validator.validate( \ - ps.JsonMove(self.any_diff, OperationType.ADD, [], []), self.any_diff)) - self.assertTrue(self.validator.validate( \ - ps.JsonMove(self.any_diff, OperationType.REMOVE, [], []), self.any_diff)) - - def test_validate__no_create_only_field__success(self): - current_config = {"PORT": {}} - target_config = {"PORT": {}, "ACL_TABLE": {}} - self.verify_diff(current_config, target_config) - - def test_validate__same_create_only_field__success(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, target_config) - - def test_validate__different_create_only_field__failure(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, target_config, expected=False) - - def test_validate__different_create_only_field_directly_updated__failure(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, - target_config, - ["PORT", "Ethernet0", "lanes"], - ["PORT", "Ethernet0", "lanes"], - False) - - def test_validate__different_create_only_field_updating_parent__failure(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, - target_config, - ["PORT", "Ethernet0"], - ["PORT", "Ethernet0"], - False) - - def test_validate__different_create_only_field_updating_grandparent__failure(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, - target_config, - ["PORT"], - ["PORT"], - False) - - def test_validate__same_create_only_field_directly_updated__failure(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, - target_config, - ["PORT", "Ethernet0", "lanes"], - ["PORT", "Ethernet0", "lanes"]) - - def test_validate__same_create_only_field_updating_parent__failure(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, - target_config, - ["PORT", "Ethernet0"], - ["PORT", "Ethernet0"]) - - def test_validate__same_create_only_field_updating_grandparent__failure(self): - current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}} - target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}} - self.verify_diff(current_config, - target_config, - ["PORT"], - ["PORT"]) - - def verify_diff(self, current_config, target_config, current_config_tokens=None, target_config_tokens=None, expected=True): - # Arrange - current_config_tokens = current_config_tokens if current_config_tokens else [] - target_config_tokens = target_config_tokens if target_config_tokens else [] - 
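# The REPLACE move built below covers the given token subtrees; the validator
# is expected to reject it only when a create-only field (e.g. PORT "lanes")
# differs between the current and target configs under the replaced subtree,
# as the test cases above demonstrate.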
diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, current_config_tokens, target_config_tokens) - - # Act - actual = self.validator.validate(move, diff) - - # Assert - self.assertEqual(expected, actual) - -class TestNoDependencyMoveValidator(unittest.TestCase): - def setUp(self): - path_addressing = ps.PathAddressing() - config_wrapper = ConfigWrapper() - self.validator = ps.NoDependencyMoveValidator(path_addressing, config_wrapper) - - def test_validate__add_full_config_has_dependencies__failure(self): - # Arrange - # CROPPED_CONFIG_DB_AS_JSON has dependencies between PORT and ACL_TABLE - diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CROPPED_CONFIG_DB_AS_JSON) - move = ps.JsonMove(diff, OperationType.ADD, [], []) - - # Act and assert - self.assertFalse(self.validator.validate(move, diff)) - - def test_validate__add_full_config_no_dependencies__success(self): - # Arrange - diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CONFIG_DB_NO_DEPENDENCIES) - move = ps.JsonMove(diff, OperationType.ADD, [], []) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def test_validate__add_table_has_no_dependencies__success(self): - # Arrange - target_config = Files.CROPPED_CONFIG_DB_AS_JSON - # prepare current config by removing ACL_TABLE from current config - current_config = self.prepare_config(target_config, jsonpatch.JsonPatch([ - {"op": "remove", "path":"/ACL_TABLE"} - ])) - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.ADD, ["ACL_TABLE"], ["ACL_TABLE"]) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def test_validate__remove_full_config_has_dependencies__failure(self): - # Arrange - # CROPPED_CONFIG_DB_AS_JSON has dependencies between PORT and ACL_TABLE - diff = ps.Diff(Files.CROPPED_CONFIG_DB_AS_JSON, Files.EMPTY_CONFIG_DB) - move = ps.JsonMove(diff, OperationType.REMOVE, [], []) - - # Act and assert - self.assertFalse(self.validator.validate(move, diff)) - - def test_validate__remove_full_config_no_dependencies__success(self): - # Arrange - diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CONFIG_DB_NO_DEPENDENCIES) - move = ps.JsonMove(diff, OperationType.REMOVE, [], []) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def test_validate__remove_table_has_no_dependencies__success(self): - # Arrange - current_config = Files.CROPPED_CONFIG_DB_AS_JSON - target_config = self.prepare_config(current_config, jsonpatch.JsonPatch([ - {"op": "remove", "path":"/ACL_TABLE"} - ])) - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REMOVE, ["ACL_TABLE"]) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def test_validate__replace_whole_config_item_added_ref_added__failure(self): - # Arrange - target_config = Files.SIMPLE_CONFIG_DB_INC_DEPS - # prepare current config by removing an item and its ref from target config - current_config = self.prepare_config(target_config, jsonpatch.JsonPatch([ - {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""}, - {"op": "remove", "path":"/PORT/Ethernet0"} - ])) - - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) - - # Act and assert - self.assertFalse(self.validator.validate(move, diff)) - - def test_validate__replace_whole_config_item_removed_ref_removed__false(self): - # Arrange - current_config = Files.SIMPLE_CONFIG_DB_INC_DEPS - # prepare target config by removing an item 
and its ref from current config - target_config = self.prepare_config(current_config, jsonpatch.JsonPatch([ - {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""}, - {"op": "remove", "path":"/PORT/Ethernet0"} - ])) - - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) - - # Act and assert - self.assertFalse(self.validator.validate(move, diff)) - - def test_validate__replace_whole_config_item_same_ref_added__true(self): - # Arrange - target_config = Files.SIMPLE_CONFIG_DB_INC_DEPS - # prepare current config by removing ref from target config - current_config = self.prepare_config(target_config, jsonpatch.JsonPatch([ - {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""} - ])) - - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def test_validate__replace_whole_config_item_same_ref_removed__true(self): - # Arrange - current_config= Files.SIMPLE_CONFIG_DB_INC_DEPS - # prepare target config by removing ref from current config - target_config = self.prepare_config(current_config, jsonpatch.JsonPatch([ - {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""} - ])) - - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def test_validate__replace_whole_config_item_same_ref_same__true(self): - # Arrange - current_config= Files.SIMPLE_CONFIG_DB_INC_DEPS - # prepare target config by removing ref from current config - target_config = current_config - - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, OperationType.REPLACE, [], []) - - # Act and assert - self.assertTrue(self.validator.validate(move, diff)) - - def prepare_config(self, config, patch): - return patch.apply(config) - -class TestLowLevelMoveGenerator(unittest.TestCase): - def setUp(self): - path_addressing = PathAddressing() - self.generator = ps.LowLevelMoveGenerator(path_addressing) - - def test_generate__no_diff__no_moves(self): - self.verify() - - def test_generate__replace_key__replace_move(self): - self.verify(tc_ops=[{"op": "replace", 'path': '/PORT/Ethernet0/description', 'value':'any-desc'}]) - - def test_generate__leaf_key_missing__add_move(self): - self.verify( - cc_ops=[{"op": "remove", 'path': '/ACL_TABLE/EVERFLOW/policy_desc'}], - ex_ops=[{"op": "add", 'path': '/ACL_TABLE/EVERFLOW/policy_desc', 'value':'EVERFLOW'}] - ) - - def test_generate__leaf_key_additional__remove_move(self): - self.verify( - tc_ops=[{"op": "remove", 'path': '/ACL_TABLE/EVERFLOW/policy_desc'}] - ) - - def test_generate__table_missing__add_leafs_moves(self): - self.verify( - cc_ops=[{"op": "remove", 'path': '/VLAN'}], - ex_ops=[{'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'vlanid': '1000'}}}, - {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.1']}}}, - {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.2']}}}, - {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.3']}}}, - {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.4']}}}] - ) - - def test_generate__table_additional__remove_leafs_moves(self): - self.verify( - tc_ops=[{"op": "remove", 'path': '/VLAN'}], - ex_ops=[{'op': 'remove', 'path': '/VLAN/Vlan1000/vlanid'}, - {'op': 'remove', 'path': 
'/VLAN/Vlan1000/dhcp_servers/0'}, - {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/1'}, - {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/2'}, - {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/3'}] - ) - - def test_generate__leaf_table_missing__add_table(self): - self.verify( - tc_ops=[{"op": "add", 'path': '/NEW_TABLE', 'value':{}}] - ) - - def test_generate__leaf_table_additional__remove_table(self): - self.verify( - cc_ops=[{"op": "add", 'path': '/NEW_TABLE', 'value':{}}], - ex_ops=[{"op": "remove", 'path': '/NEW_TABLE'}] - ) - - def test_generate__replace_list_item__remove_add_replace_moves(self): - self.verify( - tc_ops=[{"op": "replace", 'path': '/ACL_TABLE/EVERFLOW/ports/0', 'value':'Ethernet0'}], - ex_ops=[ - {"op": "remove", 'path': '/ACL_TABLE/EVERFLOW/ports/0'}, - {"op": "add", 'path': '/ACL_TABLE/EVERFLOW/ports/0', 'value':'Ethernet0'}, - {"op": "replace", 'path': '/ACL_TABLE/EVERFLOW/ports/0', 'value':'Ethernet0'}, - ]) - - def test_generate__remove_list_item__remove_move(self): - self.verify( - tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}]) - - def test_generate__remove_multiple_list_items__multiple_remove_moves(self): - self.verify( - tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, - {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}], - ex_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, - {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/1'}] - ) - - def test_generate__remove_all_list_items__multiple_remove_moves(self): - self.verify( - tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], - ex_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, - {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/2'}, - {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/3'}, - {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/1'}] - ) - - def test_generate__add_list_items__add_move(self): - self.verify( - tc_ops=[{"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}] - ) - - def test_generate__add_multiple_list_items__multiple_add_moves(self): - self.verify( - tc_ops=[{"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, - {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.6'}] - ) - - def test_generate__add_all_list_items__multiple_add_moves(self): - self.verify( - cc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], - ex_ops=[{"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.1'}, - {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.2'}, - {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.3'}, - {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.4'}] - ) - - def test_generate__replace_multiple_list_items__multiple_remove_add_replace_moves(self): - self.verify( - tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, - {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.6'}], - ex_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, - {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/3'}, - {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, - {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.6'}, - {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, - {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 
'value':'192.168.1.6'}, - {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.5'}, - {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.6'}] - ) - - def test_generate__different_order_list_items__whole_list_replace_move(self): - self.verify( - tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[ - "192.0.0.4", - "192.0.0.3", - "192.0.0.2", - "192.0.0.1" - ]}]) - - def test_generate__whole_list_missing__add_items_moves(self): - self.verify( - cc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], - ex_ops=[{'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.1']}, - {'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.2']}, - {'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.3']}, - {'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.4']}]) - - def test_generate__whole_list_additional__remove_items_moves(self): - self.verify( - tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], - ex_ops=[{'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, - {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/1'}, - {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/2'}, - {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/3'}]) - - def test_generate__empty_list_missing__add_whole_list(self): - self.verify( - tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], - cc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], - ex_ops=[{'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}]) - - def test_generate__empty_list_additional__remove_whole_list(self): - self.verify( - tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], - cc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], - ex_ops=[{'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers'}]) - - def test_generate__dpb_1_to_4_example(self): - # Arrange - diff = ps.Diff(Files.DPB_1_SPLIT_FULL_CONFIG, Files.DPB_4_SPLITS_FULL_CONFIG) - - # Act - moves = list(self.generator.generate(diff)) - - # Assert - self.verify_moves([{'op': 'replace', 'path': '/PORT/Ethernet0/alias', 'value': 'Eth1/1'}, - {'op': 'replace', 'path': '/PORT/Ethernet0/lanes', 'value': '65'}, - {'op': 'replace', 'path': '/PORT/Ethernet0/description', 'value': ''}, - {'op': 'replace', 'path': '/PORT/Ethernet0/speed', 'value': '10000'}, - {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'alias': 'Eth1/2'}}, - {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'lanes': '66'}}, - {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'description': ''}}, - {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'speed': '10000'}}, - {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'alias': 'Eth1/3'}}, - {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'lanes': '67'}}, - {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'description': ''}}, - {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'speed': '10000'}}, - {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'alias': 'Eth1/4'}}, - {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'lanes': '68'}}, - {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'description': ''}}, - {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'speed': '10000'}}, - {'op': 'add', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1', 'value': 'Ethernet1'}, - {'op': 'add', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1', 'value': 'Ethernet2'}, - {'op': 'add', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1', 
'value': 'Ethernet3'}, - {'op': 'add', 'path': '/VLAN_MEMBER/Vlan100|Ethernet1', 'value': {'tagging_mode': 'untagged'}}, - {'op': 'add', 'path': '/VLAN_MEMBER/Vlan100|Ethernet2', 'value': {'tagging_mode': 'untagged'}}, - {'op': 'add', 'path': '/VLAN_MEMBER/Vlan100|Ethernet3', 'value': {'tagging_mode': 'untagged'}}], - moves) - - def test_generate__dpb_4_to_1_example(self): - # Arrange - diff = ps.Diff(Files.DPB_4_SPLITS_FULL_CONFIG, Files.DPB_1_SPLIT_FULL_CONFIG) - - # Act - moves = list(self.generator.generate(diff)) - - # Assert - self.verify_moves([{'op': 'replace', 'path': '/PORT/Ethernet0/alias', 'value': 'Eth1'}, - {'op': 'replace', 'path': '/PORT/Ethernet0/lanes', 'value': '65, 66, 67, 68'}, - {'op': 'replace', 'path': '/PORT/Ethernet0/description', 'value': 'Ethernet0 100G link'}, - {'op': 'replace', 'path': '/PORT/Ethernet0/speed', 'value': '100000'}, - {'op': 'remove', 'path': '/PORT/Ethernet1/alias'}, - {'op': 'remove', 'path': '/PORT/Ethernet1/lanes'}, - {'op': 'remove', 'path': '/PORT/Ethernet1/description'}, - {'op': 'remove', 'path': '/PORT/Ethernet1/speed'}, - {'op': 'remove', 'path': '/PORT/Ethernet2/alias'}, - {'op': 'remove', 'path': '/PORT/Ethernet2/lanes'}, - {'op': 'remove', 'path': '/PORT/Ethernet2/description'}, - {'op': 'remove', 'path': '/PORT/Ethernet2/speed'}, - {'op': 'remove', 'path': '/PORT/Ethernet3/alias'}, - {'op': 'remove', 'path': '/PORT/Ethernet3/lanes'}, - {'op': 'remove', 'path': '/PORT/Ethernet3/description'}, - {'op': 'remove', 'path': '/PORT/Ethernet3/speed'}, - {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1'}, - {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/2'}, - {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/3'}, - {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan100|Ethernet1/tagging_mode'}, - {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan100|Ethernet2/tagging_mode'}, - {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan100|Ethernet3/tagging_mode'}], - moves) - - def verify(self, tc_ops=None, cc_ops=None, ex_ops=None): - """ - Generate a diff where target config is modified using the given tc_ops.
- The expected low level moves should ex_ops if it is not None, otherwise tc_ops - """ - # Arrange - diff = self.get_diff(target_config_ops=tc_ops, current_config_ops=cc_ops) - expected = ex_ops if ex_ops is not None else \ - tc_ops if tc_ops is not None else \ - [] - - # Act - actual = self.generator.generate(diff) - - # Assert - self.verify_moves(expected, actual) - - def verify_moves(self, ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] - self.assertCountEqual(ops, moves_ops) - - def get_diff(self, target_config_ops = None, current_config_ops = None): - current_config = Files.CROPPED_CONFIG_DB_AS_JSON - if current_config_ops: - cc_patch = jsonpatch.JsonPatch(current_config_ops) - current_config = cc_patch.apply(current_config) - - target_config = Files.CROPPED_CONFIG_DB_AS_JSON - if target_config_ops: - tc_patch = jsonpatch.JsonPatch(target_config_ops) - target_config = tc_patch.apply(target_config) - - return ps.Diff(current_config, target_config) - -class TestUpperLevelMoveExtender(unittest.TestCase): - def setUp(self): - self.extender = ps.UpperLevelMoveExtender() - self.any_diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) - - def test_extend__root_level_move__no_extended_moves(self): - self.verify(OperationType.REMOVE, []) - self.verify(OperationType.ADD, [], []) - self.verify(OperationType.REPLACE, [], []) - - def test_extend__remove_key_upper_level_does_not_exist__remove_upper_level(self): - self.verify(OperationType.REMOVE, - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - tc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}], - ex_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}]) - - def test_extend__remove_key_upper_level_does_exist__replace_upper_level(self): - self.verify(OperationType.REMOVE, - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - tc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW/policy_desc'}], - ex_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW', 'value':{ - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }}]) - - def test_extend__remove_list_item_upper_level_does_not_exist__remove_upper_level(self): - self.verify(OperationType.REMOVE, - ["VLAN", "Vlan1000", "dhcp_servers", 1], - tc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers'}], - ex_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers'}]) - - def test_extend__remove_list_item_upper_level_does_exist__replace_upper_level(self): - self.verify(OperationType.REMOVE, - ["VLAN", "Vlan1000", "dhcp_servers", 1], - tc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers/1'}], - ex_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ - "192.0.0.1", - "192.0.0.3", - "192.0.0.4" - ]}]) - - def test_extend__add_key_upper_level_missing__add_upper_level(self): - self.verify(OperationType.ADD, - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - cc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}], - ex_ops=[{'op':'add', 'path':'/ACL_TABLE/EVERFLOW', 'value':{ - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }}]) - - def test_extend__add_key_upper_level_exist__replace_upper_level(self): - self.verify(OperationType.ADD, - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - cc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW/policy_desc'}], - ex_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW', 'value':{ - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - 
}}]) - - def test_extend__add_list_item_upper_level_missing__add_upper_level(self): - self.verify(OperationType.ADD, - ["VLAN", "Vlan1000", "dhcp_servers", 1], - ["VLAN", "Vlan1000", "dhcp_servers", 1], - cc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers'}], - ex_ops=[{'op':'add', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ]}]) - - def test_extend__add_list_item_upper_level_exist__replace_upper_level(self): - self.verify(OperationType.ADD, - ["VLAN", "Vlan1000", "dhcp_servers", 1], - ["VLAN", "Vlan1000", "dhcp_servers", 1], - cc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers/1'}], - ex_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ]}]) - - def test_extend__add_table__replace_whole_config(self): - self.verify(OperationType.ADD, - ["ACL_TABLE"], - ["ACL_TABLE"], - cc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], - ex_ops=[{'op':'replace', 'path':'', 'value':Files.CROPPED_CONFIG_DB_AS_JSON}]) - - def test_extend__replace_key__replace_upper_level(self): - self.verify(OperationType.REPLACE, - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], - ex_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW', 'value':{ - "policy_desc": "EVERFLOW", - "ports": [ - "Ethernet8" - ], - "stage": "ingress", - "type": "MIRROR" - }}]) - - def test_extend__replace_list_item__replace_upper_level(self): - self.verify(OperationType.REPLACE, - ["VLAN", "Vlan1000", "dhcp_servers", 1], - ["VLAN", "Vlan1000", "dhcp_servers", 1], - cc_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers/1', 'value':'192.0.0.7'}], - ex_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ - "192.0.0.1", - "192.0.0.2", - "192.0.0.3", - "192.0.0.4" - ]}]) - - def test_extend__replace_table__replace_whole_config(self): - self.verify(OperationType.REPLACE, - ["VLAN"], - ["VLAN"], - cc_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers/1', 'value':'192.0.0.7'}], - ex_ops=[{'op':'replace', 'path':'', 'value':Files.CROPPED_CONFIG_DB_AS_JSON}]) - - def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[]): - """ - cc_ops, tc_ops are used to build the diff object. - diff, op_type, ctokens, ttokens are used to build the move. - move is extended and the result should match ex_ops. 
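For example (illustrative, based on the cases above): extending a REMOVE of
/ACL_TABLE/EVERFLOW/policy_desc yields a REPLACE of the parent
/ACL_TABLE/EVERFLOW when that parent still exists in the target config, or a
REMOVE of the parent when it does not.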
- """ - # Arrange - current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, op_type, ctokens, ttokens) - - # Act - moves = self.extender.extend(move, diff) - - # Assert - self.verify_moves(ex_ops, moves) - - def verify_moves(self, ex_ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] - self.assertCountEqual(ex_ops, moves_ops) - -class TestDeleteInsteadOfReplaceMoveExtender(unittest.TestCase): - def setUp(self): - self.extender = ps.DeleteInsteadOfReplaceMoveExtender() - - def test_extend__non_replace__no_extended_moves(self): - self.verify(OperationType.REMOVE, - ["ACL_TABLE"], - tc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], - ex_ops=[]) - self.verify(OperationType.ADD, - ["ACL_TABLE"], - ["ACL_TABLE"], - cc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], - ex_ops=[]) - - def test_extend__replace_key__delete_key(self): - self.verify(OperationType.REPLACE, - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - ["ACL_TABLE", "EVERFLOW", "policy_desc"], - cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], - ex_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW/policy_desc'}]) - - def test_extend__replace_list_item__delete_list_item(self): - self.verify(OperationType.REPLACE, - ["VLAN", "Vlan1000", "dhcp_servers", 1], - ["VLAN", "Vlan1000", "dhcp_servers", 1], - cc_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers/1', 'value':'192.0.0.7'}], - ex_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers/1'}]) - - def test_extend__replace_table__delete_table(self): - self.verify(OperationType.REPLACE, - ["ACL_TABLE"], - ["ACL_TABLE"], - cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], - ex_ops=[{'op':'remove', 'path':'/ACL_TABLE'}]) - - def test_extend__replace_whole_config__delete_whole_config(self): - self.verify(OperationType.REPLACE, - [], - [], - cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], - ex_ops=[{'op':'remove', 'path':''}]) - - def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[]): - """ - cc_ops, tc_ops are used to build the diff object. - diff, op_type, ctokens, ttokens are used to build the move. - move is extended and the result should match ex_ops. 
- """ - # Arrange - current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, op_type, ctokens, ttokens) - - # Act - moves = self.extender.extend(move, diff) - - # Assert - self.verify_moves(ex_ops, moves) - - def verify_moves(self, ex_ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] - self.assertCountEqual(ex_ops, moves_ops) - -class DeleteRefsMoveExtender(unittest.TestCase): - def setUp(self): - self.extender = ps.DeleteRefsMoveExtender(PathAddressing()) - - def test_extend__non_delete_ops__no_extended_moves(self): - self.verify(OperationType.ADD, - ["ACL_TABLE"], - ["ACL_TABLE"], - cc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], - ex_ops=[]) - self.verify(OperationType.REPLACE, - ["ACL_TABLE"], - ["ACL_TABLE"], - cc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}], - ex_ops=[]) - - def test_extend__path_with_no_refs__no_extended_moves(self): - self.verify(OperationType.REMOVE, - ["ACL_TABLE"], - tc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], - ex_ops=[]) - - def test_extend__path_with_direct_refs__extended_moves(self): - self.verify(OperationType.REMOVE, - ["PORT", "Ethernet0"], - tc_ops=[{'op':'remove', 'path':'/PORT/Ethernet0'}], - ex_ops=[{'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet0'}, - {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/0'}]) - - def test_extend__path_with_refs_to_children__extended_moves(self): - self.verify(OperationType.REMOVE, - ["PORT"], - tc_ops=[{'op':'remove', 'path':'/PORT/Ethernet0'}], - ex_ops=[{'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet0'}, - {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/0'}, - {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet4'}, - {'op': 'remove', 'path': '/ACL_TABLE/DATAACL/ports/0'}, - {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet8'}, - {'op': 'remove', 'path': '/ACL_TABLE/EVERFLOWV6/ports/0'}, - {'op': 'remove', 'path': '/ACL_TABLE/EVERFLOW/ports/0'}, - {'op': 'remove', 'path': '/ACL_TABLE/EVERFLOWV6/ports/1'}]) - - def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[]): - """ - cc_ops, tc_ops are used to build the diff object. - diff, op_type, ctokens, ttokens are used to build the move. - move is extended and the result should match ex_ops. 
- """ - # Arrange - current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - diff = ps.Diff(current_config, target_config) - move = ps.JsonMove(diff, op_type, ctokens, ttokens) - - # Act - moves = self.extender.extend(move, diff) - - # Assert - self.verify_moves(ex_ops, moves) - - def verify_moves(self, ex_ops, moves): - moves_ops = [list(move.patch)[0] for move in moves] - self.assertCountEqual(ex_ops, moves_ops) - -class TestSortAlgorithmFactory(unittest.TestCase): - def test_dfs_sorter(self): - self.verify(ps.Algorithm.DFS, ps.DfsSorter) - - def test_bfs_sorter(self): - self.verify(ps.Algorithm.BFS, ps.BfsSorter) - - def test_memoization_sorter(self): - self.verify(ps.Algorithm.MEMOIZATION, ps.MemoizationSorter) - - def verify(self, algo, algo_class): - # Arrange - factory = ps.SortAlgorithmFactory(OperationWrapper(), ConfigWrapper(), PathAddressing()) - expected_generators = [ps.LowLevelMoveGenerator] - expected_extenders = [ps.UpperLevelMoveExtender, ps.DeleteInsteadOfReplaceMoveExtender, ps.DeleteRefsMoveExtender] - expected_validator = [ps.DeleteWholeConfigMoveValidator, - ps.FullConfigMoveValidator, - ps.NoDependencyMoveValidator, - ps.UniqueLanesMoveValidator, - ps.CreateOnlyMoveValidator] - - # Act - sorter = factory.create(algo) - actual_generators = [type(item) for item in sorter.move_wrapper.move_generators] - actual_extenders = [type(item) for item in sorter.move_wrapper.move_extenders] - actual_validators = [type(item) for item in sorter.move_wrapper.move_validators] - - # Assert - self.assertIsInstance(sorter, algo_class) - self.assertCountEqual(expected_generators, actual_generators) - self.assertCountEqual(expected_extenders, actual_extenders) - self.assertCountEqual(expected_validator, actual_validators) - -class TestPatchSorter(unittest.TestCase): - def create_patch_sorter(self, config=None): - if config is None: - config=Files.CROPPED_CONFIG_DB_AS_JSON - config_wrapper = ConfigWrapper() - config_wrapper.get_config_db_as_json = MagicMock(return_value=config) - patch_wrapper = PatchWrapper(config_wrapper) - operation_wrapper = OperationWrapper() - path_addressing= ps.PathAddressing() - sort_algorithm_factory = ps.SortAlgorithmFactory(operation_wrapper, config_wrapper, path_addressing) - - return ps.PatchSorter(config_wrapper, patch_wrapper, sort_algorithm_factory) - - def test_sort__empty_patch__returns_empty_changes_list(self): - # Arrange - patch = jsonpatch.JsonPatch([]) - expected = [] - - # Act - actual = self.create_patch_sorter().sort(patch) - - # Assert - self.assertCountEqual(expected, actual) - - def test_sort__patch_with_single_simple_operation__returns_one_change(self): - # Arrange - patch = jsonpatch.JsonPatch([{"op":"remove", "path":"/VLAN/Vlan1000/dhcp_servers/0"}]) - expected = [JsonChange(patch)] - - # Act - actual = self.create_patch_sorter().sort(patch) - - # Assert - self.assertCountEqual(expected, actual) - - def test_sort__replacing_create_only_field__success(self): - # Arrange - patch = jsonpatch.JsonPatch([{"op":"replace", "path": "/PORT/Ethernet0/lanes", "value":"67"}]) - - # Act - actual = self.create_patch_sorter(Files.DPB_1_SPLIT_FULL_CONFIG).sort(patch) - - # Assert - self.assertNotEqual(None, actual) - - def test_sort__inter_dependency_within_same_table__success(self): - # Arrange - patch = jsonpatch.JsonPatch([{"op":"add", "path":"/VLAN_INTERFACE", "value": { - "Vlan1000|fc02:1000::1/64": {}, - "Vlan1000|192.168.0.1/21": {}, - 
"Vlan1000": {} - }}]) - expected = [ - JsonChange(jsonpatch.JsonPatch([{"op": "add", "path": "/VLAN_INTERFACE", "value": {"Vlan1000": {}}}])), - JsonChange(jsonpatch.JsonPatch([{"op": "add", "path": "/VLAN_INTERFACE/Vlan1000|fc02:1000::1~164", "value": {}}])), - JsonChange(jsonpatch.JsonPatch([{"op": "add", "path": "/VLAN_INTERFACE/Vlan1000|192.168.0.1~121", "value": {}}])) - ] - - # Act - actual = self.create_patch_sorter().sort(patch) - - # Assert - self.assertListEqual(expected, actual) - - def test_sort__add_table__success(self): - self.verify(cc_ops=[{"op":"remove", "path":"/ACL_TABLE"}]) - - def test_sort__remove_table__success(self): - self.verify(tc_ops=[{"op":"remove", "path":"/ACL_TABLE"}]) - - def test_sort__modify_value_in_existing_table__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"/ACL_TABLE/EVERFLOW/stage", "value":"egress"}]) - - def test_sort__modify_value_in_existing_array__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"/ACL_TABLE/EVERFLOWV6/ports/0", "value":"Ethernet0"}]) - - def test_sort__add_value_to_existing_array__success(self): - self.verify(tc_ops=[{"op":"add", "path":"/ACL_TABLE/EVERFLOWV6/ports/0", "value":"Ethernet0"}]) - - def test_sort__add_new_key_to_existing_table__success(self): - self.verify(cc_ops=[{"op":"remove", "path":"/ACL_TABLE/EVERFLOWV6"}]) - - def test_sort__remove_2_items_with_dependency_from_different_tables__success(self): - self.verify(tc_ops=[{"op":"remove", "path":"/PORT/Ethernet0"}, - {"op":"remove", "path":"/VLAN_MEMBER/Vlan1000|Ethernet0"}, - {"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}], # removing ACL from current and target - cc_ops=[{"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}]) - - def test_sort__add_2_items_with_dependency_from_different_tables__success(self): - self.verify(tc_ops=[{"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}], # removing ACL from current and target - cc_ops=[{"op":"remove", "path":"/PORT/Ethernet0"}, - {"op":"remove", "path":"/VLAN_MEMBER/Vlan1000|Ethernet0"}, - {"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}]) - - def test_sort__remove_2_items_with_dependency_from_same_table__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}, - {"op":"remove", "path":"/INTERFACE/Ethernet8"}, - {"op":"remove", "path":"/INTERFACE/Ethernet8|10.0.0.1~130"}], - cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}]) - - def test_sort__add_2_items_with_dependency_from_same_table__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}], - cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}, - {"op":"remove", "path":"/INTERFACE/Ethernet8"}, - {"op":"remove", "path":"/INTERFACE/Ethernet8|10.0.0.1~130"}]) - - def test_sort__replace_mandatory_item__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"/ACL_TABLE/EVERFLOWV6/type", "value":"L2"}]) - - def test_sort__dpb_1_to_4__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.DPB_4_SPLITS_FULL_CONFIG}], - cc_ops=[{"op":"replace", "path":"", "value":Files.DPB_1_SPLIT_FULL_CONFIG}]) - - def test_sort__dpb_4_to_1__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.DPB_1_SPLIT_FULL_CONFIG}], - cc_ops=[{"op":"replace", "path":"", "value":Files.DPB_4_SPLITS_FULL_CONFIG}]) - - def test_sort__remove_an_item_with_default_value__success(self): - self.verify(tc_ops=[{"op":"remove", "path":"/ACL_TABLE/EVERFLOW/stage"}]) - - def 
test_sort__modify_items_with_dependencies_using_must__success(self): - self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}, - {"op":"replace", "path":"/CRM/Config/acl_counter_high_threshold", "value":"60"}, - {"op":"replace", "path":"/CRM/Config/acl_counter_low_threshold", "value":"50"}], - cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}]) - - # in the following example, it is possible to start with acl_counter_high_threshold - self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}, - {"op":"replace", "path":"/CRM/Config/acl_counter_high_threshold", "value":"80"}, - {"op":"replace", "path":"/CRM/Config/acl_counter_low_threshold", "value":"60"}], - cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}]) - - def verify(self, cc_ops=[], tc_ops=[]): - # Arrange - config_wrapper=ConfigWrapper() - target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) - patch=jsonpatch.make_patch(current_config, target_config) - - # Act - actual = self.create_patch_sorter(current_config).sort(patch) - - # Assert - simulated_config = current_config - for move in actual: - simulated_config = move.apply(simulated_config) - self.assertTrue(config_wrapper.validate_config_db_config(simulated_config)) - self.assertEqual(target_config, simulated_config) From 6c4f96cf5d6dacabc48aef7e25afd63ebebbfc07 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Sat, 14 Aug 2021 07:03:56 +0000 Subject: [PATCH 17/60] Added new state table schema Signed-off-by: Vivek Reddy Karri --- scripts/coredump_gen_handler | 14 ++++++++++++- utilities_common/auto_techsupport_helper.py | 23 +++++++++++++++++++-- 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/scripts/coredump_gen_handler b/scripts/coredump_gen_handler index e7582dcf8f..ad72ba5754 100644 --- a/scripts/coredump_gen_handler +++ b/scripts/coredump_gen_handler @@ -16,7 +16,18 @@ import syslog from swsscommon.swsscommon import SonicV2Connector from utilities_common.auto_techsupport_helper import * -class CoreDumpCreateHandle(): +def handle_coredump_cleanup(dump_name): + self.db = SonicV2Connector(host="127.0.0.1") + self.db.connect(CFG_DB) + if self.db.get(CFG_DB, AUTO_TS, CFG_STATE) != "enabled": + return + + + +class CriticalProcCoreDumpHandle(): + """ + Class to handle coredump creation event for critical processes inside the docker + """ def __init__(self, core_name): self.core_name = core_name self.db = None @@ -146,6 +157,7 @@ def main(): syslog.openlog(logoption=syslog.LOG_PID) cls = CoreDumpCreateHandle() cls.handle_core_dump_creation_event(args.name) + handle_coredump_cleanup(dump_name) if __name__ == "__main__": main() \ No newline at end of file diff --git a/utilities_common/auto_techsupport_helper.py b/utilities_common/auto_techsupport_helper.py index 6b5188aaf5..d61d191f1c 100644 --- a/utilities_common/auto_techsupport_helper.py +++ b/utilities_common/auto_techsupport_helper.py @@ -8,9 +8,11 @@ import math import syslog -AUTO_TS = "AUTO_TECHSUPPORT|global" CFG_DB = "CONFIG_DB" -CFG_STATE = "state" +AUTO_TS = "AUTO_TECHSUPPORT|global" +CFG_INVOC_TS = "auto_invoke_ts" +CFG_CORE_CLEANUP = "coredump_cleanup" +CFG_TS_CLEANUP = "techsupport_cleanup" CFG_MAX_TS = "max_techsupport_size" COOLOFF = "cooloff" CFG_CORE_USAGE = "core_usage" @@ -29,6 +31,23 @@ # State DB Attributes STATE_DB = "STATE_DB" TS_MAP = "AUTO_TECHSUPPORT|TS_CORE_MAP" +""" +key = 
"AUTO_TECHSUPPORT|TS_CORE_MAP" + = +Eg: +sonic_dump_sonic_20210412_223645 = orchagent.1599047232.39.core;1599047233;orchagent +sonic_dump_sonic_20210405_202756 = python3.1617684247.17.core;1617684249;snmp-subagent +""" + +CRITICAL_PROC = "AUTO_TECHSUPPORT|PROC_EXIT_EVENTS" +""" +key = "AUTO_TECHSUPPORT|PROC_EXIT_EVENTS" + = +Eg: + = "swss;orchagent" + = "snmp;snmp-subagent" + = "lldp;lldp_syncd" +""" TIME_BUF = 20 SINCE_DEFAULT = "2 days ago" From 1fbe04e4af42d514249b7e2feeca80df5c8b9496 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Mon, 16 Aug 2021 19:28:21 +0000 Subject: [PATCH 18/60] UT's updated for new design Signed-off-by: Vivek Reddy Karri --- scripts/coredump_gen_handler | 73 +++-- scripts/techsupport_cleanup | 44 +-- setup.py | 5 +- .../coredump_gen_handler_test.py | 305 ++++++++++-------- .../shared_state_mock.py | 50 +-- .../techsupport_cleanup_test.py | 45 +-- utilities_common/auto_techsupport_helper.py | 2 + 7 files changed, 291 insertions(+), 233 deletions(-) diff --git a/scripts/coredump_gen_handler b/scripts/coredump_gen_handler index ad72ba5754..a9254f2ca0 100644 --- a/scripts/coredump_gen_handler +++ b/scripts/coredump_gen_handler @@ -14,15 +14,28 @@ import argparse import subprocess import syslog from swsscommon.swsscommon import SonicV2Connector -from utilities_common.auto_techsupport_helper import * +from utilities_common.auto_techsupport_helper import * + def handle_coredump_cleanup(dump_name): - self.db = SonicV2Connector(host="127.0.0.1") - self.db.connect(CFG_DB) - if self.db.get(CFG_DB, AUTO_TS, CFG_STATE) != "enabled": + db = SonicV2Connector(host="127.0.0.1") + db.connect(CFG_DB) + if db.get(CFG_DB, AUTO_TS, CFG_CORE_CLEANUP) != "enabled": + return + + core_usage = db.get(CFG_DB, AUTO_TS, CFG_MAX_TS) + try: + core_usage = float(core_usage) + except: + core_usage = 0.0 + + if not core_usage: + _ , num_bytes = get_stats(os.path.join(CORE_DUMP_DIR, CORE_DUMP_PTRN)) + syslog.syslog(syslog.LOG_INFO, "No Cleanup is performed, current size occupied: {}".format(pretty_size(num_bytes))) return - - + + cleanup_process(max_ts, CORE_DUMP_DIR, CORE_DUMP_PTRN) + class CriticalProcCoreDumpHandle(): """ @@ -34,12 +47,12 @@ class CriticalProcCoreDumpHandle(): self.proc_mp = {} self.core_ts_map = {} self.curr_ts_list = [] - + def handle_core_dump_creation_event(self): file_path = os.path.join(CORE_DUMP_DIR, self.core_name) if not verify_recent_file_creation(file_path): - return - + return + self.db = SonicV2Connector(host="127.0.0.1") self.db.connect(CFG_DB) self.db.connect(STATE_DB) @@ -50,21 +63,21 @@ class CriticalProcCoreDumpHandle(): if proc not in self.proc_mp: print(self.proc_mp) return # Only handles the critical processes - + FEATURE_KEY = FEATURE.format(self.proc_mp[proc]) - if self.db.get(CFG_DB, FEATURE_KEY, TS) != "enabled": + if self.db.get(CFG_DB, FEATURE_KEY, llTS) != "enabled": return # Should be set "enabled" in the FEATURE Table - + global_cooloff = self.db.get(CFG_DB, AUTO_TS, COOLOFF) proc_cooloff = self.db.get(CFG_DB, FEATURE_KEY, COOLOFF) - + cooloff_passed = self.verify_cooloff(global_cooloff, proc_cooloff, proc) if cooloff_passed: since_cfg = self.get_since_arg() new_file = self.invoke_ts_cmd(since_cfg) if new_file: self.db.set(STATE_DB, TS_MAP, os.path.basename(new_file[0]), "{};{}".format(self.core_name, int(time.time()))) - + core_usage = 0 if self.db.hexists(CFG_DB, AUTO_TS, CFG_CORE_USAGE): core_usage = self.db.get(CFG_DB, AUTO_TS, CFG_CORE_USAGE) @@ -72,24 +85,23 @@ class CriticalProcCoreDumpHandle(): core_usage = int(core_usage) except: 
core_usage = 0 - + if core_usage == 0: _ , num_bytes = get_stats(os.path.join(CORE_DUMP_DIR, CORE_DUMP_PTRN)) syslog.syslog(syslog.LOG_INFO, "No Cleanup is performed, current size occupied: {}".format(pretty_size(num_bytes))) - return - + return + cleanup_process(core_usage, CORE_DUMP_PTRN, CORE_DUMP_DIR) - + def get_since_arg(self): since_cfg = self.db.get(CFG_DB, AUTO_TS, CFG_SINCE) - if not since_cfg: + if not since_cfg: return SINCE_DEFAULT rc, _, _ = subprocess_exec(["date", "--date=\"{}\"".format(since_cfg)]) if rc != 0: return since_cfg - return SINCE_DEFAULT - - + return SINCE_DEFAULT + def invoke_ts_cmd(self, since_cfg): since_cfg = "\"" + since_cfg + "\"" _, out, _ = subprocess_exec(["show", "techsupport", "--since", since_cfg]) @@ -101,7 +113,7 @@ class CriticalProcCoreDumpHandle(): else: syslog.syslog(syslog.LOG_INFO, "'show techsupport' invocation is successful, {} is created".format(diff)) return diff - + def verify_cooloff(self, global_cooloff, proc_cooloff, proc): """Verify both the global cooloff and per-proc cooloff has passed""" self.curr_ts_list = get_ts_dumps(True) @@ -113,10 +125,10 @@ class CriticalProcCoreDumpHandle(): print("Reached Here!!!@") syslog.syslog(syslog.LOG_INFO, "Cooloff period has not yet passed. No Techsupport Invocation is performed ") return False - + ts_map = self.db.get_all(STATE_DB, TS_MAP) self.parse_ts_map(ts_map) - + if proc_cooloff and proc in self.core_ts_map: last_creation_time = self.core_ts_map[proc][0][0] proc_cooloff = float(proc_cooloff) @@ -124,7 +136,7 @@ class CriticalProcCoreDumpHandle(): syslog.syslog(syslog.LOG_INFO, "Cooloff period for {} prcess has not yet passed. No Techsupport Invocation is performed".format(proc)) return False return True - + def parse_ts_map(self, ts_map): """Create core_dump, ts_dump & creation_time map""" print(ts_map) @@ -135,7 +147,7 @@ class CriticalProcCoreDumpHandle(): self.core_ts_map[core_dump].append((int(creation_time), ts_dump)) for core_dump in self.core_ts_map: self.core_ts_map[core_dump].sort() - + def fetch_critical_procs(self): """Fetches the critical_procs and corresponding docker names""" keys = self.db.keys(CFG_DB, FEATURE.format("*")) @@ -148,16 +160,17 @@ class CriticalProcCoreDumpHandle(): procs = stdout.split() for proc in procs: if re.match("program:*", proc): - self.proc_mp[proc.split(":")[-1]] = container + self.proc_mp[proc.split(":")[-1]] = container + def main(): parser = argparse.ArgumentParser(description='Auto Techsupport Invocation and CoreDump Mgmt Script') parser.add_argument('name', type=str, help='Core Dump Name', required=True) args = parser.parse_args() - syslog.openlog(logoption=syslog.LOG_PID) + syslog.openlog(logoption=syslog.LOG_PID) cls = CoreDumpCreateHandle() cls.handle_core_dump_creation_event(args.name) handle_coredump_cleanup(dump_name) - + if __name__ == "__main__": main() \ No newline at end of file diff --git a/scripts/techsupport_cleanup b/scripts/techsupport_cleanup index 28f0c47bcc..6ea0f3801c 100644 --- a/scripts/techsupport_cleanup +++ b/scripts/techsupport_cleanup @@ -2,7 +2,7 @@ """ techsupport_cleanup script. 
- This script is invoked by the generate_dump script for techsupport cleanup + This script is invoked by the generate_dump script for techsupport cleanup For more info, refer to the Event Driven TechSupport & CoreDump Mgmt HLD """ import os @@ -14,51 +14,51 @@ import subprocess import syslog import shutil from swsscommon.swsscommon import SonicV2Connector -from utilities_common.auto_techsupport_helper import * +from utilities_common.auto_techsupport_helper import * + def clean_state_db_entries(removed_files, db): if not removed_files: - return + return db_conn = db.get_redis_client(STATE_DB) for file in removed_files: db_conn.hdel(TS_MAP, os.path.basename(file)) + def handle_techsupport_creation_event(dump_name): file_path = os.path.join(TS_DIR, dump_name) - - if not verify_recent_file_creation(file_path): - return - + if not verify_recent_file_creation(file_path): + return curr_list = get_ts_dumps() db = SonicV2Connector(host="127.0.0.1") db.connect(CFG_DB) db.connect(STATE_DB) - - print(db.get_all(CFG_DB, AUTO_TS)) - - if db.get(CFG_DB, AUTO_TS, CFG_STATE) != "enabled": + + if db.get(CFG_DB, AUTO_TS, CFG_TS_CLEANUP) != "enabled": return - + max_ts = db.get(CFG_DB, AUTO_TS, CFG_MAX_TS) - if max_ts: - max_ts = int(max_ts) - else: - max_ts = 0 - - if max_ts == 0: + try: + max_ts = float(max_ts) + except: + max_ts = 0.0 + + if not max_ts: _ , num_bytes = get_stats(os.path.join(TS_DIR, TS_PTRN)) syslog.syslog(syslog.LOG_INFO, "No Cleanup is performed, current size occupied: {}".format(pretty_size(num_bytes))) - return - + return + removed_files = cleanup_process(max_ts, TS_PTRN, TS_DIR) clean_state_db_entries(removed_files, db) - + + def main(): parser = argparse.ArgumentParser(description='Auto Techsupport Invocation and CoreDump Mgmt Script') parser.add_argument('name', type=str, help='TechSupport Dump Name', required=True) args = parser.parse_args() syslog.openlog(logoption=syslog.LOG_PID) handle_techsupport_creation_event(args.name) - + + if __name__ == "__main__": main() \ No newline at end of file diff --git a/setup.py b/setup.py index 216328726c..46eee5f925 100644 --- a/setup.py +++ b/setup.py @@ -143,9 +143,7 @@ def run_tests(self): 'scripts/watermarkcfg', 'scripts/sonic-kdump-config', 'scripts/centralize_database', - 'scripts/null_route_helper', - 'scripts/coredump_gen_handler', - 'scripts/techsupport_cleanup' + 'scripts/null_route_helper' ], entry_points={ 'console_scripts': [ @@ -231,5 +229,6 @@ def run_tests(self): 'Topic :: Utilities', ], keywords='sonic SONiC utilities command line cli CLI', + cmdclass={"pytest": PyTest}, test_suite='setup.get_test_suite' ) diff --git a/tests/auto_techsupport_tests/coredump_gen_handler_test.py b/tests/auto_techsupport_tests/coredump_gen_handler_test.py index 0eea8b82c5..06de7768cb 100644 --- a/tests/auto_techsupport_tests/coredump_gen_handler_test.py +++ b/tests/auto_techsupport_tests/coredump_gen_handler_test.py @@ -1,6 +1,6 @@ import os, time import sys -import pyfakefs +import pyfakefs import unittest from pyfakefs.fake_filesystem_unittest import Patcher from swsscommon import swsscommon @@ -23,103 +23,66 @@ # Mock Handle to the data inside the Redis RedisHandle = RedisSingleton.getInstance() + def set_auto_ts_cfg(**kwargs): - state = kwargs[cdump_mod.CFG_STATE] if cdump_mod.CFG_STATE in kwargs else "disabled" + invoke_ts = kwargs[cdump_mod.CFG_INVOC_TS] if cdump_mod.CFG_INVOC_TS in kwargs else "disabled" + core_cleanup = kwargs[cdump_mod.CFG_CORE_CLEANUP] if cdump_mod.CFG_CORE_CLEANUP in kwargs else "disabled" cooloff = 
kwargs[cdump_mod.COOLOFF] if cdump_mod.COOLOFF in kwargs else "0" core_usage = kwargs[cdump_mod.CFG_CORE_USAGE] if cdump_mod.CFG_CORE_USAGE in kwargs else "0" since_cfg = kwargs[cdump_mod.CFG_SINCE] if cdump_mod.CFG_SINCE in kwargs else "None" if cdump_mod.CFG_DB not in RedisHandle.data: RedisHandle.data[cdump_mod.CFG_DB] = {} - RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.AUTO_TS] = {cdump_mod.CFG_STATE : state, - cdump_mod.COOLOFF : cooloff, - cdump_mod.CFG_CORE_USAGE : core_usage} + RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.AUTO_TS] = {cdump_mod.CFG_INVOC_TS: state, + cdump_mod.COOLOFF: cooloff, + cdump_mod.CFG_CORE_USAGE: core_usage, + cdump_mod.CFG_CORE_CLEANUP: core_cleanup, + cdump_mod.CFG_SINCE: since_cfg} + def set_feature_table_cfg(ts="disabled", cooloff="0", container_name="swss"): if cdump_mod.CFG_DB not in RedisHandle.data: RedisHandle.data[cdump_mod.CFG_DB] = {} - RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.FEATURE.format(container_name)] = {cdump_mod.TS : ts, - cdump_mod.COOLOFF : cooloff} - -swss_critical_proc = """\ -program:orchagent -program:portsyncd -program:neighsyncd -program:vlanmgrd -program:intfmgrd -program:portmgrd -program:buffermgrd -program:vrfmgrd -program:nbrmgrd -program:vxlanmgrd -""" + RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.FEATURE.format(container_name)] = {cdump_mod.TS: ts, + cdump_mod.COOLOFF: cooloff} -syncd_critical_proc = """\ -program:syncd -""" -def mock_generic_cmd(cmd): - if "docker exec -t swss cat /etc/supervisor/critical_processes" in cmd: - return 0, swss_critical_proc, "" - elif "docker exec -t syncd cat /etc/supervisor/critical_processes" in cmd: - return 0, syncd_critical_proc, "" - elif "date --date=\"2 days ago\"" in cmd: - return 0, "", "" - elif "date --date=\"random\"" in cmd: - return 1, "", "Invalid Date Format" - else: - return 1, "", "Invalid Command: " +def populate_state_db(use_default=True, data=None): + if use_default: + data = {cdump_mod.TS_MAP: {"sonic_dump_random1.tar.gz": "portsyncd;1575985", + "sonic_dump_random2.tar.gz": "syncd;1575988"}, + cdump_mod.CRITICAL_PROC: {"swss:orchagent": "123:orchagent"}} + if cdump_mod.CFG_DB not in RedisHandle.data: + RedisHandle.data[cdump_mod.STATE_DB] = {} + for key in data: + RedisHandle.data[cdump_mod.STATE_DB][key] = data[key] + class TestCoreDumpCreationEvent(unittest.TestCase): - - def test_invoc_ts_without_cooloff(self): - """ - Scenario: AUTO_TECHSUPPORT is enabled. No global Cooloff and per process cooloff specified - Check if techsupport is invoked and file is created - """ - RedisSingleton.clearState() - set_auto_ts_cfg(state="enabled") - set_feature_table_cfg(ts="enabled") - with Patcher() as patcher: - def mock_cmd(cmd): - cmd_str = " ".join(cmd) - if "show techsupport" in cmd_str: - patcher.fs.create_file("/var/dump/sonic_dump_random999.tar.gz") - else: - return mock_generic_cmd(cmd_str) - return 0, "", "" - cdump_mod.subprocess_exec = mock_cmd - patcher.fs.create_file("/var/dump/sonic_dump_random998.tar.gz") - patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") - cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") - cls.handle_core_dump_creation_event() - assert "sonic_dump_random999.tar.gz" in os.listdir(cdump_mod.TS_DIR) - assert "sonic_dump_random998.tar.gz" in os.listdir(cdump_mod.TS_DIR) - + def test_invoc_ts_state_db_update(self): """ - Scenario: AUTO_TECHSUPPORT is enabled. 
No global Cooloff and per process cooloff specified - Check if techsupport is invoked, file is created and State DB in updated + Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled and no cooloff is provided + Check if techsupport is invoked, file is created and State DB is updated """ RedisSingleton.clearState() - set_auto_ts_cfg(state="enabled") + set_auto_ts_cfg(auto_invoke_ts="enabled") set_feature_table_cfg(ts="enabled") - RedisHandle.data["STATE_DB"] = {} - RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "portsyncd;1575985", - "sonic_dump_random2.tar.gz" : "syncd;1575988"} + populate_state_db(True) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) if "show techsupport" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") else: - return mock_generic_cmd(cmd_str) + return 1, "", "Command Not Found" return 0, "", "" cdump_mod.subprocess_exec = mock_cmd - patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") cls.handle_core_dump_creation_event() + handle_coredump_cleanup("orchagent.12345.123.core.gz") assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) @@ -127,32 +90,31 @@ def mock_cmd(cmd): assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] assert "sonic_dump_random3.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] assert "orchagent" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP]["sonic_dump_random3.tar.gz"] - + def test_global_cooloff(self): """ - Scenario: AUTO_TECHSUPPORT is enabled. But global cooloff is not passed - Check if techsupport is not invoked + Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is enabled + Global cooloff is not passed yet. Check if techsupport isn't invoked. 
""" RedisSingleton.clearState() - set_auto_ts_cfg(state="enabled", cooloff="1") + set_auto_ts_cfg(auto_invoke_ts="enabled", cooloff="1") set_feature_table_cfg(ts="enabled") - RedisHandle.data["STATE_DB"] = {} - RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "portsyncd;1575985", - "sonic_dump_random2.tar.gz" : "syncd;1575988"} + populate_state_db(True) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) if "show techsupport" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") else: - return mock_generic_cmd(cmd_str) + return 1, "", "Command Not Found" return 0, "", "" cdump_mod.subprocess_exec = mock_cmd - patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") cls.handle_core_dump_creation_event() + handle_coredump_cleanup("orchagent.12345.123.core.gz") assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) @@ -162,29 +124,27 @@ def mock_cmd(cmd): def test_per_proc_cooloff(self): """ - Scenario: AUTO_TECHSUPPORT is enabled. Global Cooloff is passed but per process isn't - Check if techsupport is not invoked + Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. Global Cooloff is passed + But Per Proc cooloff is not passed yet. Check if techsupport isn't invoked """ RedisSingleton.clearState() set_auto_ts_cfg(state="enabled", cooloff="0.25") set_feature_table_cfg(ts="enabled", cooloff="10") - RedisHandle.data["STATE_DB"] = {} - RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "orchagent;{}".format(int(time.time())), - "sonic_dump_random2.tar.gz" : "syncd;1575988"} + populate_state_db(True) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) if "show techsupport" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") else: - return mock_generic_cmd(cmd_str) + return 1, "", "Command Not Found" return 0, "", "" cdump_mod.subprocess_exec = mock_cmd patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") - time.sleep(0.5) # wait for cooloff to pass + time.sleep(0.25) # wait for global cooloff to pass cls.handle_core_dump_creation_event() assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) @@ -192,32 +152,30 @@ def mock_cmd(cmd): assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] assert "sonic_dump_random3.tar.gz" not in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - + def test_invoc_ts_after_cooloff(self): """ - Scenario: AUTO_TECHSUPPORT is enabled. Global Cooloff and per proc cooloff is passed - Check if techsupport is invoked + Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. + All the cooloff's are passed. 
Check if techsupport is invoked """ RedisSingleton.clearState() set_auto_ts_cfg(state="enabled", cooloff="0.1") - set_feature_table_cfg(ts="enabled", cooloff="0.5") - RedisHandle.data["STATE_DB"] = {} - RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "orchagent;{}".format(int(time.time())), - "sonic_dump_random2.tar.gz" : "syncd;1575988"} + set_feature_table_cfg(ts="enabled", cooloff="0.25") + populate_state_db(True) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) if "show techsupport" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") else: - return mock_generic_cmd(cmd_str) + return 1, "", "Command Not Found" return 0, "", "" cdump_mod.subprocess_exec = mock_cmd patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") - time.sleep(0.5) # wait for cooloff to pass + time.sleep(0.25) # wait for all the cooloff's to pass cls.handle_core_dump_creation_event() assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) @@ -226,98 +184,137 @@ def mock_cmd(cmd): assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] assert "sonic_dump_random3.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] assert "orchagent" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP]["sonic_dump_random3.tar.gz"] - - def test_non_critical_proc(self): + + def test_core_dump_with_no_exit_event(self): """ - Scenario: AUTO_TECHSUPPORT is enabled. A Non-critical Process dump is used to invoke this script - Check if techsupport is not invoked + Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. + Core Dump is found but no relevant exit_event entry is found in STATE_DB. """ RedisSingleton.clearState() set_auto_ts_cfg(state="enabled") set_feature_table_cfg(ts="enabled") - RedisHandle.data["STATE_DB"] = {} - RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "portsyncd;1575985", - "sonic_dump_random2.tar.gz" : "syncd;1575988"} with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) if "show techsupport" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") else: - return mock_generic_cmd(cmd_str) + return 1, "", "Command Not Found" return 0, "", "" cdump_mod.subprocess_exec = mock_cmd - patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") - patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/snmpd.12345.123.core.gz") cls = cdump_mod.CoreDumpCreateHandle("snmpd.12345.123.core.gz") cls.handle_core_dump_creation_event() - assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) - assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) - assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] assert "sonic_dump_random3.tar.gz" not in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - + + def test_core_dump_with_exit_event_unknown_cmd(self): + """ + Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. 
+ Core Dump is found but the comm in the exit_event entry is empty + """ + RedisSingleton.clearState() + set_auto_ts_cfg(state="enabled") + set_feature_table_cfg(ts="enabled", container_name="snmp") + populate_state_db(False, {"snmp:snmp-subagent": "123;"}) + with Patcher() as patcher: + def mock_cmd(cmd): + cmd_str = " ".join(cmd) + if "show techsupport" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return 1, "", "Command Not Found" + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/core/python3.12345.123.core.gz") + cls = cdump_mod.CoreDumpCreateHandle("python3.12345.123.core.gz") + cls.handle_core_dump_creation_event() + assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) + assert "snmp-subagent" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP]["sonic_dump_random3.tar.gz"] + def test_feature_table_not_set(self): """ - Scenario: AUTO_TECHSUPPORT is enabled. A critical Process dump is used to invoke this script - But it is not enabled in FEATURE|* table. Check if techsupport is not invoked + Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. + auto-techsupport in the FEATURE table is not enabled for the container that generated the core dump + Check if techsupport is not invoked """ RedisSingleton.clearState() set_auto_ts_cfg(state="enabled") - set_feature_table_cfg(ts="disabled", cooloff="0.2", container_name="syncd") - RedisHandle.data["STATE_DB"] = {} - RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "portsyncd;{}".format(int(time.time())), - "sonic_dump_random2.tar.gz" : "syncd;1575988"} + set_feature_table_cfg(ts="disabled", container_name="snmp") + populate_state_db(False, {"snmp:snmp-subagent": "123;python3"}) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) if "show techsupport" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") else: - return mock_generic_cmd(cmd_str) + return 1, "", "Command Not Found" + return 0, "", "" + cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/core/python3.12345.123.core.gz") + cls = cdump_mod.CoreDumpCreateHandle("python3.12345.123.core.gz") + cls.handle_core_dump_creation_event() + cdump_mod.handle_coredump_cleanup("python3.12345.123.core.gz") + assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR)
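The tests above seed the STATE_DB exit-event entry with a "pid;comm" value, where an empty comm models a crash whose command name could not be recorded. A minimal standalone sketch of how such an entry can be matched against a core file name; the name layout ("comm.<ts>.pid.core.gz") and the NO_COMM sentinel mirror the patch, but match_exit_event itself is illustrative, not the script's actual implementation:

NO_COMM = ""  # sentinel for an exit event that carried no command name

def match_exit_event(core_name, exit_events):
    """Return (feature, proc) for the exit event matching a core file name.

    exit_events maps "feature;proc" keys to "pid;comm" values, mirroring
    the entries these tests populate.
    """
    parts = core_name.split(".")
    comm, pid = parts[0], parts[2]
    for key, value in exit_events.items():
        pid_, _, comm_ = value.partition(";")
        # An empty comm is treated as a match on the pid alone
        if pid_ == pid and (comm_ == NO_COMM or comm in comm_):
            feature, _, proc = key.partition(";")
            return feature, proc
    return "", ""

# match_exit_event("python3.12345.123.core.gz", {"snmp;snmp-subagent": "123;"})
# returns ("snmp", "snmp-subagent")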
+ + def test_since_argument(self): + """ + Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. + Check if techsupport is invoked and the since argument is properly applied + """ + RedisSingleton.clearState() + set_auto_ts_cfg(state="enabled", cooloff="0.1", since="4 days ago") + set_feature_table_cfg(ts="enabled", cooloff="0.2") + populate_state_db(True) + with Patcher() as patcher: + def mock_cmd(cmd): + cmd_str = " ".join(cmd) + if "show techsupport --since \"4 days ago\"" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + else: + return 1, "", "Invalid Command" + return 0, "", "" cdump_mod.subprocess_exec = mock_cmd patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") - patcher.fs.create_file("/var/core/portsyncd.12345.123.core.gz") - cls = cdump_mod.CoreDumpCreateHandle("portsyncd.12345.123.core.gz") - time.sleep(0.2) + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") + cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") + time.sleep(0.2) # wait for cooloff to pass cls.handle_core_dump_creation_event() + cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz") assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) - assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) + assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "sonic_dump_random3.tar.gz" not in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - - def test_since_argument(self): + assert "sonic_dump_random3.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + assert "orchagent" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP]["sonic_dump_random3.tar.gz"]
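test_since_argument above exercises the script's get_since_arg() path: the configured string is only trusted if GNU date can parse it, otherwise the default is used. The validation idea as a self-contained sketch (the default and the date round-trip mirror the patch; validate_since itself is illustrative):

import subprocess

SINCE_DEFAULT = "2 days ago"  # same fallback the helper defines

def validate_since(since_cfg):
    """Return since_cfg if GNU date can parse it, else the default."""
    if not since_cfg:
        return SINCE_DEFAULT
    rc = subprocess.run(["date", "--date={}".format(since_cfg)],
                        stdout=subprocess.DEVNULL,
                        stderr=subprocess.DEVNULL).returncode
    return since_cfg if rc == 0 else SINCE_DEFAULT

# validate_since("4 days ago") -> "4 days ago"
# validate_since("whatever")   -> "2 days ago" on a GNU coreutils system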
+ + def test_invalid_since_argument(self): """ - Scenario: AUTO_TECHSUPPORT is enabled. Global Cooloff and per proc cooloff is passed - Check if techsupport is invoked and since argument in properly applied + Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. + Check if techsupport is invoked and an invalid since argument is identified """ RedisSingleton.clearState() - set_auto_ts_cfg(state="enabled", cooloff="0.1", since="random") - set_feature_table_cfg(ts="enabled", cooloff="0.5") - RedisHandle.data["STATE_DB"] = {} - RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "orchagent;{}".format(int(time.time())), - "sonic_dump_random2.tar.gz" : "syncd;1575988"} + set_auto_ts_cfg(state="enabled", cooloff="0.1", since="whatever") + set_feature_table_cfg(ts="enabled", cooloff="0.2") + populate_state_db(True) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) if "show techsupport --since \"2 days ago\"" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") - else: - return mock_generic_cmd(cmd_str) + elif "date --date=\"whatever\"" in cmd: + return 1, "", "Invalid Date Format" return 0, "", "" cdump_mod.subprocess_exec = mock_cmd - patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") - time.sleep(0.5) # wait for cooloff to pass + time.sleep(0.2) # wait for cooloff to pass cls.handle_core_dump_creation_event() + cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz") assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) @@ -325,4 +322,46 @@ def mock_cmd(cmd): assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] assert "sonic_dump_random3.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] assert "orchagent" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP]["sonic_dump_random3.tar.gz"] - \ No newline at end of file + + def test_core_dump_cleanup(self): + """ + Scenario: CFG_CORE_CLEANUP is enabled. core-dump limit is crossed + Verify whether cleanup is performed + """ + RedisSingleton.clearState() + set_auto_ts_cfg(coredump_cleanup="enabled", core_usage="5.0") + with Patcher() as patcher: + patcher.fs.set_disk_usage(1000, path="/var/core/") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz", st_size=25) + patcher.fs.create_file("/var/core/lldpmgrd.12345.22.core.gz", st_size=25) + patcher.fs.create_file("/var/core/python3.12345.21.core.gz", st_size=25) + cdump_mod.handle_coredump_cleanup("python3.12345.21.core.gz") + current_fs = os.listdir(cdump_mod.CORE_DUMP_DIR) + assert len(current_fs) == 2 + assert "orchagent.12345.123.core.gz" not in current_fs + assert "lldpmgrd.12345.22.core.gz" in current_fs + assert "python3.12345.21.core.gz" in current_fs
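test_core_dump_cleanup above sizes the fake filesystem so that three 25-byte cores exceed 5% of a 1000-byte partition, which is why exactly one file must be deleted. The arithmetic the cleanup relies on can be sketched as follows (shutil.disk_usage and the percentage cap mirror cleanup_process() in the helper; bytes_to_free is an illustrative wrapper, not the script's code):

import math
import shutil

def bytes_to_free(directory, used_bytes, limit_pct):
    """How many bytes must be freed so used_bytes fits within limit_pct
    percent of the partition holding `directory`."""
    total = shutil.disk_usage(directory).total
    max_limit_bytes = math.floor(limit_pct * total / 100)
    return max(0, used_bytes - max_limit_bytes)

# In the fake filesystem above: total=1000, cap=5% -> 50 bytes allowed;
# three 25-byte cores occupy 75 bytes, so 25 bytes (the oldest core)
# must be deleted, leaving two files behind.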
+ + def test_core_usage_limit_not_crossed(self): + """ + Scenario: CFG_CORE_CLEANUP is enabled. core-dump limit is not crossed + Verify that no cleanup is performed + """ + RedisSingleton.clearState() + set_auto_ts_cfg(coredump_cleanup="enabled", core_usage="5.0") + with Patcher() as patcher: + def mock_cmd(cmd): + cmd_str = " ".join(cmd) + if "show techsupport --since \"2 days ago\"" in cmd_str: + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + return 0, "", "" + patcher.fs.set_disk_usage(2000, path="/var/core/") + patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz", st_size=25) + patcher.fs.create_file("/var/core/lldpmgrd.12345.22.core.gz", st_size=25) + patcher.fs.create_file("/var/core/python3.12345.21.core.gz", st_size=25) + cdump_mod.handle_coredump_cleanup("python3.12345.21.core.gz") + current_fs = os.listdir(cdump_mod.CORE_DUMP_DIR) + assert len(current_fs) == 3 + assert "orchagent.12345.123.core.gz" in current_fs + assert "lldpmgrd.12345.22.core.gz" in current_fs + assert "python3.12345.21.core.gz" in current_fs diff --git a/tests/auto_techsupport_tests/shared_state_mock.py b/tests/auto_techsupport_tests/shared_state_mock.py index 9a978ac350..ff79a6a103 100644 --- a/tests/auto_techsupport_tests/shared_state_mock.py +++ b/tests/auto_techsupport_tests/shared_state_mock.py @@ -1,38 +1,39 @@ import re + class RedisSingleton: """ Introduced to modify/check Redis DB's data outside of the scripts Usage: Clear and Set the state of the mock before every test case """ __instance = None - - @staticmethod + + @staticmethod def getInstance(): - """ Static access method. """ - if RedisSingleton.__instance == None: - RedisSingleton() + """ Static access method.""" + if RedisSingleton.__instance is None: + RedisSingleton() return RedisSingleton.__instance - + @staticmethod def clearState(): """ Clear the Redis State """ - if RedisSingleton.__instance != None: + if RedisSingleton.__instance is not None: RedisSingleton.__instance.data.clear() - + def __init__(self): - if RedisSingleton.__instance != None: + if RedisSingleton.__instance is not None: raise Exception("This class is a singleton!") else: self.data = dict() RedisSingleton.__instance = self - + + class MockConn(object): - """ - SonicV2Connector Mock for the usecases to verify/modify the Redis State outside - of the scope of the connector class """ - def __init__(self, host): + SonicV2Connector Mock + """ + def __init__(self, **kwargs): self.redis = RedisSingleton.getInstance() def connect(self, db_name): @@ -50,45 +51,46 @@ def keys(self, db_name, pattern): for key in all_keys: if re.match(pattern, key): filtered_keys.append(key) - return filtered_keys - + return filtered_keys + def get_all(self, db_name, key): return self.redis.data[db_name].get(key, {}) - + def set(self, db_name, key, field, value, blocking=True): if key not in self.redis.data[db_name]: self.redis.data[db_name][key] = {} self.redis.data[db_name][key][field] = value - + def hmset(self, db_name, key, hash): self.redis.data[db_name][key] = hash - + def hexists(self, db_name, key, field): if key in self.redis.data[db_name]: return True else: return False - + def exists(self, db_name, key): if key in self.redis.data[db_name]: return True else: return False - + def get_redis_client(self, db_name): return MockClient(db_name) - + + class MockClient(object): def __init__(self, db_name): self.redis = RedisSingleton.getInstance() self.db_name = db_name - + def hdel(self, key, field): try: del self.redis.data[self.db_name][key][field] except: pass - + def hset(self, key, field, value): try: self.redis.data[self.db_name][key][field] = value
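A short usage sketch for the singleton mock above: a test resets the shared dict, seeds it, and then reads it back through MockConn (the calls mirror the mock's own API as patched here; the table and field names are arbitrary examples, and the import path would depend on how the test package is laid out):

from shared_state_mock import RedisSingleton, MockConn  # assumed import path

RedisSingleton.clearState()
redis = RedisSingleton.getInstance()
redis.data["CFG_DB"] = {"AUTO_TECHSUPPORT|global": {"state": "enabled"}}

conn = MockConn()       # the patched __init__ accepts arbitrary kwargs
conn.connect("CFG_DB")  # no-op against the shared in-memory dict
assert conn.get_all("CFG_DB", "AUTO_TECHSUPPORT|global")["state"] == "enabled"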
diff --git a/tests/auto_techsupport_tests/techsupport_cleanup_test.py b/tests/auto_techsupport_tests/techsupport_cleanup_test.py index 989e6d627d..31dfc08b02 100644 --- a/tests/auto_techsupport_tests/techsupport_cleanup_test.py +++ b/tests/auto_techsupport_tests/techsupport_cleanup_test.py @@ -23,17 +23,20 @@ # Mock Handle to the data inside the Redis RedisHandle = RedisSingleton.getInstance() + def set_auto_ts_cfg(**kwargs): - state = kwargs[ts_mod.CFG_STATE] if ts_mod.CFG_STATE in kwargs else "disabled" + ts_cleanup = kwargs[ts_mod.CFG_TS_CLEANUP] if ts_mod.CFG_TS_CLEANUP in kwargs else "disabled" max_ts = kwargs[ts_mod.CFG_MAX_TS] if ts_mod.CFG_MAX_TS in kwargs else "0" - RedisHandle.data[ts_mod.CFG_DB] = {ts_mod.AUTO_TS : {ts_mod.CFG_STATE : state, ts_mod.CFG_MAX_TS : max_ts}} - + RedisHandle.data[ts_mod.CFG_DB] = {ts_mod.AUTO_TS : {ts_mod.CFG_TS_CLEANUP : ts_cleanup, + ts_mod.CFG_MAX_TS : max_ts}} + + class TestTechsupportCreationEvent(unittest.TestCase): - + def test_no_cleanup_state_disabled(self): """ - Scenario: AUTO_TECHSUPPORT is disabled. - Check no cleanup is performed, even though the techsupport limit is already crossed + Scenario: TS_CLEANUP is disabled. Check no cleanup is performed, + even though the techsupport limit is already crossed """ RedisSingleton.clearState() set_auto_ts_cfg(max_techsupport_size="5") with Patcher() as patcher: patcher.fs.set_disk_usage(1000, path="/var/dump/") patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=30) patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=30) - patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=30) + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=30) ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz") current_fs = os.listdir(ts_mod.TS_DIR) print(current_fs) @@ -49,19 +52,19 @@ def test_no_cleanup_state_disabled(self): assert "sonic_dump_random1.tar.gz" in current_fs assert "sonic_dump_random2.tar.gz" in current_fs assert "sonic_dump_random3.tar.gz" in current_fs - + def test_no_cleanup_state_enabled(self): """ - Scenario: AUTO_TECHSUPPORT is enabled. + Scenario: TS_CLEANUP is enabled. Verify no cleanup is performed, as the techsupport limit hasn't been crossed yet """ RedisSingleton.clearState() - set_auto_ts_cfg(state="enabled", max_techsupport_size="10") + set_auto_ts_cfg(techsupport_cleanup="enabled", max_techsupport_size="10") with Patcher() as patcher: patcher.fs.set_disk_usage(1000, path="/var/dump/") patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=30) patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=30) - patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=30) + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=30) ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz") current_fs = os.listdir(ts_mod.TS_DIR) print(current_fs) @@ -69,41 +72,41 @@ def test_no_cleanup_state_enabled(self): assert "sonic_dump_random1.tar.gz" in current_fs assert "sonic_dump_random2.tar.gz" in current_fs assert "sonic_dump_random3.tar.gz" in current_fs - +
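All of these tests follow the same pyfakefs pattern: everything inside the `with Patcher()` block sees a fake filesystem, so dump files can be fabricated and partition size pinned without touching the host. A minimal standalone illustration (Patcher, create_file and set_disk_usage are pyfakefs's real APIs; the path and sizes are just examples):

import os
from pyfakefs.fake_filesystem_unittest import Patcher

with Patcher() as patcher:
    patcher.fs.set_disk_usage(1000, path="/var/dump/")  # fake 1000-byte partition
    patcher.fs.create_file("/var/dump/sonic_dump_a.tar.gz", st_size=30)
    assert os.listdir("/var/dump") == ["sonic_dump_a.tar.gz"]
# the real filesystem is untouched once the block exits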
def test_dump_cleanup(self): """ - Scenario: AUTO_TECHSUPPORT is enabled. techsupport size limit is crosed + Scenario: TS_CLEANUP is enabled. techsupport size limit is crossed Verify whether cleanup is performed or not """ RedisSingleton.clearState() - set_auto_ts_cfg(state="enabled", max_techsupport_size="5") + set_auto_ts_cfg(techsupport_cleanup="enabled", max_techsupport_size="5") with Patcher() as patcher: patcher.fs.set_disk_usage(1000, path="/var/dump/") patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=25) patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=25) - patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=25) + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=25) ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz") current_fs = os.listdir(ts_mod.TS_DIR) assert len(current_fs) == 2 assert "sonic_dump_random1.tar.gz" not in current_fs assert "sonic_dump_random2.tar.gz" in current_fs assert "sonic_dump_random3.tar.gz" in current_fs - + def test_state_db_update(self): """ - Scenario: AUTO_TECHSUPPORT is enabled. techsupport size limit is crosed + Scenario: TS_CLEANUP is enabled. techsupport size limit is crossed Verify whether cleanup is performed and the STATE_DB is updated """ RedisSingleton.clearState() - set_auto_ts_cfg(state="enabled", max_techsupport_size="5") + set_auto_ts_cfg(techsupport_cleanup="enabled", max_techsupport_size="5") RedisHandle.data["STATE_DB"] = {} - RedisHandle.data["STATE_DB"][ts_mod.TS_MAP] = {"sonic_dump_random1.tar.gz" : "orchagent;1575985", - "sonic_dump_random2.tar.gz" : "syncd;1575988"} + RedisHandle.data["STATE_DB"][ts_mod.TS_MAP] = {"sonic_dump_random1.tar.gz": "orchagent;1575985", + "sonic_dump_random2.tar.gz": "syncd;1575988"} with Patcher() as patcher: patcher.fs.set_disk_usage(1000, path="/var/dump/") patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=25) patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=25) - patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=25) + patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=25) ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz") current_fs = os.listdir(ts_mod.TS_DIR) print(current_fs) diff --git a/utilities_common/auto_techsupport_helper.py b/utilities_common/auto_techsupport_helper.py index d61d191f1c..47aa8b5eb9 100644 --- a/utilities_common/auto_techsupport_helper.py +++ b/utilities_common/auto_techsupport_helper.py @@ -51,6 +51,8 @@ TIME_BUF = 20 SINCE_DEFAULT = "2 days ago" +NO_COMM = "" + ##### Helper methods def subprocess_exec(cmd): From 195b5adb443c6581d8e8ca7c5b17ee9e44f93990 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Tue, 17 Aug 2021 00:20:13 +0000 Subject: [PATCH 19/60] Updated the Script and UTs Signed-off-by: Vivek Reddy Karri --- scripts/coredump_gen_handler | 149 +++++++++--------- .../coredump_gen_handler_test.py | 86 ++++++---- utilities_common/auto_techsupport_helper.py | 37 +++-- 3 files changed, 153 insertions(+), 119 deletions(-) diff --git a/scripts/coredump_gen_handler b/scripts/coredump_gen_handler index a9254f2ca0..d0d8d4de0f 100644 --- a/scripts/coredump_gen_handler +++ b/scripts/coredump_gen_handler @@ -2,8 +2,8 @@ """ coredump_gen_handler script. - This script is invoked by the coredump-compress script - for auto techsupport invocation and cleanup core dumps. + This script is invoked by the coredump-compress script + for auto techsupport invocation and cleanup core dumps.
For more info, refer to the Event Driven TechSupport & CoreDump Mgmt HLD """ import os, re @@ -18,28 +18,28 @@ from utilities_common.auto_techsupport_helper import * def handle_coredump_cleanup(dump_name): - db = SonicV2Connector(host="127.0.0.1") + db = SonicV2Connector(use_unix_socket_path=True) db.connect(CFG_DB) if db.get(CFG_DB, AUTO_TS, CFG_CORE_CLEANUP) != "enabled": return - core_usage = db.get(CFG_DB, AUTO_TS, CFG_MAX_TS) + core_usage = db.get(CFG_DB, AUTO_TS, CFG_CORE_USAGE) try: core_usage = float(core_usage) - except: + except Exception as e: core_usage = 0.0 if not core_usage: - _ , num_bytes = get_stats(os.path.join(CORE_DUMP_DIR, CORE_DUMP_PTRN)) + _, num_bytes = get_stats(os.path.join(CORE_DUMP_DIR, CORE_DUMP_PTRN)) syslog.syslog(syslog.LOG_INFO, "No Cleanup is performed, current size occupied: {}".format(pretty_size(num_bytes))) return - cleanup_process(max_ts, CORE_DUMP_DIR, CORE_DUMP_PTRN) + cleanup_process(core_usage, CORE_DUMP_PTRN, CORE_DUMP_DIR) class CriticalProcCoreDumpHandle(): """ - Class to handle coredump creation event for critical processes inside the docker + Class to handle coredump creation event for critical processes """ def __init__(self, core_name): self.core_name = core_name @@ -50,80 +50,79 @@ class CriticalProcCoreDumpHandle(): def handle_core_dump_creation_event(self): file_path = os.path.join(CORE_DUMP_DIR, self.core_name) - if not verify_recent_file_creation(file_path): + if not verify_recent_file_creation(file_path): return - self.db = SonicV2Connector(host="127.0.0.1") + self.db = SonicV2Connector(use_unix_socket_path=True) self.db.connect(CFG_DB) self.db.connect(STATE_DB) - if self.db.get(CFG_DB, AUTO_TS, CFG_STATE) != "enabled": + if self.db.get(CFG_DB, AUTO_TS, CFG_INVOC_TS) != "enabled": return - self.fetch_critical_procs() - proc = self.core_name.split(".")[0] - if proc not in self.proc_mp: - print(self.proc_mp) - return # Only handles the critical processes - FEATURE_KEY = FEATURE.format(self.proc_mp[proc]) - if self.db.get(CFG_DB, FEATURE_KEY, llTS) != "enabled": - return # Should be set "enabled" in the FEATURE Table + container_name, process_name = self.fetch_exit_event() + if not (process_name and container_name): + msg = "No Corresponding Exit Event was found for {}. Techsupport Invocation is skipped".format(self.core_name) + syslog.syslog(syslog.LOG_INFO, msg) + return + + FEATURE_KEY = FEATURE.format(container_name) + if self.db.get(CFG_DB, FEATURE_KEY, TS) != "enabled": + msg = "auto-techsupport feature for {} is not enabled. Techsupport Invocation is skipped. 
core: {}" + syslog.syslog(syslog.LOG_NOTICE, msg.format(container_name, self.core_name)) + return global_cooloff = self.db.get(CFG_DB, AUTO_TS, COOLOFF) proc_cooloff = self.db.get(CFG_DB, FEATURE_KEY, COOLOFF) - cooloff_passed = self.verify_cooloff(global_cooloff, proc_cooloff, proc) + try: + global_cooloff = float(global_cooloff) + except: + global_cooloff = 0.0 + + try: + proc_cooloff = float(proc_cooloff) + except: + proc_cooloff = 0.0 + + cooloff_passed = self.verify_cooloff(global_cooloff, proc_cooloff, process_name) if cooloff_passed: since_cfg = self.get_since_arg() new_file = self.invoke_ts_cmd(since_cfg) if new_file: - self.db.set(STATE_DB, TS_MAP, os.path.basename(new_file[0]), "{};{}".format(self.core_name, int(time.time()))) - - core_usage = 0 - if self.db.hexists(CFG_DB, AUTO_TS, CFG_CORE_USAGE): - core_usage = self.db.get(CFG_DB, AUTO_TS, CFG_CORE_USAGE) - try: - core_usage = int(core_usage) - except: - core_usage = 0 - - if core_usage == 0: - _ , num_bytes = get_stats(os.path.join(CORE_DUMP_DIR, CORE_DUMP_PTRN)) - syslog.syslog(syslog.LOG_INFO, "No Cleanup is performed, current size occupied: {}".format(pretty_size(num_bytes))) - return - - cleanup_process(core_usage, CORE_DUMP_PTRN, CORE_DUMP_DIR) + field = os.path.basename(new_file[0]) + value = "{};{};{}".format(self.core_name, int(time.time()), process_name) + self.db.set(STATE_DB, TS_MAP, field, value) def get_since_arg(self): since_cfg = self.db.get(CFG_DB, AUTO_TS, CFG_SINCE) if not since_cfg: return SINCE_DEFAULT - rc, _, _ = subprocess_exec(["date", "--date=\"{}\"".format(since_cfg)]) - if rc != 0: + rc, _, stderr = subprocess_exec(["date", "--date='{}'".format(since_cfg)]) + if rc == 0: return since_cfg return SINCE_DEFAULT def invoke_ts_cmd(self, since_cfg): - since_cfg = "\"" + since_cfg + "\"" - _, out, _ = subprocess_exec(["show", "techsupport", "--since", since_cfg]) + since_cfg = "'" + since_cfg + "'" + cmd = " ".join(["show", "techsupport", "--since", since_cfg]) + _, _, _ = subprocess_exec(["show", "techsupport", "--since", since_cfg]) new_list = get_ts_dumps(True) diff = list(set(new_list).difference(set(self.curr_ts_list))) self.curr_ts_list = new_list if len(diff) == 0: - syslog.syslog(syslog.LOG_ERR, "'show techsupport' invocation was successful but no TechSupport Dump was found") + syslog.syslog(syslog.LOG_ERR, "{} was run, but no techsupport dump is found".format(cmd)) else: - syslog.syslog(syslog.LOG_INFO, "'show techsupport' invocation is successful, {} is created".format(diff)) + syslog.syslog(syslog.LOG_INFO, "{} is successful, {} is created".format(cmd, diff)) return diff def verify_cooloff(self, global_cooloff, proc_cooloff, proc): """Verify both the global cooloff and per-proc cooloff has passed""" self.curr_ts_list = get_ts_dumps(True) - if global_cooloff and len(self.curr_ts_list) != 0: - global_cooloff = float(global_cooloff) + if global_cooloff and self.curr_ts_list: last_ts_dump_creation = os.path.getmtime(self.curr_ts_list[-1]) - print(last_ts_dump_creation, global_cooloff, time.time()) if time.time() - last_ts_dump_creation < global_cooloff: - print("Reached Here!!!@") - syslog.syslog(syslog.LOG_INFO, "Cooloff period has not yet passed. No Techsupport Invocation is performed ") + msg = "Global Cooloff period has not passed. Techsupport Invocation is skipped. 
Core: {}" + syslog.syslog(syslog.LOG_INFO, msg.format(self.core_name)) return False ts_map = self.db.get_all(STATE_DB, TS_MAP) @@ -131,36 +130,44 @@ class CriticalProcCoreDumpHandle(): if proc_cooloff and proc in self.core_ts_map: last_creation_time = self.core_ts_map[proc][0][0] - proc_cooloff = float(proc_cooloff) if time.time() - last_creation_time < proc_cooloff: - syslog.syslog(syslog.LOG_INFO, "Cooloff period for {} prcess has not yet passed. No Techsupport Invocation is performed".format(proc)) + msg = "Process Cooloff period for {} has not passed. Techsupport Invocation is skipped. Core: {}" + syslog.syslog(syslog.LOG_INFO, msg.format(proc, self.core_name)) return False return True def parse_ts_map(self, ts_map): - """Create core_dump, ts_dump & creation_time map""" - print(ts_map) + """Create proc_name, ts_dump & creation_time map""" for ts_dump, tup in ts_map.items(): - core_dump, creation_time = tup.split(";") - if core_dump not in self.core_ts_map: - self.core_ts_map[core_dump] = [] - self.core_ts_map[core_dump].append((int(creation_time), ts_dump)) - for core_dump in self.core_ts_map: - self.core_ts_map[core_dump].sort() - - def fetch_critical_procs(self): - """Fetches the critical_procs and corresponding docker names""" - keys = self.db.keys(CFG_DB, FEATURE.format("*")) - containers = [key.split("|")[-1] for key in keys] - print(keys, containers) - for container in containers: - rc, stdout, _ = subprocess_exec(["docker", "exec", "-t", container, "cat", "/etc/supervisor/critical_processes"]) - if rc != 0: - continue - procs = stdout.split() - for proc in procs: - if re.match("program:*", proc): - self.proc_mp[proc.split(":")[-1]] = container + core_dump, creation_time, proc_name = tup.split(";") + if proc_name not in self.core_ts_map: + self.core_ts_map[proc_name] = [] + self.core_ts_map[proc_name].append((int(creation_time), ts_dump)) + for proc_name in self.core_ts_map: + self.core_ts_map[proc_name].sort() + + def fetch_exit_event(self): + """Fetch the relevant entry in the AUTO_TECHSUPPORT|PROC_EXIT_EVENTS table""" + comm, _, pid, _, _ = self.core_name.split(".") + feature_name, supervisor_proc_name = "", "" + start = time.time() + while time.time() - start <= WAIT_BUFFER: + data = self.db.get_all("STATE_DB", CRITICAL_PROC) + for field in data: + try: + pid_, comm_ = data[field].split(";") + if pid_ == pid and comm in comm_: + feature_name, supervisor_proc_name = field.split(";") + break + elif comm_ == NO_COMM and pid_ == pid: + feature_name, supervisor_proc_name = field.split(";") + continue + except Exception as e: + continue + if feature_name and supervisor_proc_name: + break + time.sleep(SLEEP_FOR) + return feature_name, supervisor_proc_name def main(): diff --git a/tests/auto_techsupport_tests/coredump_gen_handler_test.py b/tests/auto_techsupport_tests/coredump_gen_handler_test.py index 06de7768cb..462f12ffa1 100644 --- a/tests/auto_techsupport_tests/coredump_gen_handler_test.py +++ b/tests/auto_techsupport_tests/coredump_gen_handler_test.py @@ -24,6 +24,12 @@ RedisHandle = RedisSingleton.getInstance() +def mock_syslog(level, msg): + print("SYSLOG: " + msg) + +cdump_mod.syslog.syslog = mock_syslog + + def set_auto_ts_cfg(**kwargs): invoke_ts = kwargs[cdump_mod.CFG_INVOC_TS] if cdump_mod.CFG_INVOC_TS in kwargs else "disabled" core_cleanup = kwargs[cdump_mod.CFG_CORE_CLEANUP] if cdump_mod.CFG_CORE_CLEANUP in kwargs else "disabled" @@ -32,7 +38,7 @@ def set_auto_ts_cfg(**kwargs): since_cfg = kwargs[cdump_mod.CFG_SINCE] if cdump_mod.CFG_SINCE in kwargs else "None" if 
cdump_mod.CFG_DB not in RedisHandle.data: RedisHandle.data[cdump_mod.CFG_DB] = {} - RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.AUTO_TS] = {cdump_mod.CFG_INVOC_TS: state, + RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.AUTO_TS] = {cdump_mod.CFG_INVOC_TS: invoke_ts, cdump_mod.COOLOFF: cooloff, cdump_mod.CFG_CORE_USAGE: core_usage, cdump_mod.CFG_CORE_CLEANUP: core_cleanup, @@ -48,17 +54,23 @@ def set_feature_table_cfg(ts="disabled", cooloff="0", container_name="swss"): def populate_state_db(use_default=True, data=None): if use_default: - data = {cdump_mod.TS_MAP: {"sonic_dump_random1.tar.gz": "portsyncd;1575985", - "sonic_dump_random2.tar.gz": "syncd;1575988"}, - cdump_mod.CRITICAL_PROC: {"swss:orchagent": "123:orchagent"}} - if cdump_mod.CFG_DB not in RedisHandle.data: + data = {cdump_mod.TS_MAP: {"sonic_dump_random1.tar.gz": "portsyncd;1575985;portsyncd", + "sonic_dump_random2.tar.gz": "syncd;1575988;syncd"}, + cdump_mod.CRITICAL_PROC: {"swss;orchagent": "123;orchagent"}} + if cdump_mod.STATE_DB not in RedisHandle.data: RedisHandle.data[cdump_mod.STATE_DB] = {} + RedisHandle.data[cdump_mod.STATE_DB][cdump_mod.TS_MAP] = {} + RedisHandle.data[cdump_mod.STATE_DB][cdump_mod.CRITICAL_PROC] = {} for key in data: RedisHandle.data[cdump_mod.STATE_DB][key] = data[key] class TestCoreDumpCreationEvent(unittest.TestCase): + def setUp(self): + cdump_mod.WAIT_BUFFER = 1 + cdump_mod.SLEEP_FOR = 0.25 + def test_invoc_ts_state_db_update(self): """ Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled and no cooloff is provided @@ -80,9 +92,9 @@ def mock_cmd(cmd): patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") - cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz") cls.handle_core_dump_creation_event() - handle_coredump_cleanup("orchagent.12345.123.core.gz") + cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz") assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) @@ -112,16 +124,16 @@ def mock_cmd(cmd): patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") - cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz") cls.handle_core_dump_creation_event() - handle_coredump_cleanup("orchagent.12345.123.core.gz") + cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz") assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] assert "sonic_dump_random3.tar.gz" not in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - + def test_per_proc_cooloff(self): """ Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. 
Global Cooloff is passed @@ -143,7 +155,7 @@ def mock_cmd(cmd): patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") - cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz") time.sleep(0.25) # wait for global cooloff to pass cls.handle_core_dump_creation_event() assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) @@ -159,7 +171,7 @@ def test_invoc_ts_after_cooloff(self): All the cooloff's are passed. Check if techsupport is invoked """ RedisSingleton.clearState() - set_auto_ts_cfg(state="enabled", cooloff="0.1") + set_auto_ts_cfg(auto_invoke_ts="enabled", cooloff="0.1") set_feature_table_cfg(ts="enabled", cooloff="0.25") populate_state_db(True) with Patcher() as patcher: @@ -171,11 +183,11 @@ def mock_cmd(cmd): return 1, "", "Command Not Found" return 0, "", "" cdump_mod.subprocess_exec = mock_cmd - patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") - cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") - time.sleep(0.25) # wait for all the cooloff's to pass + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz") + time.sleep(0.25) # wait for all the cooloff's to pass cls.handle_core_dump_creation_event() assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) @@ -191,8 +203,9 @@ def test_core_dump_with_no_exit_event(self): Core Dump is found but no relevant exit_event entry is found in STATE_DB. 
""" RedisSingleton.clearState() - set_auto_ts_cfg(state="enabled") + set_auto_ts_cfg(auto_invoke_ts="enabled") set_feature_table_cfg(ts="enabled") + populate_state_db(False, {}) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) @@ -202,8 +215,9 @@ def mock_cmd(cmd): return 1, "", "Command Not Found" return 0, "", "" cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/core/snmpd.12345.123.core.gz") - cls = cdump_mod.CoreDumpCreateHandle("snmpd.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("snmpd.12345.123.core.gz") cls.handle_core_dump_creation_event() assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random3.tar.gz" not in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] @@ -214,9 +228,9 @@ def test_core_dump_with_exit_event_unknown_cmd(self): Core Dump is found but the comm in exit_event entry is """ RedisSingleton.clearState() - set_auto_ts_cfg(state="enabled") + set_auto_ts_cfg(auto_invoke_ts="enabled") set_feature_table_cfg(ts="enabled", container_name="snmp") - populate_state_db(False, {"snmp:snmp-subagent": "123;"}) + populate_state_db(False, {cdump_mod.CRITICAL_PROC: {"snmp;snmp-subagent": "123;"}}) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) @@ -226,8 +240,9 @@ def mock_cmd(cmd): return 1, "", "Command Not Found" return 0, "", "" cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/core/python3.12345.123.core.gz") - cls = cdump_mod.CoreDumpCreateHandle("python3.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("python3.12345.123.core.gz") cls.handle_core_dump_creation_event() assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "snmp-subagent" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP]["sonic_dump_random3.tar.gz"] @@ -239,7 +254,7 @@ def test_feature_table_not_set(self): Check if techsupport is not invoked """ RedisSingleton.clearState() - set_auto_ts_cfg(state="enabled") + set_auto_ts_cfg(auto_invoke_ts="enabled") set_feature_table_cfg(ts="disabled", container_name="snmp") populate_state_db(False, {"snmp:snmp-subagent": "123;python3"}) with Patcher() as patcher: @@ -251,8 +266,9 @@ def mock_cmd(cmd): return 1, "", "Command Not Found" return 0, "", "" cdump_mod.subprocess_exec = mock_cmd + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/core/python3.12345.123.core.gz") - cls = cdump_mod.CoreDumpCreateHandle("python3.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("python3.12345.123.core.gz") cls.handle_core_dump_creation_event() cdump_mod.handle_coredump_cleanup("python3.12345.123.core.gz") assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) @@ -263,22 +279,24 @@ def test_since_argument(self): Check if techsupport is invoked and since argument in properly applied """ RedisSingleton.clearState() - set_auto_ts_cfg(state="enabled", cooloff="0.1", since="4 days ago") + set_auto_ts_cfg(auto_invoke_ts="enabled", cooloff="0.1", since="4 days ago") set_feature_table_cfg(ts="enabled", cooloff="0.2") populate_state_db(True) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) - if "show techsupport --since \"4 days ago\"" in cmd_str: + if "show techsupport --since '4 days ago'" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") + return 0, "", "" + elif "date --date='4 days ago'" 
in cmd_str: + return 0, "", "" else: return 1, "", "Invalid Command" - return 0, "", "" cdump_mod.subprocess_exec = mock_cmd patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") - cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz") time.sleep(0.2) # wait for cooloff to pass cls.handle_core_dump_creation_event() cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz") @@ -296,22 +314,24 @@ def test_invalid_since_argument(self): Check if techsupport is invoked and an invalid since argument in identified """ RedisSingleton.clearState() - set_auto_ts_cfg(state="enabled", cooloff="0.1", since="whatever") + set_auto_ts_cfg(auto_invoke_ts="enabled", cooloff="0.1", since="whatever") set_feature_table_cfg(ts="enabled", cooloff="0.2") populate_state_db(True) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) - if "show techsupport --since \"2 days ago\"" in cmd_str: + if "show techsupport --since '2 days ago'" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") - elif "date --date=\"whatever\"" in cmd: + return 0, "", "" + elif "date --date='whatever'" in cmd_str: return 1, "", "Invalid Date Format" - return 0, "", "" + else: + return 1, "", "" cdump_mod.subprocess_exec = mock_cmd patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") - cls = cdump_mod.CoreDumpCreateHandle("orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz") time.sleep(0.2) # wait for cooloff to pass cls.handle_core_dump_creation_event() cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz") @@ -329,7 +349,7 @@ def test_core_dump_cleanup(self): Verify Whether is cleanup is performed """ RedisSingleton.clearState() - set_auto_ts_cfg(coredump_cleanup="enabled", core_usage="5.0") + set_auto_ts_cfg(coredump_cleanup="enabled", core_usage="6.0") with Patcher() as patcher: patcher.fs.set_disk_usage(1000, path="/var/core/") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz", st_size=25) @@ -352,7 +372,7 @@ def test_core_usage_limit_not_crossed(self): with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) - if "show techsupport --since \"2 days ago\"" in cmd_str: + if "show techsupport" in cmd_str: patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz") return 0, "", "" patcher.fs.set_disk_usage(2000, path="/var/core/") diff --git a/utilities_common/auto_techsupport_helper.py b/utilities_common/auto_techsupport_helper.py index 47aa8b5eb9..efa3cdbe4b 100644 --- a/utilities_common/auto_techsupport_helper.py +++ b/utilities_common/auto_techsupport_helper.py @@ -52,9 +52,11 @@ TIME_BUF = 20 SINCE_DEFAULT = "2 days ago" NO_COMM = "" +WAIT_BUFFER = 40 +SLEEP_FOR = 4 -##### Helper methods +##### Helper methods def subprocess_exec(cmd): output = subprocess.run( cmd, @@ -63,6 +65,7 @@ def subprocess_exec(cmd): ) return output.returncode, output.stdout, output.stderr + def get_ts_dumps(full_path=False): """ Get the list of TS dumps in the TS_DIR, sorted by the creation time """ curr_list = glob.glob(os.path.join(TS_DIR, TS_PTRN)) @@ -71,6 +74,7 @@ def get_ts_dumps(full_path=False): return curr_list return [os.path.basename(name) for name in curr_list] + 
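The verify_recent_file_creation hunk just below is what gates both scripts: an event is only acted on if the named file was created within the last TIME_BUF seconds, which filters out stale or replayed invocations. The same idea as a standalone sketch (os.path.getmtime stands in for the creation time, as it does in the helper; is_recent is illustrative):

import os
import time

TIME_BUF = 20  # seconds, as defined in the helper

def is_recent(path, in_last_sec=TIME_BUF):
    """True if `path` exists and its mtime falls within the last in_last_sec seconds."""
    try:
        age = time.time() - os.path.getmtime(path)
    except OSError:
        return False
    return age < in_last_sec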
def verify_recent_file_creation(file_path, in_last_sec=TIME_BUF): """ Verify if the file exists and is created within the last TIME_BUF sec """ curr = time.time() @@ -79,10 +83,11 @@ def verify_recent_file_creation(file_path, in_last_sec=TIME_BUF): except: return False if curr - was_created_on < in_last_sec: - return True + return True else: return False + def get_stats(ptrn, collect_stats=True): """ Returns the size of the files (matched by the ptrn) occupied. @@ -98,17 +103,18 @@ def get_stats(ptrn, collect_stats=True): total_size += file_size if collect_stats: # Sort by the Descending order of file_creation_time, size_of_file - file_stats = sorted(file_stats, key = lambda sub: (-sub[0], sub[1], sub[2])) + file_stats = sorted(file_stats, key=lambda sub: (-sub[0], sub[1], sub[2])) return (file_stats, total_size) + def pretty_size(bytes): """Get human-readable file sizes""" UNITS_MAPPING = [ - (1<<50, ' PB'), - (1<<40, ' TB'), - (1<<30, ' GB'), - (1<<20, ' MB'), - (1<<10, ' KB'), + (1 << 50, ' PB'), + (1 << 40, ' TB'), + (1 << 30, ' GB'), + (1 << 20, ' MB'), + (1 << 10, ' KB'), (1, (' byte', ' bytes')), ] for factor, suffix in UNITS_MAPPING: @@ -124,20 +130,21 @@ def pretty_size(bytes): suffix = multiple return str(amount) + suffix + def cleanup_process(limit, file_ptrn, dir): """Deletes the oldest files incrementally until the size is under limit""" - if not(1 <= limit and limit <= 100): + if not(0 < limit and limit < 100): syslog.syslog(syslog.LOG_ERR, "core_usage_limit can only be between 1 and 100, whereas the configured value is: {}".format(limit)) - return - + return + fs_stats, curr_size = get_stats(os.path.join(dir, file_ptrn)) orig_dumps = len(fs_stats) - disk_stats = shutil.disk_usage(dir) + disk_stats = shutil.disk_usage(dir) max_limit_bytes = math.floor((limit*disk_stats.total/100)) - + if curr_size <= max_limit_bytes: - return - + return + num_bytes_to_del = curr_size - max_limit_bytes num_deleted = 0 removed_files = [] From 9a50c0fcc4d94126bdd6af66a5d427bab7a217ca Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Tue, 17 Aug 2021 02:14:32 +0000 Subject: [PATCH 20/60] Minor Change to test Signed-off-by: Vivek Reddy Karri --- tests/auto_techsupport_tests/coredump_gen_handler_test.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/auto_techsupport_tests/coredump_gen_handler_test.py b/tests/auto_techsupport_tests/coredump_gen_handler_test.py index 462f12ffa1..ffed2c0e8d 100644 --- a/tests/auto_techsupport_tests/coredump_gen_handler_test.py +++ b/tests/auto_techsupport_tests/coredump_gen_handler_test.py @@ -24,12 +24,6 @@ RedisHandle = RedisSingleton.getInstance() -def mock_syslog(level, msg): - print("SYSLOG: " + msg) - -cdump_mod.syslog.syslog = mock_syslog - - def set_auto_ts_cfg(**kwargs): invoke_ts = kwargs[cdump_mod.CFG_INVOC_TS] if cdump_mod.CFG_INVOC_TS in kwargs else "disabled" core_cleanup = kwargs[cdump_mod.CFG_CORE_CLEANUP] if cdump_mod.CFG_CORE_CLEANUP in kwargs else "disabled" From 3e66b70ef5cb13f5e3e28ac19bb6e6b7087d5103 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Tue, 17 Aug 2021 06:35:33 +0000 Subject: [PATCH 21/60] Auto GEN CLI's added Signed-off-by: Vivek Reddy Karri --- config/plugins/auto_techsupport.py | 374 +++++++++++++++++++++++++++++ show/plugins/auto_techsupport.py | 132 ++++++++++ 2 files changed, 506 insertions(+) create mode 100644 config/plugins/auto_techsupport.py create mode 100644 show/plugins/auto_techsupport.py diff --git a/config/plugins/auto_techsupport.py b/config/plugins/auto_techsupport.py new file mode 100644 
index 0000000000..7db3bf7fa5 --- /dev/null +++ b/config/plugins/auto_techsupport.py @@ -0,0 +1,374 @@ +""" +Autogenerated config CLI plugin. + + +""" + +import click +import utilities_common.cli as clicommon +import utilities_common.general as general +from config import config_mgmt + + +# Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. +sonic_cfggen = general.load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') + + +def exit_with_error(*args, **kwargs): + """ Print a message and abort CLI. """ + + click.secho(*args, **kwargs) + raise click.Abort() + + +def validate_config_or_raise(cfg): + """ Validate config db data using ConfigMgmt """ + + try: + cfg = sonic_cfggen.FormatConverter.to_serialized(cfg) + config_mgmt.ConfigMgmt().loadData(cfg) + except Exception as err: + raise Exception('Failed to validate configuration: {}'.format(err)) + + +def add_entry_validated(db, table, key, data): + """ Add new entry in table and validate configuration """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key in cfg[table]: + raise Exception(f"{key} already exists") + + cfg[table][key] = data + + validate_config_or_raise(cfg) + db.set_entry(table, key, data) + + +def update_entry_validated(db, table, key, data, create_if_not_exists=False): + """ Update entry in table and validate configuration. + If attribute value in data is None, the attribute is deleted. + """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + + if create_if_not_exists: + cfg[table].setdefault(key, {}) + + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + + for attr, value in data.items(): + if value is None and attr in cfg[table][key]: + cfg[table][key].pop(attr) + else: + cfg[table][key][attr] = value + + validate_config_or_raise(cfg) + db.set_entry(table, key, cfg[table][key]) + + +def del_entry_validated(db, table, key): + """ Delete entry in table and validate configuration """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + + cfg[table].pop(key) + + validate_config_or_raise(cfg) + db.set_entry(table, key, None) + + +def add_list_entry_validated(db, table, key, attr, data): + """ Add new entry into list in table and validate configuration""" + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + cfg[table][key].setdefault(attr, []) + for entry in data: + if entry in cfg[table][key][attr]: + raise Exception(f"{entry} already exists") + cfg[table][key][attr].append(entry) + + validate_config_or_raise(cfg) + db.set_entry(table, key, cfg[table][key]) + + +def del_list_entry_validated(db, table, key, attr, data): + """ Delete entry from list in table and validate configuration""" + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + cfg[table][key].setdefault(attr, []) + for entry in data: + if entry not in cfg[table][key][attr]: + raise Exception(f"{entry} does not exist") + cfg[table][key][attr].remove(entry) + if not cfg[table][key][attr]: + cfg[table][key].pop(attr) + + validate_config_or_raise(cfg) + db.set_entry(table, key, cfg[table][key]) + + +def clear_list_entry_validated(db, table, key, attr): + """ Clear list in object and validate configuration""" + + update_entry_validated(db, table, key, {attr: None}) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
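update_entry_validated() above treats a None attribute value as "delete this field", which is the convention the generated del commands rely on. The merge rule in isolation, as a pure-Python sketch (table/key handling and the ConfigMgmt validation are stripped out; merge_entry is illustrative, not the plugin's code):

def merge_entry(entry, data):
    """Apply the update convention: None deletes a field, anything else
    overwrites it. Returns the merged entry without mutating the input."""
    merged = dict(entry)
    for attr, value in data.items():
        if value is None:
            merged.pop(attr, None)
        else:
            merged[attr] = value
    return merged

# merge_entry({"cooloff": "300", "since": "2 days ago"}, {"since": None})
# -> {"cooloff": "300"}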
+@click.group(name="auto-techsupport", + cls=clicommon.AliasedGroup) +def AUTO_TECHSUPPORT(): + """ AUTO_TECHSUPPORT part of config_db.json """ + + pass + + + + +@AUTO_TECHSUPPORT.group(name="global", + cls=clicommon.AliasedGroup) +@clicommon.pass_db +def AUTO_TECHSUPPORT_global(db): + """ """ + + pass + + + + +@AUTO_TECHSUPPORT_global.command(name="auto-invoke-ts") + +@click.argument( + "auto-invoke-ts", + nargs=1, + required=True, +) +@clicommon.pass_db +def AUTO_TECHSUPPORT_global_auto_invoke_ts(db, auto_invoke_ts): + """ """ + + table = "AUTO_TECHSUPPORT" + key = "global" + data = { + "auto_invoke_ts": auto_invoke_ts, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + + +@AUTO_TECHSUPPORT_global.command(name="coredump-cleanup") + +@click.argument( + "coredump-cleanup", + nargs=1, + required=True, +) +@clicommon.pass_db +def AUTO_TECHSUPPORT_global_coredump_cleanup(db, coredump_cleanup): + """ """ + + table = "AUTO_TECHSUPPORT" + key = "global" + data = { + "coredump_cleanup": coredump_cleanup, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + + +@AUTO_TECHSUPPORT_global.command(name="techsupport-cleanup") + +@click.argument( + "techsupport-cleanup", + nargs=1, + required=True, +) +@clicommon.pass_db +def AUTO_TECHSUPPORT_global_techsupport_cleanup(db, techsupport_cleanup): + """ """ + + table = "AUTO_TECHSUPPORT" + key = "global" + data = { + "techsupport_cleanup": techsupport_cleanup, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + + +@AUTO_TECHSUPPORT_global.command(name="cooloff") + +@click.argument( + "cooloff", + nargs=1, + required=True, +) +@clicommon.pass_db +def AUTO_TECHSUPPORT_global_cooloff(db, cooloff): + """ """ + + table = "AUTO_TECHSUPPORT" + key = "global" + data = { + "cooloff": cooloff, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + + +@AUTO_TECHSUPPORT_global.command(name="max-techsupport-size") + +@click.argument( + "max-techsupport-size", + nargs=1, + required=True, +) +@clicommon.pass_db +def AUTO_TECHSUPPORT_global_max_techsupport_size(db, max_techsupport_size): + """ """ + + table = "AUTO_TECHSUPPORT" + key = "global" + data = { + "max_techsupport_size": max_techsupport_size, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + + +@AUTO_TECHSUPPORT_global.command(name="core-usage") + +@click.argument( + "core-usage", + nargs=1, + required=True, +) +@clicommon.pass_db +def AUTO_TECHSUPPORT_global_core_usage(db, core_usage): + """ """ + + table = "AUTO_TECHSUPPORT" + key = "global" + data = { + "core_usage": core_usage, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + + +@AUTO_TECHSUPPORT_global.command(name="since") + +@click.argument( + "since", + nargs=1, + required=True, +) +@clicommon.pass_db +def AUTO_TECHSUPPORT_global_since(db, since): + """ """ + + table = "AUTO_TECHSUPPORT" + key = "global" + data = { + "since": since, + } + try: + 
update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + + + + + + + + + + + + + + + + + + + + + + + + + + + +def register(cli): + cli_node = AUTO_TECHSUPPORT + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(AUTO_TECHSUPPORT) \ No newline at end of file diff --git a/show/plugins/auto_techsupport.py b/show/plugins/auto_techsupport.py new file mode 100644 index 0000000000..a11bdcb11a --- /dev/null +++ b/show/plugins/auto_techsupport.py @@ -0,0 +1,132 @@ +""" +Auto-generated show CLI plugin. + + +""" + +import click +import tabulate +import natsort +import utilities_common.cli as clicommon + + + + + +def format_attr_value(entry, attr): + """ Helper that formats attribute to be presented in the table output. + + Args: + entry (Dict[str, str]): CONFIG DB entry configuration. + attr (Dict): Attribute metadata. + + Returns: + str: fomatted attribute value. + """ + + if attr["is-leaf-list"]: + return "\n".join(entry.get(attr["name"], [])) + return entry.get(attr["name"], "N/A") + + +def format_group_value(entry, attrs): + """ Helper that formats grouped attribute to be presented in the table output. + + Args: + entry (Dict[str, str]): CONFIG DB entry configuration. + attrs (List[Dict]): Attributes metadata that belongs to the same group. + + Returns: + str: fomatted group attributes. + """ + + data = [] + for attr in attrs: + if entry.get(attr["name"]): + data.append((attr["name"] + ":", format_attr_value(entry, attr))) + return tabulate.tabulate(data, tablefmt="plain") + + + + + + + + + + + + +@click.group(name="auto-techsupport", + cls=clicommon.AliasedGroup) +def AUTO_TECHSUPPORT(): + """ AUTO_TECHSUPPORT part of config_db.json """ + + pass + + + +@AUTO_TECHSUPPORT.command(name="global") +@clicommon.pass_db +def AUTO_TECHSUPPORT_global(db): + """ """ + + header = [ + +"AUTO INVOKE TS", +"COREDUMP CLEANUP", +"TECHSUPPORT CLEANUP", +"COOLOFF", +"MAX TECHSUPPORT SIZE", +"CORE USAGE", +"SINCE", + +] + + body = [] + + table = db.cfgdb.get_table("AUTO_TECHSUPPORT") + entry = table.get("global", {}) + row = [ + format_attr_value( + entry, + {'name': 'auto_invoke_ts', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'coredump_cleanup', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'techsupport_cleanup', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'cooloff', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'max_techsupport_size', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'core_usage', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'since', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), +] + + body.append(row) + click.echo(tabulate.tabulate(body, header)) + + + + + +def register(cli): + cli_node = AUTO_TECHSUPPORT + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(AUTO_TECHSUPPORT) \ No newline at end of file From b6ae7bbe3b54d7f3e653a1b8f02b6732777eac90 Mon Sep 17 00:00:00 2001 From: Vivek 
Reddy Karri Date: Tue, 17 Aug 2021 06:41:55 +0000 Subject: [PATCH 22/60] Updated Setup.py Signed-off-by: Vivek Reddy Karri --- setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.py b/setup.py index 46eee5f925..cf505b64d5 100644 --- a/setup.py +++ b/setup.py @@ -229,6 +229,5 @@ def run_tests(self): 'Topic :: Utilities', ], keywords='sonic SONiC utilities command line cli CLI', - cmdclass={"pytest": PyTest}, test_suite='setup.get_test_suite' ) From e8bfd2ecf27ca3124e1daabfadf2e6d96609618d Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Wed, 18 Aug 2021 19:27:34 +0000 Subject: [PATCH 23/60] Tests Updated to use default mock infra Signed-off-by: Vivek Reddy Karri --- ...mp_gen_handler => coredump_gen_handler.py} | 56 ++-- ...support_cleanup => techsupport_cleanup.py} | 14 +- setup.py | 5 +- tests/auto_techsupport_tests/__init__.py | 0 .../shared_state_mock.py | 98 ------- .../coredump_gen_handler_test.py | 258 +++++++++--------- .../techsupport_cleanup_test.py | 68 ++--- utilities_common/auto_techsupport_helper.py | 9 +- 8 files changed, 199 insertions(+), 309 deletions(-) rename scripts/{coredump_gen_handler => coredump_gen_handler.py} (84%) rename scripts/{techsupport_cleanup => techsupport_cleanup.py} (89%) delete mode 100644 tests/auto_techsupport_tests/__init__.py delete mode 100644 tests/auto_techsupport_tests/shared_state_mock.py rename tests/{auto_techsupport_tests => }/coredump_gen_handler_test.py (62%) rename tests/{auto_techsupport_tests => }/techsupport_cleanup_test.py (66%) diff --git a/scripts/coredump_gen_handler b/scripts/coredump_gen_handler.py similarity index 84% rename from scripts/coredump_gen_handler rename to scripts/coredump_gen_handler.py index d0d8d4de0f..2bbfa58815 100644 --- a/scripts/coredump_gen_handler +++ b/scripts/coredump_gen_handler.py @@ -1,12 +1,11 @@ -#!/usr/bin/env python3 - """ coredump_gen_handler script. This script is invoked by the coredump-compress script for auto techsupport invocation and cleanup core dumps. For more info, refer to the Event Driven TechSupport & CoreDump Mgmt HLD """ -import os, re +import os +import re import sys import glob import time @@ -17,9 +16,7 @@ from utilities_common.auto_techsupport_helper import * -def handle_coredump_cleanup(dump_name): - db = SonicV2Connector(use_unix_socket_path=True) - db.connect(CFG_DB) +def handle_coredump_cleanup(dump_name, db): if db.get(CFG_DB, AUTO_TS, CFG_CORE_CLEANUP) != "enabled": return @@ -41,9 +38,9 @@ class CriticalProcCoreDumpHandle(): """ Class to handle coredump creation event for critical processes """ - def __init__(self, core_name): + def __init__(self, core_name, db): self.core_name = core_name - self.db = None + self.db = db self.proc_mp = {} self.core_ts_map = {} self.curr_ts_list = [] @@ -51,11 +48,9 @@ def __init__(self, core_name): def handle_core_dump_creation_event(self): file_path = os.path.join(CORE_DUMP_DIR, self.core_name) if not verify_recent_file_creation(file_path): + syslog.syslog(syslog.LOG_INFO, "Spurious Invocation. 
{} is not created within last {} sec".format(file_path, TIME_BUF)) return - self.db = SonicV2Connector(use_unix_socket_path=True) - self.db.connect(CFG_DB) - self.db.connect(STATE_DB) if self.db.get(CFG_DB, AUTO_TS, CFG_INVOC_TS) != "enabled": return @@ -85,6 +80,7 @@ def handle_core_dump_creation_event(self): proc_cooloff = 0.0 cooloff_passed = self.verify_cooloff(global_cooloff, proc_cooloff, process_name) + print(cooloff_passed) if cooloff_passed: since_cfg = self.get_since_arg() new_file = self.invoke_ts_cmd(since_cfg) @@ -109,7 +105,7 @@ def invoke_ts_cmd(self, since_cfg): new_list = get_ts_dumps(True) diff = list(set(new_list).difference(set(self.curr_ts_list))) self.curr_ts_list = new_list - if len(diff) == 0: + if not diff: syslog.syslog(syslog.LOG_ERR, "{} was run, but no techsupport dump is found".format(cmd)) else: syslog.syslog(syslog.LOG_INFO, "{} is successful, {} is created".format(cmd, diff)) @@ -127,7 +123,7 @@ def verify_cooloff(self, global_cooloff, proc_cooloff, proc): ts_map = self.db.get_all(STATE_DB, TS_MAP) self.parse_ts_map(ts_map) - + print(self.core_ts_map) if proc_cooloff and proc in self.core_ts_map: last_creation_time = self.core_ts_map[proc][0][0] if time.time() - last_creation_time < proc_cooloff: @@ -138,6 +134,8 @@ def verify_cooloff(self, global_cooloff, proc_cooloff, proc): def parse_ts_map(self, ts_map): """Create proc_name, ts_dump & creation_time map""" + if not ts_map: + return for ts_dump, tup in ts_map.items(): core_dump, creation_time, proc_name = tup.split(";") if proc_name not in self.core_ts_map: @@ -153,17 +151,18 @@ def fetch_exit_event(self): start = time.time() while time.time() - start <= WAIT_BUFFER: data = self.db.get_all("STATE_DB", CRITICAL_PROC) - for field in data: - try: - pid_, comm_ = data[field].split(";") - if pid_ == pid and comm in comm_: - feature_name, supervisor_proc_name = field.split(";") - break - elif comm_ == NO_COMM and pid_ == pid: - feature_name, supervisor_proc_name = field.split(";") + if data: + for field in data: + try: + pid_, comm_ = data[field].split(";") + if pid_ == pid and comm in comm_: + feature_name, supervisor_proc_name = field.split(";") + break + elif comm_ == NO_COMM and pid_ == pid: + feature_name, supervisor_proc_name = field.split(";") + continue + except Exception as e: continue - except Exception as e: - continue if feature_name and supervisor_proc_name: break time.sleep(SLEEP_FOR) @@ -172,12 +171,15 @@ def fetch_exit_event(self): def main(): parser = argparse.ArgumentParser(description='Auto Techsupport Invocation and CoreDump Mgmt Script') - parser.add_argument('name', type=str, help='Core Dump Name', required=True) + parser.add_argument('name', type=str, help='Core Dump Name') args = parser.parse_args() syslog.openlog(logoption=syslog.LOG_PID) - cls = CoreDumpCreateHandle() - cls.handle_core_dump_creation_event(args.name) - handle_coredump_cleanup(dump_name) + db = SonicV2Connector(use_unix_socket_path=True) + db.connect(CFG_DB) + db.connect(STATE_DB) + cls = CriticalProcCoreDumpHandle(args.name, db) + cls.handle_core_dump_creation_event() + handle_coredump_cleanup(args.name, db) if __name__ == "__main__": main() \ No newline at end of file diff --git a/scripts/techsupport_cleanup b/scripts/techsupport_cleanup.py similarity index 89% rename from scripts/techsupport_cleanup rename to scripts/techsupport_cleanup.py index 6ea0f3801c..9fa30a698a 100644 --- a/scripts/techsupport_cleanup +++ b/scripts/techsupport_cleanup.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python3 - """ techsupport_cleanup script. 
This script is invoked by the generate_dump script for techsupport cleanup @@ -25,14 +23,11 @@ def clean_state_db_entries(removed_files, db): db_conn.hdel(TS_MAP, os.path.basename(file)) -def handle_techsupport_creation_event(dump_name): +def handle_techsupport_creation_event(dump_name, db): file_path = os.path.join(TS_DIR, dump_name) if not verify_recent_file_creation(file_path): return curr_list = get_ts_dumps() - db = SonicV2Connector(host="127.0.0.1") - db.connect(CFG_DB) - db.connect(STATE_DB) if db.get(CFG_DB, AUTO_TS, CFG_TS_CLEANUP) != "enabled": return @@ -54,10 +49,13 @@ def handle_techsupport_creation_event(dump_name): def main(): parser = argparse.ArgumentParser(description='Auto Techsupport Invocation and CoreDump Mgmt Script') - parser.add_argument('name', type=str, help='TechSupport Dump Name', required=True) + parser.add_argument('name', type=str, help='TechSupport Dump Name') args = parser.parse_args() syslog.openlog(logoption=syslog.LOG_PID) - handle_techsupport_creation_event(args.name) + db = SonicV2Connector(use_unix_socket_path=True) + db.connect(CFG_DB) + db.connect(STATE_DB) + handle_techsupport_creation_event(args.name, db) if __name__ == "__main__": diff --git a/setup.py b/setup.py index cf505b64d5..9b212f5ea6 100644 --- a/setup.py +++ b/setup.py @@ -143,7 +143,9 @@ def run_tests(self): 'scripts/watermarkcfg', 'scripts/sonic-kdump-config', 'scripts/centralize_database', - 'scripts/null_route_helper' + 'scripts/null_route_helper', + 'scripts/coredump_gen_handler.py', + 'scripts/techsupport_cleanup.py' ], entry_points={ 'console_scripts': [ @@ -229,5 +231,6 @@ def run_tests(self): 'Topic :: Utilities', ], keywords='sonic SONiC utilities command line cli CLI', + cmdclass={"pytest": PyTest}, test_suite='setup.get_test_suite' ) diff --git a/tests/auto_techsupport_tests/__init__.py b/tests/auto_techsupport_tests/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/auto_techsupport_tests/shared_state_mock.py b/tests/auto_techsupport_tests/shared_state_mock.py deleted file mode 100644 index ff79a6a103..0000000000 --- a/tests/auto_techsupport_tests/shared_state_mock.py +++ /dev/null @@ -1,98 +0,0 @@ -import re - - -class RedisSingleton: - """ - Introduced to modify/check Redis DB's data outside of the scripts - Usage: Clear and Set the state of the mock before every test case - """ - __instance = None - - @staticmethod - def getInstance(): - """ Static access method.""" - if RedisSingleton.__instance is None: - RedisSingleton() - return RedisSingleton.__instance - - @staticmethod - def clearState(): - """ Clear the Redis State """ - if RedisSingleton.__instance is not None: - RedisSingleton.__instance.data.clear() - - def __init__(self): - if RedisSingleton.__instance is not None: - raise Exception("This class is a singleton!") - else: - self.data = dict() - RedisSingleton.__instance = self - - -class MockConn(object): - """ - SonicV2Connector Mock - """ - def __init__(self, **kwargs): - self.redis = RedisSingleton.getInstance() - - def connect(self, db_name): - if db_name not in self.redis.data: - self.redis.data[db_name] = {} - - def get(self, db_name, key, field): - return self.redis.data[db_name].get(key, {}).get(field, "") - - def keys(self, db_name, pattern): - pattern = re.escape(pattern) - pattern = pattern.replace("\\*", ".*") - filtered_keys = [] - all_keys = self.redis.data[db_name].keys() - for key in all_keys: - if re.match(pattern, key): - filtered_keys.append(key) - return filtered_keys - - def get_all(self, db_name, key): - 
return self.redis.data[db_name].get(key, {}) - - def set(self, db_name, key, field, value, blocking=True): - if key not in self.redis.data[db_name]: - self.redis.data[db_name][key] = {} - self.redis.data[db_name][key][field] = value - - def hmset(self, db_name, key, hash): - self.redis.data[db_name][key] = hash - - def hexists(self, db_name, key, field): - if key in self.redis.data[db_name]: - return True - else: - return False - - def exists(self, db_name, key): - if key in self.redis.data[db_name]: - return True - else: - return False - - def get_redis_client(self, db_name): - return MockClient(db_name) - - -class MockClient(object): - def __init__(self, db_name): - self.redis = RedisSingleton.getInstance() - self.db_name = db_name - - def hdel(self, key, field): - try: - del self.redis.data[self.db_name][key][field] - except: - pass - - def hset(self, key, field, value): - try: - self.redis.data[self.db_name][key][field] = value - except: - pass \ No newline at end of file diff --git a/tests/auto_techsupport_tests/coredump_gen_handler_test.py b/tests/coredump_gen_handler_test.py similarity index 62% rename from tests/auto_techsupport_tests/coredump_gen_handler_test.py rename to tests/coredump_gen_handler_test.py index ffed2c0e8d..af777ae919 100644 --- a/tests/auto_techsupport_tests/coredump_gen_handler_test.py +++ b/tests/coredump_gen_handler_test.py @@ -5,58 +5,38 @@ from pyfakefs.fake_filesystem_unittest import Patcher from swsscommon import swsscommon from utilities_common.general import load_module_from_source -from .shared_state_mock import RedisSingleton, MockConn +from utilities_common.db import Db +from .mock_tables import dbconnector -curr_test_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "../")) -test_dir_path = os.path.dirname(curr_test_path) -modules_path = os.path.dirname(test_dir_path) -scripts_path = os.path.join(modules_path, 'scripts') -sys.path.insert(0, modules_path) +sys.path.append("scripts") +import coredump_gen_handler as cdump_mod -# Load the file under test -script_path = os.path.join(scripts_path, 'coredump_gen_handler') -cdump_mod = load_module_from_source('coredump_gen_handler', script_path) -# Mock the SonicV2Connector -cdump_mod.SonicV2Connector = MockConn +def set_auto_ts_cfg(redis_mock, auto_invoke_ts="disabled", + core_cleanup="disabled", + cooloff="0", + core_usage="0", + since_cfg="None"): + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.CFG_INVOC_TS, auto_invoke_ts) + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.COOLOFF, cooloff) + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.CFG_CORE_USAGE, core_usage) + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.CFG_CORE_CLEANUP, core_cleanup) + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.CFG_SINCE, since_cfg) -# Mock Handle to the data inside the Redis -RedisHandle = RedisSingleton.getInstance() +def set_feature_table_cfg(redis_mock, ts="disabled", cooloff="0", container_name="swss"): + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.FEATURE.format(container_name), cdump_mod.TS, ts) + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.FEATURE.format(container_name), cdump_mod.COOLOFF, cooloff) -def set_auto_ts_cfg(**kwargs): - invoke_ts = kwargs[cdump_mod.CFG_INVOC_TS] if cdump_mod.CFG_INVOC_TS in kwargs else "disabled" - core_cleanup = kwargs[cdump_mod.CFG_CORE_CLEANUP] if cdump_mod.CFG_CORE_CLEANUP in kwargs else "disabled" - cooloff = kwargs[cdump_mod.COOLOFF] if cdump_mod.COOLOFF in kwargs else "0" - core_usage = 
kwargs[cdump_mod.CFG_CORE_USAGE] if cdump_mod.CFG_CORE_USAGE in kwargs else "0" - since_cfg = kwargs[cdump_mod.CFG_SINCE] if cdump_mod.CFG_SINCE in kwargs else "None" - if cdump_mod.CFG_DB not in RedisHandle.data: - RedisHandle.data[cdump_mod.CFG_DB] = {} - RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.AUTO_TS] = {cdump_mod.CFG_INVOC_TS: invoke_ts, - cdump_mod.COOLOFF: cooloff, - cdump_mod.CFG_CORE_USAGE: core_usage, - cdump_mod.CFG_CORE_CLEANUP: core_cleanup, - cdump_mod.CFG_SINCE: since_cfg} - -def set_feature_table_cfg(ts="disabled", cooloff="0", container_name="swss"): - if cdump_mod.CFG_DB not in RedisHandle.data: - RedisHandle.data[cdump_mod.CFG_DB] = {} - RedisHandle.data[cdump_mod.CFG_DB][cdump_mod.FEATURE.format(container_name)] = {cdump_mod.TS: ts, - cdump_mod.COOLOFF: cooloff} - - -def populate_state_db(use_default=True, data=None): - if use_default: - data = {cdump_mod.TS_MAP: {"sonic_dump_random1.tar.gz": "portsyncd;1575985;portsyncd", - "sonic_dump_random2.tar.gz": "syncd;1575988;syncd"}, - cdump_mod.CRITICAL_PROC: {"swss;orchagent": "123;orchagent"}} - if cdump_mod.STATE_DB not in RedisHandle.data: - RedisHandle.data[cdump_mod.STATE_DB] = {} - RedisHandle.data[cdump_mod.STATE_DB][cdump_mod.TS_MAP] = {} - RedisHandle.data[cdump_mod.STATE_DB][cdump_mod.CRITICAL_PROC] = {} - for key in data: - RedisHandle.data[cdump_mod.STATE_DB][key] = data[key] +def populate_state_db(redis_mock, + ts_map={"sonic_dump_random1.tar.gz": "orchagent;1575985;orchagent", + "sonic_dump_random2.tar.gz": "syncd;1575988;syncd"}, + crit_proc={"swss;orchagent": "123;orchagent"}): + for field, value in ts_map.items(): + redis_mock.set(cdump_mod.STATE_DB, cdump_mod.TS_MAP, field, value) + for field, value in crit_proc.items(): + redis_mock.set(cdump_mod.STATE_DB, cdump_mod.CRITICAL_PROC, field, value) class TestCoreDumpCreationEvent(unittest.TestCase): @@ -70,10 +50,11 @@ def test_invoc_ts_state_db_update(self): Scenario: CFG_INVOC_TS is enabled. 
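The TS_MAP values primed here pack three fields into one semicolon-delimited string, which the handler's parse_ts_map later splits apart; for example:

# "<core_dump>;<creation_epoch>;<process_name>"
entry = "orchagent;1575985;orchagent"
core_dump, creation_time, proc_name = entry.split(";")
assert (core_dump, int(creation_time), proc_name) == ("orchagent", 1575985, "orchagent")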
CFG_CORE_CLEANUP is disabled and no cooloff is provided Check if techsupport is invoked, file is created and State DB is updated """ - RedisSingleton.clearState() - set_auto_ts_cfg(auto_invoke_ts="enabled") - set_feature_table_cfg(ts="enabled") - populate_state_db(True) + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled") + set_feature_table_cfg(redis_mock, ts="enabled") + populate_state_db(redis_mock) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) @@ -86,26 +67,28 @@ def mock_cmd(cmd): patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") - cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", redis_mock) cls.handle_core_dump_creation_event() - cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz") + cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz", redis_mock) assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) - assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "sonic_dump_random3.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "orchagent" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP]["sonic_dump_random3.tar.gz"] + final_state = redis_mock.get_all(cdump_mod.STATE_DB, cdump_mod.TS_MAP) + assert "sonic_dump_random1.tar.gz" in final_state + assert "sonic_dump_random2.tar.gz" in final_state + assert "sonic_dump_random3.tar.gz" in final_state + assert "orchagent" in final_state["sonic_dump_random3.tar.gz"] def test_global_cooloff(self): """ Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is enabled Global cooloff is not passed yet. Check if techsupport isn't invoked. 
""" - RedisSingleton.clearState() - set_auto_ts_cfg(auto_invoke_ts="enabled", cooloff="1") - set_feature_table_cfg(ts="enabled") - populate_state_db(True) + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled", cooloff="1") + set_feature_table_cfg(redis_mock, ts="enabled") + populate_state_db(redis_mock) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) @@ -118,25 +101,28 @@ def mock_cmd(cmd): patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") - cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", redis_mock) cls.handle_core_dump_creation_event() - cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz") + cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz", redis_mock) assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) - assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "sonic_dump_random3.tar.gz" not in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + final_state = redis_mock.get_all(cdump_mod.STATE_DB, cdump_mod.TS_MAP) + assert "sonic_dump_random1.tar.gz" in final_state + assert "sonic_dump_random2.tar.gz" in final_state + assert "sonic_dump_random3.tar.gz" not in final_state def test_per_proc_cooloff(self): """ Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. Global Cooloff is passed But Per Proc cooloff is not passed yet. 
Check if techsupport isn't invoked """ - RedisSingleton.clearState() - set_auto_ts_cfg(state="enabled", cooloff="0.25") - set_feature_table_cfg(ts="enabled", cooloff="10") - populate_state_db(True) + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled", cooloff="0.25") + set_feature_table_cfg(redis_mock, ts="enabled", cooloff="10") + populate_state_db(redis_mock, ts_map={"sonic_dump_random1.tar.gz": + "orchagent;{};orchagent".format(int(time.time()))}) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) @@ -146,28 +132,28 @@ def mock_cmd(cmd): return 1, "", "Command Not Found" return 0, "", "" cdump_mod.subprocess_exec = mock_cmd - patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") - patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") - cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", redis_mock) time.sleep(0.25) # wait for global cooloff to pass cls.handle_core_dump_creation_event() assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) - assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) - assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "sonic_dump_random3.tar.gz" not in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + final_state = redis_mock.get_all(cdump_mod.STATE_DB, cdump_mod.TS_MAP) + assert "sonic_dump_random1.tar.gz" in final_state + assert "sonic_dump_random3.tar.gz" not in final_state def test_invoc_ts_after_cooloff(self): """ Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. All the cooloff's are passed. 
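Note how these cooloff tests arm the per-process window: stamping the primed TS_MAP record with the current epoch makes verify_cooloff believe orchagent triggered a dump just now, e.g.:

import time

# fresh timestamp => the per-proc cooloff for "orchagent" is still running
ts_map = {"sonic_dump_random1.tar.gz": "orchagent;{};orchagent".format(int(time.time()))}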
Check if techsupport is invoked """ - RedisSingleton.clearState() - set_auto_ts_cfg(auto_invoke_ts="enabled", cooloff="0.1") - set_feature_table_cfg(ts="enabled", cooloff="0.25") - populate_state_db(True) + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled", cooloff="0.1") + set_feature_table_cfg(redis_mock, ts="enabled", cooloff="0.25") + populate_state_db(redis_mock, ts_map={"sonic_dump_random1.tar.gz": + "orchagent;{};orchagent".format(int(time.time()))}) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) @@ -180,26 +166,26 @@ def mock_cmd(cmd): patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") - cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", redis_mock) time.sleep(0.25) # wait for all the cooloff's to pass cls.handle_core_dump_creation_event() assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) - assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) - assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "sonic_dump_random3.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "orchagent" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP]["sonic_dump_random3.tar.gz"] + final_state = redis_mock.get_all(cdump_mod.STATE_DB, cdump_mod.TS_MAP) + assert "sonic_dump_random1.tar.gz" in final_state + assert "sonic_dump_random3.tar.gz" in final_state + assert "orchagent" in final_state["sonic_dump_random3.tar.gz"] def test_core_dump_with_no_exit_event(self): """ Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. Core Dump is found but no relevant exit_event entry is found in STATE_DB. """ - RedisSingleton.clearState() - set_auto_ts_cfg(auto_invoke_ts="enabled") - set_feature_table_cfg(ts="enabled") - populate_state_db(False, {}) + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled") + set_feature_table_cfg(redis_mock, ts="enabled", container_name="snmp") + populate_state_db(redis_mock, {}, {}) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) @@ -211,20 +197,22 @@ def mock_cmd(cmd): cdump_mod.subprocess_exec = mock_cmd patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/core/snmpd.12345.123.core.gz") - cls = cdump_mod.CriticalProcCoreDumpHandle("snmpd.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("snmpd.12345.123.core.gz", redis_mock) cls.handle_core_dump_creation_event() assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) - assert "sonic_dump_random3.tar.gz" not in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] + final_state = redis_mock.get_all(cdump_mod.STATE_DB, cdump_mod.TS_MAP) + assert not final_state def test_core_dump_with_exit_event_unknown_cmd(self): """ Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. 
Core Dump is found but the comm in exit_event entry is """ - RedisSingleton.clearState() - set_auto_ts_cfg(auto_invoke_ts="enabled") - set_feature_table_cfg(ts="enabled", container_name="snmp") - populate_state_db(False, {cdump_mod.CRITICAL_PROC: {"snmp;snmp-subagent": "123;"}}) + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled") + set_feature_table_cfg(redis_mock, ts="enabled", container_name="snmp") + populate_state_db(redis_mock, {}, {"snmp;snmp-subagent": "123;"}) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) @@ -236,10 +224,11 @@ def mock_cmd(cmd): cdump_mod.subprocess_exec = mock_cmd patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/core/python3.12345.123.core.gz") - cls = cdump_mod.CriticalProcCoreDumpHandle("python3.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("python3.12345.123.core.gz", redis_mock) cls.handle_core_dump_creation_event() assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) - assert "snmp-subagent" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP]["sonic_dump_random3.tar.gz"] + final_state = redis_mock.get_all(cdump_mod.STATE_DB, cdump_mod.TS_MAP) + assert "snmp-subagent" in final_state["sonic_dump_random3.tar.gz"] def test_feature_table_not_set(self): """ @@ -247,10 +236,11 @@ def test_feature_table_not_set(self): The auto-techsupport in Feature table is not enabled for the core-dump generated Check if techsupport is not invoked """ - RedisSingleton.clearState() - set_auto_ts_cfg(auto_invoke_ts="enabled") - set_feature_table_cfg(ts="disabled", container_name="snmp") - populate_state_db(False, {"snmp:snmp-subagent": "123;python3"}) + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled") + set_feature_table_cfg(redis_mock, ts="disabled", container_name="snmp") + populate_state_db(redis_mock, {}, {"snmp:snmp-subagent": "123;python3"}) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) @@ -262,9 +252,9 @@ def mock_cmd(cmd): cdump_mod.subprocess_exec = mock_cmd patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/core/python3.12345.123.core.gz") - cls = cdump_mod.CriticalProcCoreDumpHandle("python3.12345.123.core.gz") + cls = cdump_mod.CriticalProcCoreDumpHandle("python3.12345.123.core.gz", redis_mock) cls.handle_core_dump_creation_event() - cdump_mod.handle_coredump_cleanup("python3.12345.123.core.gz") + cdump_mod.handle_coredump_cleanup("python3.12345.123.core.gz", redis_mock) assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR) def test_since_argument(self): @@ -272,10 +262,11 @@ def test_since_argument(self): Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. 
Check if techsupport is invoked and since argument is properly applied """ - RedisSingleton.clearState() - set_auto_ts_cfg(auto_invoke_ts="enabled", cooloff="0.1", since="4 days ago") - set_feature_table_cfg(ts="enabled", cooloff="0.2") - populate_state_db(True) + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled", since_cfg="4 days ago") + set_feature_table_cfg(redis_mock, ts="enabled") + populate_state_db(redis_mock) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) @@ -287,30 +278,31 @@ def mock_cmd(cmd): else: return 1, "", "Invalid Command" cdump_mod.subprocess_exec = mock_cmd - patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") + patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") - cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz") - time.sleep(0.2) # wait for cooloff to pass + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", redis_mock) cls.handle_core_dump_creation_event() - cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz") + cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz", redis_mock) assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) - assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "sonic_dump_random3.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "orchagent" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP]["sonic_dump_random3.tar.gz"] + final_state = redis_mock.get_all(cdump_mod.STATE_DB, cdump_mod.TS_MAP) + assert "sonic_dump_random1.tar.gz" in final_state + assert "sonic_dump_random2.tar.gz" in final_state + assert "sonic_dump_random3.tar.gz" in final_state + assert "orchagent" in final_state["sonic_dump_random3.tar.gz"]
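The mocked date --date=... probe in the test above hints at how a since value can be vetted before being spliced into the techsupport command line. A sketch under that assumption (sanitize_since is a hypothetical name; SINCE_DEFAULT mirrors the helper's constant):

import subprocess

SINCE_DEFAULT = "2 days ago"

def sanitize_since(since_cfg):
    # let GNU date decide whether the string parses; fall back otherwise
    rc = subprocess.run(["date", "--date={}".format(since_cfg)],
                        capture_output=True, text=True).returncode
    return since_cfg if rc == 0 else SINCE_DEFAULT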
def test_invalid_since_argument(self): """ Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. Check if techsupport is invoked and an invalid since argument is identified """ - RedisSingleton.clearState() - set_auto_ts_cfg(auto_invoke_ts="enabled", cooloff="0.1", since="whatever") - set_feature_table_cfg(ts="enabled", cooloff="0.2") - populate_state_db(True) + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled", since_cfg="whatever") + set_feature_table_cfg(redis_mock, ts="enabled") + populate_state_db(redis_mock) with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) @@ -325,31 +317,32 @@ def mock_cmd(cmd): patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz") patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz") - cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz") - time.sleep(0.2) # wait for cooloff to pass + cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", redis_mock) cls.handle_core_dump_creation_event() - cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz") + cdump_mod.handle_coredump_cleanup("orchagent.12345.123.core.gz", redis_mock) assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random2.tar.gz" in os.listdir(cdump_mod.TS_DIR) assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR) - assert "sonic_dump_random1.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "sonic_dump_random3.tar.gz" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP] - assert "orchagent" in RedisHandle.data["STATE_DB"][cdump_mod.TS_MAP]["sonic_dump_random3.tar.gz"] + final_state = redis_mock.get_all(cdump_mod.STATE_DB, cdump_mod.TS_MAP) + assert "sonic_dump_random1.tar.gz" in final_state + assert "sonic_dump_random2.tar.gz" in final_state + assert "sonic_dump_random3.tar.gz" in final_state + assert "orchagent" in final_state["sonic_dump_random3.tar.gz"] def test_core_dump_cleanup(self): """ Scenario: CFG_CORE_CLEANUP is enabled. core-dump limit is crossed Verify whether cleanup is performed """ db_wrap = Db() redis_mock = db_wrap.db set_auto_ts_cfg(redis_mock, core_cleanup="enabled", core_usage="6.0") with Patcher() as patcher: patcher.fs.set_disk_usage(1000, path="/var/core/") patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz", st_size=25) patcher.fs.create_file("/var/core/lldpmgrd.12345.22.core.gz", st_size=25) patcher.fs.create_file("/var/core/python3.12345.21.core.gz", st_size=25) - cdump_mod.handle_coredump_cleanup("python3.12345.21.core.gz") + cdump_mod.handle_coredump_cleanup("python3.12345.21.core.gz", redis_mock) current_fs = os.listdir(cdump_mod.CORE_DUMP_DIR) assert len(current_fs) == 2 assert "orchagent.12345.123.core.gz" not in current_fs
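Worked numbers for test_core_dump_cleanup above: with the 1000-byte fake disk and core_usage="6.0", the budget is 60 bytes while the three 25-byte cores occupy 75, so exactly one oldest core has to go:

budget = int(6.0 * 1000 / 100)  # 60 bytes allowed under the 6% cap
used = 3 * 25                   # 75 bytes of cores on disk
assert used - budget == 15      # freeing one 25-byte core suffices, so 2 remain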
@@ -361,8 +354,9 @@ def test_core_usage_limit_not_crossed(self): Scenario: CFG_CORE_CLEANUP is enabled. core-dump limit is not crossed Verify that no cleanup is performed """ - RedisSingleton.clearState() - set_auto_ts_cfg(coredump_cleanup="enabled", core_usage="5.0") + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, core_cleanup="enabled", core_usage="5.0") with Patcher() as patcher: def mock_cmd(cmd): cmd_str = " ".join(cmd) @@ -373,7 +367,7 @@ def mock_cmd(cmd): patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz", st_size=25) patcher.fs.create_file("/var/core/lldpmgrd.12345.22.core.gz", st_size=25) patcher.fs.create_file("/var/core/python3.12345.21.core.gz", st_size=25) - cdump_mod.handle_coredump_cleanup("python3.12345.21.core.gz") + cdump_mod.handle_coredump_cleanup("python3.12345.21.core.gz", redis_mock) current_fs = os.listdir(cdump_mod.CORE_DUMP_DIR) assert len(current_fs) == 3 assert "orchagent.12345.123.core.gz" in current_fs diff --git a/tests/auto_techsupport_tests/techsupport_cleanup_test.py b/tests/techsupport_cleanup_test.py similarity index 66% rename from tests/auto_techsupport_tests/techsupport_cleanup_test.py rename to tests/techsupport_cleanup_test.py index 31dfc08b02..1fc82336c8 100644 --- a/tests/auto_techsupport_tests/techsupport_cleanup_test.py +++ b/tests/techsupport_cleanup_test.py @@ -1,34 +1,20 @@ import os import sys -import pyfakefs +import pyfakefs import unittest from pyfakefs.fake_filesystem_unittest import Patcher from swsscommon import swsscommon from utilities_common.general import load_module_from_source -from .shared_state_mock import RedisSingleton, MockConn +from utilities_common.db import Db +from .mock_tables import dbconnector -curr_test_path = os.path.abspath(os.path.join(os.path.abspath(__file__), "../")) -test_dir_path = os.path.dirname(curr_test_path) -modules_path = os.path.dirname(test_dir_path) -scripts_path = os.path.join(modules_path, 'scripts') -sys.path.insert(0, modules_path) +sys.path.append("scripts") +import techsupport_cleanup as ts_mod -# Load the file under test -script_path = os.path.join(scripts_path, 'techsupport_cleanup') -ts_mod = load_module_from_source('techsupport_cleanup', script_path) -# Mock the SonicV2Connector -ts_mod.SonicV2Connector = MockConn - -# Mock Handle to the data inside the Redis -RedisHandle = RedisSingleton.getInstance() - - -def set_auto_ts_cfg(**kwargs): - ts_cleanup = kwargs[ts_mod.CFG_TS_CLEANUP] if ts_mod.CFG_TS_CLEANUP in kwargs else "disabled" - max_ts = kwargs[ts_mod.CFG_MAX_TS] if ts_mod.CFG_MAX_TS in kwargs else "0" - RedisHandle.data[ts_mod.CFG_DB] = {ts_mod.AUTO_TS : {ts_mod.CFG_TS_CLEANUP : ts_cleanup, - ts_mod.CFG_MAX_TS : max_ts}} +def set_auto_ts_cfg(redis_mock, ts_cleanup="disabled", max_ts="0"): + redis_mock.set(ts_mod.CFG_DB, ts_mod.AUTO_TS, ts_mod.CFG_TS_CLEANUP, ts_cleanup) + redis_mock.set(ts_mod.CFG_DB, ts_mod.AUTO_TS, ts_mod.CFG_MAX_TS, max_ts)
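Numbers behind the test that follows: max_ts="5" caps /var/dump at 50 bytes of the 1000-byte fake disk, and the three 30-byte dumps already exceed that, so only the disabled TS_CLEANUP flag explains why all three survive:

cap = int(5 * 1000 / 100)  # 50 bytes allowed under the 5% cap
assert 3 * 30 > cap        # the limit is exceeded, yet cleanup is skipped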
class TestTechsupportCreationEvent(unittest.TestCase): @@ -38,14 +24,15 @@ def test_no_cleanup_state_disabled(self): Scenario: TS_CLEANUP is disabled. Check no cleanup is performed, even though the techsupport limit is already crossed """ - RedisSingleton.clearState() - set_auto_ts_cfg(max_techsupport_size="5") + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, max_ts="5") with Patcher() as patcher: patcher.fs.set_disk_usage(1000, path="/var/dump/") patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=30) patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=30) patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=30) - ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz") + ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz", redis_mock) current_fs = os.listdir(ts_mod.TS_DIR) print(current_fs) assert len(current_fs) == 3 @@ -58,14 +45,15 @@ def test_no_cleanup_state_enabled(self): Scenario: TS_CLEANUP is enabled. Verify no cleanup is performed, as the techsupport limit hasn't been crossed yet """ - RedisSingleton.clearState() - set_auto_ts_cfg(techsupport_cleanup="enabled", max_techsupport_size="10") + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, ts_cleanup="enabled", max_ts="10") with Patcher() as patcher: patcher.fs.set_disk_usage(1000, path="/var/dump/") patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=30) patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=30) patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=30) - ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz") + ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz", redis_mock) current_fs = os.listdir(ts_mod.TS_DIR) print(current_fs) assert len(current_fs) == 3 @@ -78,14 +66,15 @@ def test_dump_cleanup(self): Scenario: TS_CLEANUP is enabled. techsupport size limit is crossed Verify whether cleanup is performed """ - RedisSingleton.clearState() - set_auto_ts_cfg(techsupport_cleanup="enabled", max_techsupport_size="5") + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, ts_cleanup="enabled", max_ts="5") with Patcher() as patcher: patcher.fs.set_disk_usage(1000, path="/var/dump/") patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=25) patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=25) patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=25) - ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz") + ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz", redis_mock) current_fs = os.listdir(ts_mod.TS_DIR) assert len(current_fs) == 2 assert "sonic_dump_random1.tar.gz" not in current_fs
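test_state_db_update below checks the bookkeeping half of cleanup: when a dump file is pruned, its TS_MAP field has to go with it. A sketch of that sync, mirroring clean_state_db_entries and using the STATE_DB key the show plugin reads:

import os

def drop_from_ts_map(redis_client, removed_files, key="AUTO_TECHSUPPORT|TS_CORE_MAP"):
    # one hdel per deleted dump keeps the map consistent with the filesystem
    for path in removed_files:
        redis_client.hdel(key, os.path.basename(path))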
@@ -97,22 +86,23 @@ def test_state_db_update(self): Scenario: TS_CLEANUP is enabled. techsupport size limit is crossed Verify whether cleanup is performed and the state_db is updated """ - RedisSingleton.clearState() - set_auto_ts_cfg(techsupport_cleanup="enabled", max_techsupport_size="5") - RedisHandle.data["STATE_DB"] = {} - RedisHandle.data["STATE_DB"][ts_mod.TS_MAP] = {"sonic_dump_random1.tar.gz": "orchagent;1575985", - "sonic_dump_random2.tar.gz": "syncd;1575988"} + db_wrap = Db() + redis_mock = db_wrap.db + set_auto_ts_cfg(redis_mock, ts_cleanup="enabled", max_ts="5") + redis_mock.set(ts_mod.STATE_DB, ts_mod.TS_MAP, "sonic_dump_random1.tar.gz", "orchagent;1575985;orchagent") + redis_mock.set(ts_mod.STATE_DB, ts_mod.TS_MAP, "sonic_dump_random2.tar.gz", "syncd;1575988;syncd") with Patcher() as patcher: patcher.fs.set_disk_usage(1000, path="/var/dump/") patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz", st_size=25) patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz", st_size=25) patcher.fs.create_file("/var/dump/sonic_dump_random3.tar.gz", st_size=25) - ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz") + ts_mod.handle_techsupport_creation_event("/var/dump/sonic_dump_random3.tar.gz", redis_mock) current_fs = os.listdir(ts_mod.TS_DIR) print(current_fs) assert len(current_fs) == 2 assert "sonic_dump_random1.tar.gz" not in current_fs assert "sonic_dump_random2.tar.gz" in current_fs assert "sonic_dump_random3.tar.gz" in current_fs - assert "sonic_dump_random2.tar.gz" in RedisHandle.data["STATE_DB"][ts_mod.TS_MAP] - assert "sonic_dump_random1.tar.gz" not in RedisHandle.data["STATE_DB"][ts_mod.TS_MAP] \ No newline at end of file + final_state = redis_mock.get_all(ts_mod.STATE_DB, ts_mod.TS_MAP) + assert "sonic_dump_random2.tar.gz" in final_state + assert "sonic_dump_random1.tar.gz" not in final_state \ No newline at end of file diff --git a/utilities_common/auto_techsupport_helper.py b/utilities_common/auto_techsupport_helper.py index efa3cdbe4b..456be71cc4 100644 --- a/utilities_common/auto_techsupport_helper.py +++ b/utilities_common/auto_techsupport_helper.py @@ -53,15 +53,16 @@ SINCE_DEFAULT = "2 days ago" NO_COMM = "" WAIT_BUFFER = 40 -SLEEP_FOR = 4 +SLEEP_FOR = 5 ##### Helper methods -def subprocess_exec(cmd): +def subprocess_exec(cmd, env=None): output = subprocess.run( cmd, capture_output=True, - text=True + text=True, + env=env ) return output.returncode, output.stdout, output.stderr @@ -90,7 +91,7 @@ def verify_recent_file_creation(file_path, in_last_sec=TIME_BUF): def get_stats(ptrn, collect_stats=True): """ - Returns the size of the files (matched by the ptrn) occupied. + Returns the size of the files (matched by the ptrn) occupied.
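Why subprocess_exec grew an env parameter becomes clear one commit later: handlers launched from coredump-compress start without the usual PATH/PYTHONPATH, so callers need a way to hand the child a usable environment. A hypothetical call, assuming the helper above is imported:

import os

env = os.environ.copy()
env["PATH"] = env.get("PATH", "") + ":/usr/local/bin:/usr/bin"
rc, stdout, stderr = subprocess_exec(["show", "techsupport"], env=env)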
Also returns the list of files Sorted by the Descending order of creation time & size """ files = glob.glob(ptrn) From 68f7e5cd36f5602dcf60f5358ac85ff4ce819dff Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Wed, 18 Aug 2021 19:44:39 +0000 Subject: [PATCH 24/60] scripts updated Signed-off-by: Vivek Reddy Karri --- scripts/coredump-compress | 7 ++++++- scripts/generate_dump | 13 +++---------- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/scripts/coredump-compress b/scripts/coredump-compress index 783bc9e02d..4bafc66200 100755 --- a/scripts/coredump-compress +++ b/scripts/coredump-compress @@ -16,4 +16,9 @@ fi /bin/gzip -1 - > /var/core/${PREFIX}core.gz -nohup /usr/local/bin/coredump_gen_handler ${PREFIX}core.gz & \ No newline at end of file +# coredump_gen_handler invokes techsupport if all the other required conditions are met +# explicitly passing in the env vars because coredump-compress's namespace doesn't have these set by default +setsid $(echo > /tmp/coredump_gen_handler.log; + export PYTHONPATH=$PYTHONPATH:/usr/local/lib/python3.7/dist-packages/; + export PATH=$PATH:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin; + python3 /usr/local/bin/coredump_gen_handler.py ${PREFIX}core.gz &>> /tmp/coredump_gen_handler.log) & \ No newline at end of file diff --git a/scripts/generate_dump b/scripts/generate_dump index c94d810e6e..302571068f 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -39,14 +39,6 @@ USER=${USER:-root} TIMEOUT_MIN="5" SKIP_BCMCMD=0 -handle_signal() -{ - echo "Generate Dump received interrupt" >&2 - $RM $V -rf $TARDIR - exit 1 -} -trap 'handle_signal' SIGINT - save_bcmcmd() { local start_t=$(date +%s%3N) local end_t=0 @@ -1036,7 +1028,6 @@ handle_error() { # None ############################################################################### main() { - trap 'handle_error $? $LINENO' ERR local start_t=0 local end_t=0 if [ `whoami` != root ] && ! 
$NOOP; @@ -1218,7 +1209,9 @@ main() { fi fi - nohup /usr/local/bin/techsupport_cleanup ${TARFILE} & + # Invoke the TechSupport Cleanup Hook + setsid $(echo > /tmp/techsupport_cleanup.log; + python3 /usr/local/bin/techsupport_cleanup.py ${TARFILE} &>> /tmp/techsupport_cleanup.log) & echo ${TARFILE} } From c446e5f7f37ca005be7f524d8465f125624067d9 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Thu, 19 Aug 2021 00:28:55 +0000 Subject: [PATCH 25/60] CLI added Signed-off-by: Vivek Reddy Karri --- config/feature.py | 12 ++++---- show/feature.py | 23 +++++++++++++++ show/plugins/auto_techsupport.py | 36 +++++------------------ tests/feature_test.py | 50 ++++++++++++++++++++++++++++++++ tests/mock_tables/config_db.json | 22 ++++++++++++++ 5 files changed, 109 insertions(+), 34 deletions(-) diff --git a/config/feature.py b/config/feature.py index c41ae09e3e..4dc3d482b8 100644 --- a/config/feature.py +++ b/config/feature.py @@ -102,13 +102,13 @@ def feature_autorestart(db, name, autorestart): cfgdb.mod_entry('FEATURE', name, {'auto_restart': autorestart}) # -# 'auto_techsupport' command ('config feature auto_techsupport ...') +# 'auto_techsupport' command ('config feature autotechsupport ...') # -@feature.command(name='auto_techsupport', short_help="Enable/disable auto_techsupport capability for the processes running inside of this feature") +@feature.command(name='autotechsupport', short_help="Enable/disable auto_techsupport capability for the processes running inside of this feature") @click.argument('name', metavar='', required=True) -@click.argument('auto_techsupport', metavar='', required=True, type=click.Choice(["enabled", "disabled"])) +@click.argument('autotechsupport', metavar='', required=True, type=click.Choice(["enabled", "disabled"])) @pass_db -def feature_auto_techsupport(db, name, auto_techsupport): +def feature_autotechsupport(db, name, autotechsupport): """Enable/disable auto_techsupport capability for the processes running inside of this feature""" entry_data_set = set() @@ -124,14 +124,14 @@ def feature_auto_techsupport(db, name, auto_techsupport): sys.exit(1) for ns, cfgdb in db.cfgdb_clients.items(): - cfgdb.mod_entry('FEATURE', name, {'auto_techsupport': auto_techsupport}) + cfgdb.mod_entry('FEATURE', name, {'auto_techsupport': autotechsupport}) # # 'cooloff' command ('config feature cooloff ...') # @feature.command(name='cooloff', short_help="Set the cooloff period in seconds for the auto_techsupport capability") @click.argument('name', metavar='', required=True) -@click.argument('auto_techsupport', metavar='', required=True, type=int) +@click.argument('cooloff', metavar='', required=True, type=int) @pass_db def feature_cooloff(db, name, cooloff): """Set the cooloff period in seconds for the auto_techsupport capability""" diff --git a/show/feature.py b/show/feature.py index f6aab9cf6b..2c08f48250 100644 --- a/show/feature.py +++ b/show/feature.py @@ -162,3 +162,26 @@ def feature_autorestart(db, feature_name): for name in natsorted(list(feature_table.keys())): body.append([name, feature_table[name]['auto_restart']]) click.echo(tabulate(body, header)) + +# +# 'auto_techsupport' subcommand (show feature auto_techsupport) +# +@feature.command('autotechsupport', short_help="Show auto_techsupport state and cooloff for a feature") +@click.argument('feature_name', required=False) +@pass_db +def feature_autotechsupport(db, feature_name): + header = ['Feature', 'Auto Techsupport', 'Cooloff (Sec)'] + body = [] + feature_table = db.cfgdb.get_table('FEATURE') + if feature_name: + if 
feature_table and feature_name in feature_table: + body.append([feature_name, feature_table.get(feature_name, {}).get('auto_techsupport', ""), + feature_table.get(feature_name, {}).get('cooloff', "")]) + else: + click.echo("Can not find feature {}".format(feature_name)) + sys.exit(1) + else: + for name in natsorted(list(feature_table.keys())): + body.append([name, feature_table.get(name, {}).get('auto_techsupport', ""), + feature_table.get(name, {}).get('cooloff', "")]) + click.echo(tabulate(body, header)) diff --git a/show/plugins/auto_techsupport.py b/show/plugins/auto_techsupport.py index a11bdcb11a..ab280ea783 100644 --- a/show/plugins/auto_techsupport.py +++ b/show/plugins/auto_techsupport.py @@ -1,7 +1,5 @@ """ Auto-generated show CLI plugin. - - """ import click @@ -10,9 +8,6 @@ import utilities_common.cli as clicommon - - - def format_attr_value(entry, attr): """ Helper that formats attribute to be presented in the table output. @@ -48,43 +43,28 @@ def format_group_value(entry, attrs): - - - - - - - - - @click.group(name="auto-techsupport", cls=clicommon.AliasedGroup) def AUTO_TECHSUPPORT(): """ AUTO_TECHSUPPORT part of config_db.json """ - pass - @AUTO_TECHSUPPORT.command(name="global") @clicommon.pass_db def AUTO_TECHSUPPORT_global(db): """ """ - header = [ - -"AUTO INVOKE TS", -"COREDUMP CLEANUP", -"TECHSUPPORT CLEANUP", -"COOLOFF", -"MAX TECHSUPPORT SIZE", -"CORE USAGE", -"SINCE", - -] + "AUTO INVOKE TS", + "COREDUMP CLEANUP", + "TECHSUPPORT CLEANUP", + "COOLOFF", + "MAX TECHSUPPORT SIZE", + "CORE USAGE", + "SINCE", + ] body = [] - table = db.cfgdb.get_table("AUTO_TECHSUPPORT") entry = table.get("global", {}) row = [ diff --git a/tests/feature_test.py b/tests/feature_test.py index be01eede12..7347e4d5c1 100644 --- a/tests/feature_test.py +++ b/tests/feature_test.py @@ -153,6 +153,31 @@ Feature 'bgp' auto-restart is not consistent across namespaces """ +config_auto_techsupport="""\ +Feature Auto Techsupport Cooloff (Sec) +--------- ------------------ --------------- +telemetry enabled 200 +""" + +show_auto_techsupport="""\ +Feature Auto Techsupport Cooloff (Sec) +---------- ------------------ --------------- +bgp enabled 300 +database enabled 300 +dhcp_relay enabled 300 +lldp enabled 300 +nat enabled 300 +pmon enabled 120 +radv disabled 120 +restapi 80 +sflow disabled +snmp disabled +swss disabled +syncd disabled +teamd disabled +telemetry disabled 120 +""" + class TestFeature(object): @classmethod def setup_class(cls): @@ -383,6 +408,31 @@ def test_config_unknown_feature(self, get_cmd_module): print(result.exit_code) assert result.exit_code == 1 + def test_config_auto_techsupport(self, get_cmd_module): + (config, show) = get_cmd_module + db = Db() + runner = CliRunner() + result = runner.invoke(config.config.commands["feature"].commands["autotechsupport"], ["telemetry", "enabled"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + result = runner.invoke(config.config.commands["feature"].commands["cooloff"], ["telemetry", "200"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + result = runner.invoke(show.cli.commands["feature"].commands["autotechsupport"], ["telemetry"], obj=db) + print(result.output) + assert result.exit_code == 0 + assert result.output == config_auto_techsupport + + def test_show_auto_techsupport(self, get_cmd_module): + (config, show) = get_cmd_module + runner = CliRunner() + result = runner.invoke(show.cli.commands["feature"].commands["autotechsupport"], []) + 
print(result.output) + assert result.exit_code == 0 + assert result.output == show_auto_techsupport + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index 4c60cf992a..11bd1da684 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -688,84 +688,106 @@ "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", + "auto_techsupport": "enabled", + "cooloff": "300", "set_owner": "local" }, "FEATURE|database": { "state": "always_enabled", "auto_restart": "always_enabled", "high_mem_alert": "disabled", + "auto_techsupport": "enabled", + "cooloff": "300", "set_owner": "local" }, "FEATURE|dhcp_relay": { "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", + "auto_techsupport": "enabled", + "cooloff": "300", "set_owner": "kube" }, "FEATURE|lldp": { "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", + "auto_techsupport": "enabled", + "cooloff": "300", "set_owner": "kube" }, "FEATURE|nat": { "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", + "auto_techsupport": "enabled", + "cooloff": "300", "set_owner": "local" }, "FEATURE|pmon": { "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", + "auto_techsupport": "enabled", + "cooloff": "120", "set_owner": "kube" }, "FEATURE|radv": { "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", + "auto_techsupport": "disabled", + "cooloff": "120", "set_owner": "kube" }, "FEATURE|restapi": { "state": "disabled", "auto_restart": "enabled", "high_mem_alert": "disabled", + "cooloff": "80", "set_owner": "local" }, "FEATURE|sflow": { "state": "disabled", "auto_restart": "enabled", "high_mem_alert": "disabled", + "auto_techsupport": "disabled", "set_owner": "local" }, "FEATURE|snmp": { "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", + "auto_techsupport": "disabled", "set_owner": "kube" }, "FEATURE|swss": { "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", + "auto_techsupport": "disabled", "set_owner": "local" }, "FEATURE|syncd": { "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", + "auto_techsupport": "disabled", "set_owner": "local" }, "FEATURE|teamd": { "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", + "auto_techsupport": "disabled", "set_owner": "local" }, "FEATURE|telemetry": { "state": "enabled", "auto_restart": "enabled", "high_mem_alert": "disabled", + "auto_techsupport": "disabled", + "cooloff": "120", "set_owner": "kube" }, "DEVICE_METADATA|localhost": { From a5cf16e7d98b5af6e255257b08ea093bca9d8145 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Thu, 19 Aug 2021 03:02:30 +0000 Subject: [PATCH 26/60] show auto_ts history added Signed-off-by: Vivek Reddy Karri --- show/plugins/auto_techsupport.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/show/plugins/auto_techsupport.py b/show/plugins/auto_techsupport.py index ab280ea783..18b7b2954d 100644 --- a/show/plugins/auto_techsupport.py +++ b/show/plugins/auto_techsupport.py @@ -97,12 +97,20 @@ def AUTO_TECHSUPPORT_global(db): {'name': 'since', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} ), ] - body.append(row) click.echo(tabulate.tabulate(body, header)) - +@AUTO_TECHSUPPORT.command(name="history") +@clicommon.pass_db +def AUTO_TECHSUPPORT_history(db): + fv = 
db.db.get_all("STATE_DB", "AUTO_TECHSUPPORT|TS_CORE_MAP") + header = ["Techsupport Dump", "Triggered By", "Critical Process"] + body = [] + for field, value in fv.items(): + core_dump, _, supervisor_crit_proc = value.split(";") + body.append([field, core_dump, supervisor_crit_proc]) + click.echo(tabulate.tabulate(body, header)) def register(cli): From 903a2f0a3b51061898b94c4d5656012f0f10cbd4 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Thu, 19 Aug 2021 03:11:10 +0000 Subject: [PATCH 27/60] Revert "Revert "[config][generic-update] Implementing patch sorting (#1599)"" This reverts commit e6122e9b27ebc0dfcd148824c069b59522833253. --- generic_config_updater/generic_updater.py | 8 +- generic_config_updater/gu_common.py | 539 ++++- generic_config_updater/patch_sorter.py | 1010 ++++++++++ setup.py | 1 + .../files/any_config_db.json | 2 + .../files/any_other_config_db.json | 4 + .../files/config_db_after_multi_patch.json | 2 +- .../config_db_after_single_operation.json | 83 + .../files/config_db_choice.json | 17 + .../files/config_db_no_dependencies.json | 39 + .../files/config_db_with_crm.json | 9 + .../files/config_db_with_device_metadata.json | 16 + .../files/config_db_with_interface.json | 20 + .../config_db_with_portchannel_and_acl.json | 25 + .../config_db_with_portchannel_interface.json | 10 + .../contrainer_with_container_config_db.json | 7 + .../files/dpb_1_split_full_config.json | 35 + .../files/dpb_1_to_4.json-patch | 88 + .../files/dpb_4_splits_full_config.json | 65 + .../files/dpb_4_to_1.json-patch | 58 + .../files/empty_config_db.json | 2 + .../files/simple_config_db_inc_deps.json | 20 + .../generic_config_updater/gu_common_test.py | 310 ++- .../patch_sorter_test.py | 1730 +++++++++++++++++ 24 files changed, 4077 insertions(+), 23 deletions(-) create mode 100644 generic_config_updater/patch_sorter.py create mode 100644 tests/generic_config_updater/files/any_config_db.json create mode 100644 tests/generic_config_updater/files/any_other_config_db.json create mode 100644 tests/generic_config_updater/files/config_db_after_single_operation.json create mode 100644 tests/generic_config_updater/files/config_db_choice.json create mode 100644 tests/generic_config_updater/files/config_db_no_dependencies.json create mode 100644 tests/generic_config_updater/files/config_db_with_crm.json create mode 100644 tests/generic_config_updater/files/config_db_with_device_metadata.json create mode 100644 tests/generic_config_updater/files/config_db_with_interface.json create mode 100644 tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json create mode 100644 tests/generic_config_updater/files/config_db_with_portchannel_interface.json create mode 100644 tests/generic_config_updater/files/contrainer_with_container_config_db.json create mode 100644 tests/generic_config_updater/files/dpb_1_split_full_config.json create mode 100644 tests/generic_config_updater/files/dpb_1_to_4.json-patch create mode 100644 tests/generic_config_updater/files/dpb_4_splits_full_config.json create mode 100644 tests/generic_config_updater/files/dpb_4_to_1.json-patch create mode 100644 tests/generic_config_updater/files/empty_config_db.json create mode 100644 tests/generic_config_updater/files/simple_config_db_inc_deps.json create mode 100644 tests/generic_config_updater/patch_sorter_test.py diff --git a/generic_config_updater/generic_updater.py b/generic_config_updater/generic_updater.py index 079d7ab742..061376b032 100644 --- a/generic_config_updater/generic_updater.py +++ 
b/generic_config_updater/generic_updater.py
@@ -3,6 +3,7 @@
 from enum import Enum
 from .gu_common import GenericConfigUpdaterError, ConfigWrapper, \
                        DryRunConfigWrapper, PatchWrapper
+from .patch_sorter import PatchSorter
 
 CHECKPOINTS_DIR = "/etc/sonic/checkpoints"
 CHECKPOINT_EXT = ".cp.json"
@@ -16,11 +17,6 @@ def release_lock(self):
         # TODO: Implement ConfigLock
         pass
 
-class PatchSorter:
-    def sort(self, patch):
-        # TODO: Implement patch sorter
-        raise NotImplementedError("PatchSorter.sort(patch) is not implemented yet")
-
 class ChangeApplier:
     def apply(self, change):
         # TODO: Implement change applier
@@ -36,7 +32,7 @@ def __init__(self,
                  changeapplier=None,
                  config_wrapper=None,
                  patch_wrapper=None):
-        self.patchsorter = patchsorter if patchsorter is not None else PatchSorter()
+        self.patchsorter = patchsorter if patchsorter is not None else PatchSorter(config_wrapper, patch_wrapper)
         self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier()
         self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper()
         self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper()
diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py
index 2aa6a36d8a..66d9b0d7d9 100644
--- a/generic_config_updater/gu_common.py
+++ b/generic_config_updater/gu_common.py
@@ -1,8 +1,12 @@
 import json
 import jsonpatch
+from jsonpointer import JsonPointer
 import sonic_yang
 import subprocess
+import yang as ly
 import copy
+import re
+from enum import Enum
 
 YANG_DIR = "/usr/local/yang-models"
 
@@ -10,8 +14,26 @@ class GenericConfigUpdaterError(Exception):
     pass
 
 class JsonChange:
-    # TODO: Implement JsonChange
-    pass
+    """
+    A class that describes a partial change to a JSON object.
+    It is similar to JsonPatch, but the order of updating the configs is unknown.
+    Only the final outcome of the update can be retrieved.
+    It provides a single function to apply the change to a given JSON object.
+    """
+    def __init__(self, patch):
+        self.patch = patch
+
+    def apply(self, config):
+        return self.patch.apply(config)
+
+    def __str__(self):
+        return f'{self.patch}'
+
+    def __eq__(self, other):
+        """Overrides the default implementation"""
+        if isinstance(other, JsonChange):
+            return self.patch == other.patch
+        return False
 
 class ConfigWrapper:
     def __init__(self, yang_dir = YANG_DIR):
@@ -110,14 +132,6 @@ def crop_tables_without_yang(self, config_db_as_json):
 
         return sy.jIn
 
-    def _create_and_connect_config_db(self):
-        if self.default_config_db_connector != None:
-            return self.default_config_db_connector
-
-        config_db = ConfigDBConnector()
-        config_db.connect()
-        return config_db
-
 class DryRunConfigWrapper(ConfigWrapper):
     # TODO: implement DryRunConfigWrapper
     # This class will simulate all read/write operations to ConfigDB on a virtual storage unit.
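A quick sketch (not part of the patch) of how the JsonChange wrapper above is meant to be used; it assumes only that the jsonpatch package is available:

    import jsonpatch
    from generic_config_updater.gu_common import JsonChange

    # JsonChange wraps a jsonpatch.JsonPatch; apply() returns the updated
    # config without mutating the input.
    change = JsonChange(jsonpatch.JsonPatch([{"op": "add", "path": "/VLAN", "value": {}}]))
    assert change.apply({}) == {"VLAN": {}}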
@@ -126,11 +140,12 @@ class DryRunConfigWrapper(ConfigWrapper):
 
 class PatchWrapper:
     def __init__(self, config_wrapper=None):
         self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper()
+        self.path_addressing = PathAddressing()
 
     def validate_config_db_patch_has_yang_models(self, patch):
         config_db = {}
         for operation in patch:
-            tokens = operation['path'].split('/')[1:]
+            tokens = self.path_addressing.get_path_tokens(operation[OperationWrapper.PATH_KEYWORD])
             if len(tokens) == 0: # Modifying whole config_db
                 tables_dict = {table_name: {} for table_name in operation['value']}
                 config_db.update(tables_dict)
@@ -174,3 +189,504 @@ def convert_sonic_yang_patch_to_config_db_patch(self, patch):
         target_config_db = self.config_wrapper.convert_sonic_yang_to_config_db(target_yang)
 
         return self.generate_patch(current_config_db, target_config_db)
+
+class OperationType(Enum):
+    ADD = 1
+    REMOVE = 2
+    REPLACE = 3
+
+class OperationWrapper:
+    OP_KEYWORD = "op"
+    PATH_KEYWORD = "path"
+    VALUE_KEYWORD = "value"
+
+    def create(self, operation_type, path, value=None):
+        op_type = operation_type.name.lower()
+
+        operation = {OperationWrapper.OP_KEYWORD: op_type, OperationWrapper.PATH_KEYWORD: path}
+
+        if operation_type in [OperationType.ADD, OperationType.REPLACE]:
+            operation[OperationWrapper.VALUE_KEYWORD] = value
+
+        return operation
+
+class PathAddressing:
+    """
+    Path refers to the 'path' in JsonPatch operations: https://tools.ietf.org/html/rfc6902
+    The path corresponds to JsonPointer: https://tools.ietf.org/html/rfc6901
+
+    All xpath operations in this class are only relevant to ConfigDb and the conversion to YANG xpath.
+    It is not meant to support all the xpath functionalities, just the ones relevant to ConfigDb/YANG.
+    """
+    PATH_SEPARATOR = "/"
+    XPATH_SEPARATOR = "/"
+    def get_path_tokens(self, path):
+        return JsonPointer(path).parts
+
+    def create_path(self, tokens):
+        return JsonPointer.from_parts(tokens).path
+
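For orientation, a tiny sketch (illustrative, not part of the patch) of the OperationWrapper factory introduced above:

    from generic_config_updater.gu_common import OperationType, OperationWrapper

    op = OperationWrapper().create(OperationType.ADD, "/VLAN/Vlan1000", {"vlanid": "1000"})
    # op == {'op': 'add', 'path': '/VLAN/Vlan1000', 'value': {'vlanid': '1000'}}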
+    def get_xpath_tokens(self, xpath):
+        """
+        Splits the given xpath into tokens by '/'.
+
+        Example:
+          xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode
+          tokens: sonic-vlan:sonic-vlan, VLAN_MEMBER, VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8'], tagging_mode
+        """
+        if xpath == "":
+            raise ValueError("xpath cannot be empty")
+
+        if xpath == "/":
+            return []
+
+        idx = 0
+        tokens = []
+        while idx < len(xpath):
+            end = self._get_xpath_token_end(idx+1, xpath)
+            token = xpath[idx+1:end]
+            tokens.append(token)
+            idx = end
+
+        return tokens
+
+    def _get_xpath_token_end(self, start, xpath):
+        idx = start
+        while idx < len(xpath):
+            if xpath[idx] == PathAddressing.XPATH_SEPARATOR:
+                break
+            elif xpath[idx] == "[":
+                idx = self._get_xpath_predicate_end(idx, xpath)
+            idx = idx+1
+
+        return idx
+
+    def _get_xpath_predicate_end(self, start, xpath):
+        idx = start
+        while idx < len(xpath):
+            if xpath[idx] == "]":
+                break
+            elif xpath[idx] == "'":
+                idx = self._get_xpath_single_quote_str_end(idx, xpath)
+            elif xpath[idx] == '"':
+                idx = self._get_xpath_double_quote_str_end(idx, xpath)
+
+            idx = idx+1
+
+        return idx
+
+    def _get_xpath_single_quote_str_end(self, start, xpath):
+        idx = start+1 # skip first single quote
+        while idx < len(xpath):
+            if xpath[idx] == "'":
+                break
+            # libyang implements XPATH 1.0 which does not escape single quotes
+            # libyang src: https://netopeer.liberouter.org/doc/libyang/master/html/howtoxpath.html
+            # XPATH 1.0 src: https://www.w3.org/TR/1999/REC-xpath-19991116/#NT-Literal
+            idx = idx+1
+
+        return idx
+
+    def _get_xpath_double_quote_str_end(self, start, xpath):
+        idx = start+1 # skip first double quote
+        while idx < len(xpath):
+            if xpath[idx] == '"':
+                break
+            # libyang implements XPATH 1.0 which does not escape double quotes
+            # libyang src: https://netopeer.liberouter.org/doc/libyang/master/html/howtoxpath.html
+            # XPATH 1.0 src: https://www.w3.org/TR/1999/REC-xpath-19991116/#NT-Literal
+            idx = idx+1
+
+        return idx
+
+    def create_xpath(self, tokens):
+        """
+        Creates an xpath by combining the given tokens using '/'
+        Example:
+          tokens: module, container, list[key='value'], leaf
+          xpath: /module/container/list[key='value']/leaf
+        """
+        if len(tokens) == 0:
+            return "/"
+
+        return f"{PathAddressing.XPATH_SEPARATOR}{PathAddressing.XPATH_SEPARATOR.join(str(t) for t in tokens)}"
+
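A round-trip sketch of the xpath tokenizer above (mirrors the docstring example; assumes gu_common is importable):

    from generic_config_updater.gu_common import PathAddressing

    pa = PathAddressing()
    xpath = "/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode"
    tokens = pa.get_xpath_tokens(xpath)
    # Predicates survive tokenization: tokens[2] is
    # "VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']"
    assert pa.create_xpath(tokens) == xpath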
+    def find_ref_paths(self, path, config):
+        """
+        Finds the paths referencing any line under the given 'path' within the given 'config'.
+        Example:
+          path: /PORT
+          config:
+            {
+                "VLAN_MEMBER": {
+                    "Vlan1000|Ethernet0": {},
+                    "Vlan1000|Ethernet4": {}
+                },
+                "ACL_TABLE": {
+                    "EVERFLOW": {
+                        "ports": [
+                            "Ethernet4"
+                        ]
+                    },
+                    "EVERFLOWV6": {
+                        "ports": [
+                            "Ethernet4",
+                            "Ethernet8"
+                        ]
+                    }
+                },
+                "PORT": {
+                    "Ethernet0": {},
+                    "Ethernet4": {},
+                    "Ethernet8": {}
+                }
+            }
+          return:
+            /VLAN_MEMBER/Vlan1000|Ethernet0
+            /VLAN_MEMBER/Vlan1000|Ethernet4
+            /ACL_TABLE/EVERFLOW/ports/0
+            /ACL_TABLE/EVERFLOWV6/ports/0
+            /ACL_TABLE/EVERFLOWV6/ports/1
+        """
+        # TODO: Also fetch references by must statement (check similar statements)
+        return self._find_leafref_paths(path, config)
+
+    def _find_leafref_paths(self, path, config):
+        sy = sonic_yang.SonicYang(YANG_DIR)
+        sy.loadYangModel()
+
+        sy.loadData(config)
+
+        xpath = self.convert_path_to_xpath(path, config, sy)
+
+        leaf_xpaths = self._get_inner_leaf_xpaths(xpath, sy)
+
+        ref_xpaths = []
+        for xpath in leaf_xpaths:
+            ref_xpaths.extend(sy.find_data_dependencies(xpath))
+
+        ref_paths = []
+        for ref_xpath in ref_xpaths:
+            ref_path = self.convert_xpath_to_path(ref_xpath, config, sy)
+            ref_paths.append(ref_path)
+
+        return set(ref_paths)
+
+    def _get_inner_leaf_xpaths(self, xpath, sy):
+        if xpath == "/": # Point to Root element which contains all xpaths
+            nodes = sy.root.tree_for()
+        else: # Otherwise get all nodes that match xpath
+            nodes = sy.root.find_path(xpath).data()
+
+        for node in nodes:
+            for inner_node in node.tree_dfs():
+                # TODO: leaflist also can be used as the 'path' argument in 'leafref' so add support to leaflist
+                if self._is_leaf_node(inner_node):
+                    yield inner_node.path()
+
+    def _is_leaf_node(self, node):
+        schema = node.schema()
+        return ly.LYS_LEAF == schema.nodetype()
+
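A hedged usage sketch of find_ref_paths (kept mostly as comments since a real run needs the SONiC YANG models under /usr/local/yang-models):

    pa = PathAddressing()
    # With the docstring's sample config above:
    #   pa.find_ref_paths("/PORT", config)
    # returns the set of JsonPointer paths referencing the ports, e.g.
    #   {"/VLAN_MEMBER/Vlan1000|Ethernet0", "/VLAN_MEMBER/Vlan1000|Ethernet4",
    #    "/ACL_TABLE/EVERFLOW/ports/0", "/ACL_TABLE/EVERFLOWV6/ports/0",
    #    "/ACL_TABLE/EVERFLOWV6/ports/1"}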
+    def convert_path_to_xpath(self, path, config, sy):
+        """
+        Converts the given JsonPatch path (i.e. JsonPointer) to XPATH.
+        Example:
+          path: /VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode
+          xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode
+        """
+        tokens = self.get_path_tokens(path)
+        if len(tokens) == 0:
+            return self.create_xpath(tokens)
+
+        xpath_tokens = []
+        table = tokens[0]
+
+        cmap = sy.confDbYangMap[table]
+
+        # getting the top level element :
+        xpath_tokens.append(cmap['module']+":"+cmap['topLevelContainer'])
+
+        xpath_tokens.extend(self._get_xpath_tokens_from_container(cmap['container'], 0, tokens, config))
+
+        return self.create_xpath(xpath_tokens)
+
+    def _get_xpath_tokens_from_container(self, model, token_index, path_tokens, config):
+        token = path_tokens[token_index]
+        xpath_tokens = [token]
+
+        if len(path_tokens)-1 == token_index:
+            return xpath_tokens
+
+        # check if the configdb token is referring to a list
+        list_model = self._get_list_model(model, token_index, path_tokens)
+        if list_model:
+            new_xpath_tokens = self._get_xpath_tokens_from_list(list_model, token_index+1, path_tokens, config[path_tokens[token_index]])
+            xpath_tokens.extend(new_xpath_tokens)
+            return xpath_tokens
+
+        # check if it is targeting a child container
+        child_container_model = self._get_model(model.get('container'), path_tokens[token_index+1])
+        if child_container_model:
+            new_xpath_tokens = self._get_xpath_tokens_from_container(child_container_model, token_index+1, path_tokens, config[path_tokens[token_index]])
+            xpath_tokens.extend(new_xpath_tokens)
+            return xpath_tokens
+
+        new_xpath_tokens = self._get_xpath_tokens_from_leaf(model, token_index+1, path_tokens, config[path_tokens[token_index]])
+        xpath_tokens.extend(new_xpath_tokens)
+
+        return xpath_tokens
+
+    def _get_xpath_tokens_from_list(self, model, token_index, path_tokens, config):
+        list_name = model['@name']
+
+        tableKey = path_tokens[token_index]
+        listKeys = model['key']['@value']
+        keyDict = self._extractKey(tableKey, listKeys)
+        keyTokens = [f"[{key}='{keyDict[key]}']" for key in keyDict]
+        item_token = f"{list_name}{''.join(keyTokens)}"
+
+        xpath_tokens = [item_token]
+
+        # if whole list-item is needed i.e. if the path is not referencing child leaf items
+        # Example:
+        #   path: /VLAN/Vlan1000
+        #   xpath: /sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']
+        if len(path_tokens)-1 == token_index:
+            return xpath_tokens
+
+        new_xpath_tokens = self._get_xpath_tokens_from_leaf(model, token_index+1, path_tokens, config[path_tokens[token_index]])
+        xpath_tokens.extend(new_xpath_tokens)
+        return xpath_tokens
+
+    def _get_xpath_tokens_from_leaf(self, model, token_index, path_tokens, config):
+        token = path_tokens[token_index]
+
+        # checking all leaves
+        leaf_model = self._get_model(model.get('leaf'), token)
+        if leaf_model:
+            return [token]
+
+        # checking choice
+        choices = model.get('choice')
+        if choices:
+            for choice in choices:
+                cases = choice['case']
+                for case in cases:
+                    leaf_model = self._get_model(case.get('leaf'), token)
+                    if leaf_model:
+                        return [token]
+
+        # checking leaf-list (i.e.
arrays of string, number or bool) + leaf_list_model = self._get_model(model.get('leaf-list'), token) + if leaf_list_model: + # if whole-list is to be returned, just return the token without checking the list items + # Example: + # path: /VLAN/Vlan1000/dhcp_servers + # xpath: /sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers + if len(path_tokens)-1 == token_index: + return [token] + list_config = config[token] + value = list_config[int(path_tokens[token_index+1])] + # To get a leaf-list instance with the value 'val' + # /module-name:container/leaf-list[.='val'] + # Source: Check examples in https://netopeer.liberouter.org/doc/libyang/master/html/howto_x_path.html + return [f"{token}[.='{value}']"] + + raise ValueError("Token not found") + + def _extractKey(self, tableKey, keys): + keyList = keys.split() + # get the value groups + value = tableKey.split("|") + # match lens + if len(keyList) != len(value): + raise ValueError("Value not found for {} in {}".format(keys, tableKey)) + # create the keyDict + keyDict = dict() + for i in range(len(keyList)): + keyDict[keyList[i]] = value[i].strip() + + return keyDict + + def _get_list_model(self, model, token_index, path_tokens): + parent_container_name = path_tokens[token_index] + clist = model.get('list') + # Container contains a single list, just return it + # TODO: check if matching also by name is necessary + if isinstance(clist, dict): + return clist + + if isinstance(clist, list): + configdb_values_str = path_tokens[token_index+1] + # Format: "value1|value2|value|..." + configdb_values = configdb_values_str.split("|") + for list_model in clist: + yang_keys_str = list_model['key']['@value'] + # Format: "key1 key2 key3 ..." + yang_keys = yang_keys_str.split() + # if same number of values and keys, this is the intended list-model + # TODO: Match also on types and not only the length of the keys/values + if len(yang_keys) == len(configdb_values): + return list_model + raise GenericConfigUpdaterError(f"Container {parent_container_name} has multiple lists, " + f"but none of them match the config_db value {configdb_values_str}") + + return None + + def convert_xpath_to_path(self, xpath, config, sy): + """ + Converts the given XPATH to JsonPatch path (i.e. JsonPointer). 
+        Example:
+          xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode
+          path: /VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode
+        """
+        tokens = self.get_xpath_tokens(xpath)
+        if len(tokens) == 0:
+            return self.create_path([])
+
+        if len(tokens) == 1:
+            raise GenericConfigUpdaterError("xpath cannot be just the module-name, there is no mapping to path")
+
+        table = tokens[1]
+        cmap = sy.confDbYangMap[table]
+
+        path_tokens = self._get_path_tokens_from_container(cmap['container'], 1, tokens, config)
+        return self.create_path(path_tokens)
+
+    def _get_path_tokens_from_container(self, model, token_index, xpath_tokens, config):
+        token = xpath_tokens[token_index]
+        path_tokens = [token]
+
+        if len(xpath_tokens)-1 == token_index:
+            return path_tokens
+
+        # check child list
+        list_name = xpath_tokens[token_index+1].split("[")[0]
+        list_model = self._get_model(model.get('list'), list_name)
+        if list_model:
+            new_path_tokens = self._get_path_tokens_from_list(list_model, token_index+1, xpath_tokens, config[token])
+            path_tokens.extend(new_path_tokens)
+            return path_tokens
+
+        container_name = xpath_tokens[token_index+1]
+        container_model = self._get_model(model.get('container'), container_name)
+        if container_model:
+            new_path_tokens = self._get_path_tokens_from_container(container_model, token_index+1, xpath_tokens, config[token])
+            path_tokens.extend(new_path_tokens)
+            return path_tokens
+
+        new_path_tokens = self._get_path_tokens_from_leaf(model, token_index+1, xpath_tokens, config[token])
+        path_tokens.extend(new_path_tokens)
+
+        return path_tokens
+
+    def _get_path_tokens_from_list(self, model, token_index, xpath_tokens, config):
+        token = xpath_tokens[token_index]
+        key_dict = self._extract_key_dict(token)
+
+        # If no keys specified return empty tokens, as we are already inside the correct table.
+        # Also note that the list name in SonicYang has no correspondence in ConfigDb and is ignored.
+        # Example where VLAN_MEMBER_LIST has no specific key/value:
+        #   xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST
+        #   path: /VLAN_MEMBER
+        if not(key_dict):
+            return []
+
+        listKeys = model['key']['@value']
+        key_list = listKeys.split()
+
+        if len(key_list) != len(key_dict):
+            raise GenericConfigUpdaterError(f"Keys in configDb not matching keys in SonicYang. ConfigDb keys: {key_dict.keys()}. SonicYang keys: {key_list}")
+
+        values = [key_dict[k] for k in key_list]
+        path_token = '|'.join(values)
+        path_tokens = [path_token]
+
+        if len(xpath_tokens)-1 == token_index:
+            return path_tokens
+
+        next_token = xpath_tokens[token_index+1]
+        # if the target node is a key, then it does not have a correspondence in the path.
+        # Just return the current 'key1|key2|..' token as it already refers to the keys
+        # Example where the target node is 'name' which is a key in VLAN_MEMBER_LIST:
+        #   xpath: /sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/name
+        #   path: /VLAN_MEMBER/Vlan1000|Ethernet8
+        if next_token in key_dict:
+            return path_tokens
+
+        new_path_tokens = self._get_path_tokens_from_leaf(model, token_index+1, xpath_tokens, config[path_token])
+        path_tokens.extend(new_path_tokens)
+        return path_tokens
+
+    def _get_path_tokens_from_leaf(self, model, token_index, xpath_tokens, config):
+        token = xpath_tokens[token_index]
+
+        # checking all leaves
+        leaf_model = self._get_model(model.get('leaf'), token)
+        if leaf_model:
+            return [token]
+
+        # checking choices
+        choices = model.get('choice')
+        if choices:
+            for choice in choices:
+                cases = choice['case']
+                for case in cases:
+                    leaf_model = self._get_model(case.get('leaf'), token)
+                    if leaf_model:
+                        return [token]
+
+        # checking leaf-list
+        leaf_list_tokens = token.split("[", 1) # split once on the first '[', a regex is used later to fetch keys/values
+        leaf_list_name = leaf_list_tokens[0]
+        leaf_list_model = self._get_model(model.get('leaf-list'), leaf_list_name)
+        if leaf_list_model:
+            # if whole-list is to be returned, just return the list-name without checking the list items
+            # Example:
+            #   xpath: /sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers
+            #   path: /VLAN/Vlan1000/dhcp_servers
+            if len(leaf_list_tokens) == 1:
+                return [leaf_list_name]
+            leaf_list_pattern = r"^[^\[]+(?:\[\.='([^']*)'\])?$"
+            leaf_list_regex = re.compile(leaf_list_pattern)
+            match = leaf_list_regex.match(token)
+            # leaf_list_name = match.group(1)
+            leaf_list_value = match.group(1)
+            list_config = config[leaf_list_name]
+            list_idx = list_config.index(leaf_list_value)
+            return [leaf_list_name, list_idx]
+
+        raise ValueError("Token not found")
+
+    def _extract_key_dict(self, list_token):
+        # Example: VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']
+        # the groups would be ('VLAN_MEMBER'), ("[name='Vlan1000'][port='Ethernet8']")
+        table_keys_pattern = r"^([^\[]+)(.*)$"
+        text = list_token
+        table_keys_regex = re.compile(table_keys_pattern)
+        match = table_keys_regex.match(text)
+        # list_name = match.group(1)
+        all_key_value = match.group(2)
+
+        # Example: [name='Vlan1000'][port='Ethernet8']
+        # the findall groups would be ('name', 'Vlan1000'), ('port', 'Ethernet8')
+        key_value_pattern = r"\[([^=]+)='([^']*)'\]"
+        matches = re.findall(key_value_pattern, all_key_value)
+        key_dict = {}
+        for item in matches:
+            key = item[0]
+            value = item[1]
+            key_dict[key] = value
+
+        return key_dict
+
+    def _get_model(self, model, name):
+        if isinstance(model, dict) and model['@name'] == name:
+            return model
+        if isinstance(model, list):
+            for submodel in model:
+                if submodel['@name'] == name:
+                    return submodel
+
+        return None
diff --git a/generic_config_updater/patch_sorter.py b/generic_config_updater/patch_sorter.py
new file mode 100644
index 0000000000..8bf99ba004
--- /dev/null
+++ b/generic_config_updater/patch_sorter.py
@@ -0,0 +1,1010 @@
+import copy
+import json
+import jsonpatch
+from collections import deque
+from enum import Enum
+from .gu_common import OperationWrapper, OperationType, GenericConfigUpdaterError, JsonChange, PathAddressing
+
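Before the class definitions, a small orientation sketch (illustrative, not part of the patch) of the Diff container defined just below:

    from generic_config_updater.patch_sorter import Diff

    d = Diff(current_config={"PORT": {}}, target_config={"PORT": {}, "VLAN": {}})
    assert not d.has_no_diff()
    # apply_move(move) leaves target_config untouched and returns a new Diff
    # whose current_config has the move applied.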
+ """ + def __init__(self, current_config, target_config): + self.current_config = current_config + self.target_config = target_config + + def __hash__(self): + cc = json.dumps(self.current_config, sort_keys=True) + tc = json.dumps(self.target_config, sort_keys=True) + return hash((cc,tc)) + + def __eq__(self, other): + """Overrides the default implementation""" + if isinstance(other, Diff): + return self.current_config == other.current_config and self.target_config == other.target_config + + return False + + # TODO: Can be optimized to apply the move in place. JsonPatch supports that using the option 'in_place=True' + # Check: https://python-json-patch.readthedocs.io/en/latest/tutorial.html#applying-a-patch + # NOTE: in case move is applied in place, we will need to support `undo_move` as well. + def apply_move(self, move): + new_current_config = move.apply(self.current_config) + return Diff(new_current_config, self.target_config) + + def has_no_diff(self): + return self.current_config == self.target_config + +class JsonMove: + """ + A class similar to JsonPatch operation, but it allows the path to refer to non-existing middle elements. + + JsonPatch operation fails to update json if the path in the patch refers to element that do not exist. + For example, assume json to be: + {} + The following path will be rejected: + /elem1/key1 + The reason is 'elem1' does not exist in the json + + JsonMove on the other hand allows that given the target_config_tokens i.e. the target_config path, + and current_config_tokens i.e. current_config path where the update needs to happen. + """ + def __init__(self, diff, op_type, current_config_tokens, target_config_tokens=None): + operation = JsonMove._to_jsonpatch_operation(diff, op_type, current_config_tokens, target_config_tokens) + self.patch = jsonpatch.JsonPatch([operation]) + self.op_type = operation[OperationWrapper.OP_KEYWORD] + self.path = operation[OperationWrapper.PATH_KEYWORD] + self.value = operation.get(OperationWrapper.VALUE_KEYWORD, None) + + self.op_type = op_type + self.current_config_tokens = current_config_tokens + self.target_config_tokens = target_config_tokens + + @staticmethod + def _to_jsonpatch_operation(diff, op_type, current_config_tokens, target_config_tokens): + operation_wrapper = OperationWrapper() + path_addressing = PathAddressing() + + if op_type == OperationType.REMOVE: + path = path_addressing.create_path(current_config_tokens) + return operation_wrapper.create(op_type, path) + + if op_type == OperationType.REPLACE: + path = path_addressing.create_path(current_config_tokens) + value = JsonMove._get_value(diff.target_config, target_config_tokens) + return operation_wrapper.create(op_type, path, value) + + if op_type == OperationType.ADD: + return JsonMove._to_jsonpatch_add_operation(diff, current_config_tokens, target_config_tokens) + + raise ValueError(f"OperationType {op_type} is not supported") + + @staticmethod + def _get_value(config, tokens): + for token in tokens: + config = config[token] + + return copy.deepcopy(config) + + @staticmethod + def _to_jsonpatch_add_operation(diff, current_config_tokens, target_config_tokens): + """ + Check description of JsonMove class first. + + ADD operation path can refer to elements that do not exist, so to convert JsonMove to JsonPatch operation + We need to remove the non-existing tokens from the current_config path and move them to the value. 
+
+        Example:
+          Assume Target Config:
+            {
+                "dict1":{
+                    "key11": "value11"
+                }
+            }
+          Assume Current Config:
+            {
+            }
+          Assume JsonMove:
+            op_type=add, current_config_tokens=[dict1, key11], target_config_tokens=[dict1, key11]
+
+        Converting this to operation directly would result in:
+            {"op":"add", "path":"/dict1/key11", "value":"value11"}
+        BUT this is not correct since 'dict1' does not exist in Current Config.
+        Instead we convert to:
+            {"op":"add", "path":"/dict1", "value":{"key11": "value11"}}
+        """
+        operation_wrapper = OperationWrapper()
+        path_addressing = PathAddressing()
+
+        # if path refers to whole config i.e. no tokens, then just create the operation
+        if not current_config_tokens:
+            path = path_addressing.create_path(current_config_tokens)
+            value = JsonMove._get_value(diff.target_config, target_config_tokens)
+            return operation_wrapper.create(OperationType.ADD, path, value)
+
+        # Start with getting target-config that match the path all the way to the value in json format
+        # Example:
+        #   Assume target-config:
+        #     {
+        #         "dict1":{
+        #             "key11": "value11",
+        #             "list12": [
+        #                 "value121",
+        #                 "value122"
+        #             ]
+        #         },
+        #         "dict2":{
+        #             "key21": "value21"
+        #         }
+        #     }
+        #   Assume target config tokens:
+        #     dict1, list12, 1
+        #   filtered_config will be
+        #     {
+        #         "dict1":{
+        #             "list12": [
+        #                 "value122"
+        #             ]
+        #         }
+        #     }
+        target_ptr = diff.target_config
+        filtered_config = {}
+        filtered_config_ptr = filtered_config
+        for token_index in range(len(target_config_tokens)):
+            token = target_config_tokens[token_index]
+
+            # Tokens are expected to be of the correct data-type i.e. string, int (list-index)
+            # So not checking the type of the token before consuming it
+            target_ptr = target_ptr[token]
+
+            # if it is the last item, then just return the last target_ptr
+            if token_index == len(target_config_tokens)-1:
+                filtered_value = target_ptr
+            elif isinstance(target_ptr, list):
+                filtered_value = []
+            else:
+                filtered_value = {}
+
+            if isinstance(filtered_config_ptr, list):
+                filtered_config_ptr.append(filtered_value) # filtered_config list will contain only 1 value
+            else: # otherwise it is a dict
+                filtered_config_ptr[token] = filtered_value
+
+            filtered_config_ptr = filtered_value
+
+        # Then from the filtered_config get all the tokens that exist in current_config
+        # This will be the new path, and the new value will be the corresponding filtered_config
+        # Example:
+        #   Assume filtered_config
+        #     {
+        #         "dict1":{
+        #             "key11": "value11"
+        #         }
+        #     }
+        #   Assume current-config
+        #     {
+        #         "dict1":{
+        #             "list12": [
+        #                 "value122"
+        #             ]
+        #         }
+        #     }
+        #   Then the JsonPatch path would be:
+        #     /dict1/list12
+        #   And JsonPatch value would be:
+        #     [ "value122" ]
+        current_ptr = diff.current_config
+        new_tokens = []
+        for token in current_config_tokens:
+            new_tokens.append(token)
+            was_list = isinstance(filtered_config, list)
+            if was_list:
+                # filtered_config list can only have 1 item
+                filtered_config = filtered_config[0]
+            else:
+                filtered_config = filtered_config[token]
+
+            if was_list and token >= len(current_ptr):
+                break
+            if not(was_list) and token not in current_ptr:
+                break
+            current_ptr = current_ptr[token]
+
+        op_type = OperationType.ADD
+        new_path = path_addressing.create_path(new_tokens)
+        new_value = copy.deepcopy(filtered_config)
+
+        return operation_wrapper.create(op_type, new_path, new_value)
+
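The collapse described above, as a small hedged sketch (assumes this module and gu_common are importable):

    from generic_config_updater.gu_common import OperationType
    from generic_config_updater.patch_sorter import Diff, JsonMove

    diff = Diff(current_config={}, target_config={"dict1": {"key11": "value11"}})
    move = JsonMove(diff, OperationType.ADD, ["dict1", "key11"], ["dict1", "key11"])
    # The operation collapses to the first ancestor missing from current_config:
    # an add of /dict1 with value {"key11": "value11"}.
    assert move.apply({}) == {"dict1": {"key11": "value11"}}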
+    @staticmethod
+    def from_patch(patch):
+        ops = list(patch)
+        if len(ops) != 1:
+            raise GenericConfigUpdaterError(
+                f"Only a patch of a single operation can be converted to JsonMove. Patch has {len(ops)} operation(s)")
+
+        return JsonMove.from_operation(ops[0])
+
+    @staticmethod
+    def from_operation(operation):
+        path_addressing = PathAddressing()
+        op_type = OperationType[operation[OperationWrapper.OP_KEYWORD].upper()]
+        path = operation[OperationWrapper.PATH_KEYWORD]
+        if op_type in [OperationType.ADD, OperationType.REPLACE]:
+            value = operation[OperationWrapper.VALUE_KEYWORD]
+        else:
+            value = None
+
+        tokens = path_addressing.get_path_tokens(path)
+
+        target_config = {}
+        target_config_ptr = target_config
+        current_config = {}
+        current_config_ptr = current_config
+        for token in tokens[:-1]:
+            target_config_ptr[token] = {}
+            current_config_ptr[token] = {}
+            target_config_ptr = target_config_ptr[token]
+            current_config_ptr = current_config_ptr[token]
+
+        if tokens:
+            target_config_ptr[tokens[-1]] = value
+        else:
+            # whole-config, just use value
+            target_config = value
+
+        current_config_tokens = tokens
+        if op_type in [OperationType.ADD, OperationType.REPLACE]:
+            target_config_tokens = tokens
+        else:
+            target_config_tokens = None
+
+        diff = Diff(current_config, target_config)
+
+        return JsonMove(diff, op_type, current_config_tokens, target_config_tokens)
+
+    def apply(self, config):
+        return self.patch.apply(config)
+
+    def __str__(self):
+        return str(self.patch)
+
+    def __repr__(self):
+        return str(self.patch)
+
+    def __eq__(self, other):
+        """Overrides the default implementation"""
+        if isinstance(other, JsonMove):
+            return self.patch == other.patch
+        return False
+
+    def __hash__(self):
+        return hash((self.op_type, self.path, json.dumps(self.value)))
+
+class MoveWrapper:
+    def __init__(self, move_generators, move_extenders, move_validators):
+        self.move_generators = move_generators
+        self.move_extenders = move_extenders
+        self.move_validators = move_validators
+
+    def generate(self, diff):
+        processed_moves = set()
+        moves = deque([])
+
+        for move in self._generate_moves(diff):
+            if move in processed_moves:
+                continue
+            processed_moves.add(move)
+            yield move
+            moves.extend(self._extend_moves(move, diff))
+
+        while moves:
+            move = moves.popleft()
+            if move in processed_moves:
+                continue
+            processed_moves.add(move)
+            yield move
+            moves.extend(self._extend_moves(move, diff))
+
+    def validate(self, move, diff):
+        for validator in self.move_validators:
+            if not validator.validate(move, diff):
+                return False
+        return True
+
+    def simulate(self, move, diff):
+        return diff.apply_move(move)
+
+    def _generate_moves(self, diff):
+        for generator in self.move_generators:
+            for move in generator.generate(diff):
+                yield move
+
+    def _extend_moves(self, move, diff):
+        for extender in self.move_extenders:
+            for newmove in extender.extend(move, diff):
+                yield newmove
+
+class DeleteWholeConfigMoveValidator:
+    """
+    A class to validate not deleting whole config as it is not supported by JsonPatch lib.
+    """
+    def validate(self, move, diff):
+        if move.op_type == OperationType.REMOVE and move.path == "":
+            return False
+        return True
+
+class FullConfigMoveValidator:
+    """
+    A class to validate that full config is valid according to YANG models after applying the move.
+    """
+    def __init__(self, config_wrapper):
+        self.config_wrapper = config_wrapper
+
+    def validate(self, move, diff):
+        simulated_config = move.apply(diff.current_config)
+        return self.config_wrapper.validate_config_db_config(simulated_config)
+
+# TODO: Add this validation to YANG models instead
+class UniqueLanesMoveValidator:
+    """
+    A class to validate that lanes are unique across all ports.
+ """ + def validate(self, move, diff): + simulated_config = move.apply(diff.current_config) + + if "PORT" not in simulated_config: + return True + + ports = simulated_config["PORT"] + existing = set() + for port in ports: + attrs = ports[port] + if "lanes" in attrs: + lanes_str = attrs["lanes"] + lanes = lanes_str.split(", ") + for lane in lanes: + if lane in existing: + return False + existing.add(lane) + return True + +class CreateOnlyMoveValidator: + """ + A class to validate create-only fields are only added/removed but never replaced. + Parents of create-only fields are also only added/removed but never replaced when they contain + a modified create-only field. + """ + def __init__(self, path_addressing): + self.path_addressing = path_addressing + + def validate(self, move, diff): + if move.op_type != OperationType.REPLACE: + return True + + # The 'create-only' field needs to be common between current and simulated anyway but different. + # This means it is enough to just get the paths from current_config, paths that are not common can be ignored. + paths = self._get_create_only_paths(diff.current_config) + simulated_config = move.apply(diff.current_config) + + for path in paths: + tokens = self.path_addressing.get_path_tokens(path) + if self._value_exist_but_different(tokens, diff.current_config, simulated_config): + return False + + return True + + # TODO: create-only fields are hard-coded for now, it should be moved to YANG models + def _get_create_only_paths(self, config): + if "PORT" not in config: + return + + ports = config["PORT"] + + for port in ports: + attrs = ports[port] + if "lanes" in attrs: + yield f"/PORT/{port}/lanes" + + def _value_exist_but_different(self, tokens, current_config_ptr, simulated_config_ptr): + for token in tokens: + mod_token = int(token) if isinstance(current_config_ptr, list) else token + + if mod_token not in current_config_ptr: + return False + + if mod_token not in simulated_config_ptr: + return False + + current_config_ptr = current_config_ptr[mod_token] + simulated_config_ptr = simulated_config_ptr[mod_token] + + return current_config_ptr != simulated_config_ptr + +class NoDependencyMoveValidator: + """ + A class to validate that the modified configs do not have dependency on each other. This should prevent + moves that update whole config in a single step where multiple changed nodes are dependent on each. This + way dependent configs are never updated together. + """ + def __init__(self, path_addressing, config_wrapper): + self.path_addressing = path_addressing + self.config_wrapper = config_wrapper + + def validate(self, move, diff): + operation_type = move.op_type + path = move.path + + if operation_type == OperationType.ADD: + simulated_config = move.apply(diff.current_config) + # For add operation, we check the simulated config has no dependencies between nodes under the added path + if not self._validate_paths_config([path], simulated_config): + return False + elif operation_type == OperationType.REMOVE: + # For remove operation, we check the current config has no dependencies between nodes under the removed path + if not self._validate_paths_config([path], diff.current_config): + return False + elif operation_type == OperationType.REPLACE: + if not self._validate_replace(move, diff): + return False + + return True + + # NOTE: this function can be used for validating JsonChange as well which might have more than one move. 
+    # NOTE: this function can be used for validating JsonChange as well which might have more than one move.
+    def _validate_replace(self, move, diff):
+        """
+        The table below shows how mixed deletion/addition within replace affects this validation.
+
+        The table is answering the question whether the change is valid:
+          Y = Yes
+          N = No
+          n/a = not applicable as the change itself is not valid
+
+        symbols meaning:
+          +A, -A: adding, removing config A
+          +refA, -refA: adding, removing a reference to A config
+
+            +refA|-refA|refA
+          --|-----|-----|----
+          +A| N   | n/a | n/a
+          -A| n/a | N   | n/a
+           A| Y   | Y   | Y
+
+        The conclusion is that:
+        +A, +refA is invalid because there is a dependency and a single move should not have dependency
+        -A, -refA is invalid because there is a dependency and a single move should not have dependency
+        A kept unchanged can be ignored, as it is always OK regardless of what happens to its reference
+        Other states are all non applicable since they are invalid to begin with
+
+        So verification would be:
+        if A is deleted and refA is deleted: return False
+        if A is added and refA is added: return False
+        return True
+        """
+        simulated_config = move.apply(diff.current_config)
+        deleted_paths, added_paths = self._get_paths(diff.current_config, simulated_config, [])
+
+        if not self._validate_paths_config(deleted_paths, diff.current_config):
+            return False
+
+        if not self._validate_paths_config(added_paths, diff.target_config):
+            return False
+
+        return True
+
+    def _get_paths(self, current_ptr, target_ptr, tokens):
+        deleted_paths = []
+        added_paths = []
+
+        if isinstance(current_ptr, list) or isinstance(target_ptr, list):
+            tmp_deleted_paths, tmp_added_paths = self._get_list_paths(current_ptr, target_ptr, tokens)
+            deleted_paths.extend(tmp_deleted_paths)
+            added_paths.extend(tmp_added_paths)
+            return deleted_paths, added_paths
+
+        if isinstance(current_ptr, dict):
+            for token in current_ptr:
+                tokens.append(token)
+                if token not in target_ptr:
+                    deleted_paths.append(self.path_addressing.create_path(tokens))
+                else:
+                    tmp_deleted_paths, tmp_added_paths = self._get_paths(current_ptr[token], target_ptr[token], tokens)
+                    deleted_paths.extend(tmp_deleted_paths)
+                    added_paths.extend(tmp_added_paths)
+                tokens.pop()
+
+            for token in target_ptr:
+                tokens.append(token)
+                if token not in current_ptr:
+                    added_paths.append(self.path_addressing.create_path(tokens))
+                tokens.pop()
+
+            return deleted_paths, added_paths
+
+        # current/target configs are not dict nor list, so handle them as string, int, bool, float
+        if current_ptr != target_ptr:
+            # tokens.append(token)
+            deleted_paths.append(self.path_addressing.create_path(tokens))
+            added_paths.append(self.path_addressing.create_path(tokens))
+            # tokens.pop()
+
+        return deleted_paths, added_paths
+
+    def _get_list_paths(self, current_list, target_list, tokens):
+        """
+        Gets all paths within the given list, assume list items are unique
+        """
+        deleted_paths = []
+        added_paths = []
+
+        hashed_target = set(target_list)
+        for index, value in enumerate(current_list):
+            if value not in hashed_target:
+                tokens.append(index)
+                deleted_paths.append(self.path_addressing.create_path(tokens))
+                tokens.pop()
+
+        hashed_current = set(current_list)
+        for index, value in enumerate(target_list):
+            if value not in hashed_current:
+                tokens.append(index)
+                # added_paths refer to paths in the target config and not necessarily the current config
+                added_paths.append(self.path_addressing.create_path(tokens))
+                tokens.pop()
+
+        return deleted_paths, added_paths
+
+    def _validate_paths_config(self, paths, config):
+        """
+        validates all config under paths do not have config
and its references + """ + refs = self._find_ref_paths(paths, config) + for ref in refs: + for path in paths: + if ref.startswith(path): + return False + + return True + + def _find_ref_paths(self, paths, config): + refs = [] + for path in paths: + refs.extend(self.path_addressing.find_ref_paths(path, config)) + return refs + +class LowLevelMoveGenerator: + """ + A class to generate the low level moves i.e. moves corresponding to differences between current/target config + where the path of the move does not have children. + """ + def __init__(self, path_addressing): + self.path_addressing = path_addressing + def generate(self, diff): + single_run_generator = SingleRunLowLevelMoveGenerator(diff, self.path_addressing) + for move in single_run_generator.generate(): + yield move + +class SingleRunLowLevelMoveGenerator: + """ + A class that can only run once to assist LowLevelMoveGenerator with generating the moves. + """ + def __init__(self, diff, path_addressing): + self.diff = diff + self.path_addressing = path_addressing + + def generate(self): + current_ptr = self.diff.current_config + target_ptr = self.diff.target_config + current_tokens = [] + target_tokens = [] + + for move in self._traverse(current_ptr, target_ptr, current_tokens, target_tokens): + yield move + + def _traverse(self, current_ptr, target_ptr, current_tokens, target_tokens): + """ + Traverses the current/target config trees. + The given ptrs can be: + dict + list of string, number, boolean, int + string, number, boolean, int + + list of dict is not allowed + """ + if isinstance(current_ptr, list) or isinstance(target_ptr, list): + for move in self._traverse_list(current_ptr, target_ptr, current_tokens, target_tokens): + yield move + return + + if isinstance(current_ptr, dict) or isinstance(target_ptr, dict): + for key in current_ptr: + current_tokens.append(key) + if key in target_ptr: + target_tokens.append(key) + for move in self._traverse(current_ptr[key], target_ptr[key], current_tokens, target_tokens): + yield move + target_tokens.pop() + else: + for move in self._traverse_current(current_ptr[key], current_tokens): + yield move + + current_tokens.pop() + + for key in target_ptr: + if key in current_ptr: + continue # Already tried in the previous loop + + target_tokens.append(key) + current_tokens.append(key) + for move in self._traverse_target(target_ptr[key], current_tokens, target_tokens): + yield move + current_tokens.pop() + target_tokens.pop() + + return + + # The current/target ptr are neither dict nor list, so they might be string, int, float, bool + for move in self._traverse_value(current_ptr, target_ptr, current_tokens, target_tokens): + yield move + + def _traverse_list(self, current_ptr, target_ptr, current_tokens, target_tokens): + # if same elements different order, just sort by replacing whole list + # Example: + # current: [1, 2, 3, 4] + # target: [4, 3, 2, 1] + # returned move: REPLACE, current, target + current_dict_cnts = self._list_to_dict_with_count(current_ptr) + target_dict_cnts = self._list_to_dict_with_count(target_ptr) + if current_dict_cnts == target_dict_cnts: + for move in self._traverse_value(current_ptr, target_ptr, current_tokens, target_tokens): + yield move + return + + # Otherwise try add missing and remove additional elements + # Try remove + if current_ptr is not None: + for current_index, current_item in enumerate(current_ptr): + if current_dict_cnts[current_item] > target_dict_cnts.get(current_item, 0): + current_tokens.append(current_index) + for move in 
self._traverse_current_value(current_item, current_tokens): + yield move + current_tokens.pop() + # Try add + if target_ptr is not None: + current_cnt = len(current_ptr) if current_ptr is not None else 0 + for target_index, target_item in enumerate(target_ptr): + if target_dict_cnts[target_item] > current_dict_cnts.get(target_item, 0): + index = min(current_cnt, target_index) + current_tokens.append(index) + target_tokens.append(target_index) + for move in self._traverse_target_value(target_item, current_tokens, target_tokens): + yield move + target_tokens.pop() + current_tokens.pop() + + # Try replace + if current_ptr is not None and target_ptr is not None: + for current_index, current_item in enumerate(current_ptr): + for target_index, target_item in enumerate(target_ptr): + if current_dict_cnts[current_item] > target_dict_cnts.get(current_item, 0) and \ + target_dict_cnts[target_item] > current_dict_cnts.get(target_item, 0): + current_tokens.append(current_index) + target_tokens.append(target_index) + for move in self._traverse_value(current_item, target_item, current_tokens, target_tokens): + yield move + target_tokens.pop() + current_tokens.pop() + + def _traverse_value(self, current_value, target_value, current_tokens, target_tokens): + if current_value == target_value: + return + + yield JsonMove(self.diff, OperationType.REPLACE, current_tokens, target_tokens) + + def _traverse_current(self, ptr, current_tokens): + if isinstance(ptr, list): + for move in self._traverse_current_list(ptr, current_tokens): + yield move + return + + if isinstance(ptr, dict): + if len(ptr) == 0: + yield JsonMove(self.diff, OperationType.REMOVE, current_tokens) + return + + for key in ptr: + current_tokens.append(key) + for move in self._traverse_current(ptr[key], current_tokens): + yield move + current_tokens.pop() + + return + + # ptr is not a dict nor a list, it can be string, int, float, bool + for move in self._traverse_current_value(ptr, current_tokens): + yield move + + def _traverse_current_list(self, ptr, current_tokens): + if len(ptr) == 0: + yield JsonMove(self.diff, OperationType.REMOVE, current_tokens) + return + + for index, val in enumerate(ptr): + current_tokens.append(index) + for move in self._traverse_current_value(val, current_tokens): + yield move + current_tokens.pop() + + def _traverse_current_value(self, val, current_tokens): + yield JsonMove(self.diff, OperationType.REMOVE, current_tokens) + + def _traverse_target(self, ptr, current_tokens, target_tokens): + if isinstance(ptr, list): + for move in self._traverse_target_list(ptr, current_tokens, target_tokens): + yield move + return + + if isinstance(ptr, dict): + if len(ptr) == 0: + yield JsonMove(self.diff, OperationType.ADD, current_tokens, target_tokens) + return + + for key in ptr: + current_tokens.append(key) + target_tokens.append(key) + for move in self._traverse_target(ptr[key], current_tokens, target_tokens): + yield move + target_tokens.pop() + current_tokens.pop() + + return + + # target configs are not dict nor list, so handle them as string, int, bool, float + for move in self._traverse_target_value(ptr, current_tokens, target_tokens): + yield move + + def _traverse_target_list(self, ptr, current_tokens, target_tokens): + if len(ptr) == 0: + yield JsonMove(self.diff, OperationType.ADD, current_tokens, target_tokens) + return + + for index, val in enumerate(ptr): + # _traverse_target_list is called when the whole list is missing + # in such case any item should be added at first location i.e. 
0 + current_tokens.append(0) + target_tokens.append(index) + for move in self._traverse_target_value(val, current_tokens, target_tokens): + yield move + target_tokens.pop() + current_tokens.pop() + + def _traverse_target_value(self, val, current_tokens, target_tokens): + yield JsonMove(self.diff, OperationType.ADD, current_tokens, target_tokens) + + def _list_to_dict_with_count(self, items): + counts = dict() + + if items is None: + return counts + + for item in items: + counts[item] = counts.get(item, 0) + 1 + + return counts + +class UpperLevelMoveExtender: + """ + A class to extend the given move by including its parent. It has 3 cases: + 1) If parent was in current and target, then replace the parent + 2) If parent was in current but not target, then delete the parent + 3) If parent was in target but not current, then add the parent + """ + def extend(self, move, diff): + # if no tokens i.e. whole config + if not move.current_config_tokens: + return + + upper_current_tokens = move.current_config_tokens[:-1] + operation_type = self._get_upper_operation(upper_current_tokens, diff) + + upper_target_tokens = None + if operation_type in [OperationType.ADD, OperationType.REPLACE]: + upper_target_tokens = upper_current_tokens + + yield JsonMove(diff, operation_type, upper_current_tokens, upper_target_tokens) + + # get upper operation assumes ConfigDb to not have list-of-objects, only list-of-values + def _get_upper_operation(self, tokens, diff): + current_ptr = diff.current_config + target_ptr = diff.target_config + + for token in tokens: + if token not in current_ptr: + return OperationType.ADD + current_ptr = current_ptr[token] + if token not in target_ptr: + return OperationType.REMOVE + target_ptr = target_ptr[token] + + return OperationType.REPLACE + +class DeleteInsteadOfReplaceMoveExtender: + """ + A class to extend the given REPLACE move by adding a REMOVE move. + """ + def extend(self, move, diff): + operation_type = move.op_type + + if operation_type != OperationType.REPLACE: + return + + new_move = JsonMove(diff, OperationType.REMOVE, move.current_config_tokens) + + yield new_move + +class DeleteRefsMoveExtender: + """ + A class to extend the given DELETE move by adding DELETE moves to configs referring to the path in the move. 
+ """ + def __init__(self, path_addressing): + self.path_addressing = path_addressing + + def extend(self, move, diff): + operation_type = move.op_type + + if operation_type != OperationType.REMOVE: + return + + for ref_path in self.path_addressing.find_ref_paths(move.path, diff.current_config): + yield JsonMove(diff, OperationType.REMOVE, self.path_addressing.get_path_tokens(ref_path)) + +class DfsSorter: + def __init__(self, move_wrapper): + self.visited = {} + self.move_wrapper = move_wrapper + + def sort(self, diff): + if diff.has_no_diff(): + return [] + + diff_hash = hash(diff) + if diff_hash in self.visited: + return None + self.visited[diff_hash] = True + + moves = self.move_wrapper.generate(diff) + + for move in moves: + if self.move_wrapper.validate(move, diff): + new_diff = self.move_wrapper.simulate(move, diff) + new_moves = self.sort(new_diff) + if new_moves is not None: + return [move] + new_moves + + return None + +class BfsSorter: + def __init__(self, move_wrapper): + self.visited = {} + self.move_wrapper = move_wrapper + + def sort(self, diff): + diff_queue = deque([]) + prv_moves_queue = deque([]) + + diff_queue.append(diff) + prv_moves_queue.append([]) + + while len(diff_queue): + diff = diff_queue.popleft() + prv_moves = prv_moves_queue.popleft() + + diff_hash = hash(diff) + if diff_hash in self.visited: + continue + self.visited[diff_hash] = True + + if diff.has_no_diff(): + return prv_moves + + moves = self.move_wrapper.generate(diff) + for move in moves: + if self.move_wrapper.validate(move, diff): + new_diff = self.move_wrapper.simulate(move, diff) + new_prv_moves = prv_moves + [move] + + diff_queue.append(new_diff) + prv_moves_queue.append(new_prv_moves) + + return None + +class MemoizationSorter: + def __init__(self, move_wrapper): + self.visited = {} + self.move_wrapper = move_wrapper + self.mem = {} + + def rec(self, diff): + if diff.has_no_diff(): + return [] + + diff_hash = hash(diff) + if diff_hash in self.mem: + return self.mem[diff_hash] + if diff_hash in self.visited: + return None + self.visited[diff_hash] = True + + moves = self.move_wrapper.generate(diff) + + bst_moves = None + for move in moves: + if self.move_wrapper.validate(move, diff): + new_diff = self.move_wrapper.simulate(move, diff) + new_moves = self.sort(new_diff) + if new_moves != None and (bst_moves is None or len(bst_moves) > len(new_moves)+1): + bst_moves = [move] + new_moves + + self.mem[diff_hash] = bst_moves + return bst_moves + +class Algorithm(Enum): + DFS = 1 + BFS = 2 + MEMOIZATION = 3 + +class SortAlgorithmFactory: + def __init__(self, operation_wrapper, config_wrapper, path_addressing): + self.operation_wrapper = operation_wrapper + self.config_wrapper = config_wrapper + self.path_addressing = path_addressing + + def create(self, algorithm=Algorithm.DFS): + move_generators = [LowLevelMoveGenerator(self.path_addressing)] + move_extenders = [UpperLevelMoveExtender(), + DeleteInsteadOfReplaceMoveExtender(), + DeleteRefsMoveExtender(self.path_addressing)] + move_validators = [DeleteWholeConfigMoveValidator(), + FullConfigMoveValidator(self.config_wrapper), + NoDependencyMoveValidator(self.path_addressing, self.config_wrapper), + UniqueLanesMoveValidator(), + CreateOnlyMoveValidator(self.path_addressing) ] + + move_wrapper = MoveWrapper(move_generators, move_extenders, move_validators) + + if algorithm == Algorithm.DFS: + sorter = DfsSorter(move_wrapper) + elif algorithm == Algorithm.BFS: + sorter = BfsSorter(move_wrapper) + elif algorithm == Algorithm.MEMOIZATION: + sorter = 
MemoizationSorter(move_wrapper) + else: + raise ValueError(f"Algorithm {algorithm} is not supported") + + return sorter + +class PatchSorter: + def __init__(self, config_wrapper, patch_wrapper, sort_algorithm_factory=None): + self.config_wrapper = config_wrapper + self.patch_wrapper = patch_wrapper + self.operation_wrapper = OperationWrapper() + self.path_addressing = PathAddressing() + self.sort_algorithm_factory = sort_algorithm_factory if sort_algorithm_factory else \ + SortAlgorithmFactory(self.operation_wrapper, config_wrapper, self.path_addressing) + + def sort(self, patch, algorithm=Algorithm.DFS): + current_config = self.config_wrapper.get_config_db_as_json() + target_config = self.patch_wrapper.simulate_patch(patch, current_config) + + diff = Diff(current_config, target_config) + + sort_algorithm = self.sort_algorithm_factory.create(algorithm) + moves = sort_algorithm.sort(diff) + + if moves is None: + raise GenericConfigUpdaterError("There is no possible sorting") + + changes = [JsonChange(move.patch) for move in moves] + + return changes diff --git a/setup.py b/setup.py index 9b212f5ea6..7d89b643d4 100644 --- a/setup.py +++ b/setup.py @@ -189,6 +189,7 @@ def run_tests(self): 'jinja2>=2.11.3', 'jsondiff>=1.2.0', 'jsonpatch>=1.32.0', + 'jsonpointer>=1.9', 'm2crypto>=0.31.0', 'natsort>=6.2.1', # 6.2.1 is the last version which supports Python 2. Can update once we no longer support Python 2 'netaddr>=0.8.0', diff --git a/tests/generic_config_updater/files/any_config_db.json b/tests/generic_config_updater/files/any_config_db.json new file mode 100644 index 0000000000..2c63c08510 --- /dev/null +++ b/tests/generic_config_updater/files/any_config_db.json @@ -0,0 +1,2 @@ +{ +} diff --git a/tests/generic_config_updater/files/any_other_config_db.json b/tests/generic_config_updater/files/any_other_config_db.json new file mode 100644 index 0000000000..c258f768cf --- /dev/null +++ b/tests/generic_config_updater/files/any_other_config_db.json @@ -0,0 +1,4 @@ +{ + "VLAN": { + } +} diff --git a/tests/generic_config_updater/files/config_db_after_multi_patch.json b/tests/generic_config_updater/files/config_db_after_multi_patch.json index 042bf1d51b..39dff7d688 100644 --- a/tests/generic_config_updater/files/config_db_after_multi_patch.json +++ b/tests/generic_config_updater/files/config_db_after_multi_patch.json @@ -119,4 +119,4 @@ "key12": "value12" } } -} \ No newline at end of file +} diff --git a/tests/generic_config_updater/files/config_db_after_single_operation.json b/tests/generic_config_updater/files/config_db_after_single_operation.json new file mode 100644 index 0000000000..0f2f447537 --- /dev/null +++ b/tests/generic_config_updater/files/config_db_after_single_operation.json @@ -0,0 +1,83 @@ +{ + "VLAN_MEMBER": { + "Vlan1000|Ethernet0": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet4": { + "tagging_mode": "untagged" + } + }, + "VLAN": { + "Vlan1000": { + "vlanid": "1000", + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + }, + "ACL_TABLE": { + "NO-NSW-PACL-V4": { + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] + }, + "DATAACL": { + "policy_desc": "DATAACL", + "ports": [ + "Ethernet4" + ], + "stage": "ingress", + "type": "L3" + }, + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }, + "EVERFLOWV6": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "Ethernet4", + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + }, + "PORT": 
{ + "Ethernet0": { + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": "100000" + }, + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + } +} diff --git a/tests/generic_config_updater/files/config_db_choice.json b/tests/generic_config_updater/files/config_db_choice.json new file mode 100644 index 0000000000..eaece3248f --- /dev/null +++ b/tests/generic_config_updater/files/config_db_choice.json @@ -0,0 +1,17 @@ +{ + "ACL_RULE": { + "SSH_ONLY|RULE1": { + "L4_SRC_PORT":"65174-6530" + } + }, + "ACL_TABLE": { + "SSH_ONLY": { + "policy_desc": "SSH_ONLY", + "type": "CTRLPLANE", + "stage": "ingress", + "services": [ + "SSH" + ] + } + } +} diff --git a/tests/generic_config_updater/files/config_db_no_dependencies.json b/tests/generic_config_updater/files/config_db_no_dependencies.json new file mode 100644 index 0000000000..12bdd464a5 --- /dev/null +++ b/tests/generic_config_updater/files/config_db_no_dependencies.json @@ -0,0 +1,39 @@ +{ + "VLAN": { + "Vlan1000": { + "vlanid": "1000", + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + }, + "ACL_TABLE": { + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "" + ], + "stage": "ingress", + "type": "MIRROR" + }, + "EVERFLOWV6": { + "policy_desc": "EVERFLOWV6", + "ports": [ + "" + ], + "stage": "ingress", + "type": "MIRRORV6" + } + }, + "PORT": { + "Ethernet0": { + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": "100000" + } + } +} diff --git a/tests/generic_config_updater/files/config_db_with_crm.json b/tests/generic_config_updater/files/config_db_with_crm.json new file mode 100644 index 0000000000..5fd324d988 --- /dev/null +++ b/tests/generic_config_updater/files/config_db_with_crm.json @@ -0,0 +1,9 @@ +{ + "CRM": { + "Config": { + "acl_counter_high_threshold": "90", + "acl_counter_low_threshold": "70", + "acl_counter_threshold_type": "free" + } + } +} \ No newline at end of file diff --git a/tests/generic_config_updater/files/config_db_with_device_metadata.json b/tests/generic_config_updater/files/config_db_with_device_metadata.json new file mode 100644 index 0000000000..34def579f6 --- /dev/null +++ b/tests/generic_config_updater/files/config_db_with_device_metadata.json @@ -0,0 +1,16 @@ +{ + "DEVICE_METADATA": { + "localhost": { + "default_bgp_status": "up", + "default_pfcwd_status": "disable", + "bgp_asn": "65100", + "deployment_id": "1", + "docker_routing_config_mode": "separated", + "hostname": "vlab-01", + "hwsku": "Force10-S6000", + "type": "ToRRouter", + "platform": "x86_64-kvm_x86_64-r0", + "mac": "52:54:00:99:7e:85" + } + } +} \ No newline at end of file diff --git a/tests/generic_config_updater/files/config_db_with_interface.json b/tests/generic_config_updater/files/config_db_with_interface.json new file mode 100644 index 0000000000..2e1c488a4a --- /dev/null +++ b/tests/generic_config_updater/files/config_db_with_interface.json @@ -0,0 +1,20 @@ +{ + "INTERFACE": { + "Ethernet8": {}, + "Ethernet8|10.0.0.1/30": { + "family": "IPv4", + "scope": "global" + } + }, + "PORT": { + "Ethernet8": { + "admin_status": "up", + "alias": "eth8", + "description": 
"Ethernet8", + "fec": "rs", + "lanes": "65", + "mtu": "9000", + "speed": "25000" + } + } +} diff --git a/tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json b/tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json new file mode 100644 index 0000000000..23d33890f3 --- /dev/null +++ b/tests/generic_config_updater/files/config_db_with_portchannel_and_acl.json @@ -0,0 +1,25 @@ +{ + "PORT": { + "Ethernet0": { + "alias": "Eth1/1", + "lanes": "65", + "description": "", + "speed": "10000" + } + }, + "PORTCHANNEL": { + "PortChannel0001": { + "admin_status": "up" + } + }, + "ACL_TABLE": { + "NO-NSW-PACL-V4": { + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0", + "PortChannel0001" + ] + } + } +} diff --git a/tests/generic_config_updater/files/config_db_with_portchannel_interface.json b/tests/generic_config_updater/files/config_db_with_portchannel_interface.json new file mode 100644 index 0000000000..4e05639dc5 --- /dev/null +++ b/tests/generic_config_updater/files/config_db_with_portchannel_interface.json @@ -0,0 +1,10 @@ +{ + "PORTCHANNEL": { + "PortChannel0001": { + "admin_status": "up" + } + }, + "PORTCHANNEL_INTERFACE": { + "PortChannel0001|1.1.1.1/24": {} + } +} diff --git a/tests/generic_config_updater/files/contrainer_with_container_config_db.json b/tests/generic_config_updater/files/contrainer_with_container_config_db.json new file mode 100644 index 0000000000..b0680b22b5 --- /dev/null +++ b/tests/generic_config_updater/files/contrainer_with_container_config_db.json @@ -0,0 +1,7 @@ +{ + "FLEX_COUNTER_TABLE": { + "BUFFER_POOL_WATERMARK": { + "FLEX_COUNTER_STATUS": "enable" + } + } +} diff --git a/tests/generic_config_updater/files/dpb_1_split_full_config.json b/tests/generic_config_updater/files/dpb_1_split_full_config.json new file mode 100644 index 0000000000..2097289606 --- /dev/null +++ b/tests/generic_config_updater/files/dpb_1_split_full_config.json @@ -0,0 +1,35 @@ +{ + "PORT": { + "Ethernet0": { + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": "100000" + } + }, + "ACL_TABLE": { + "NO-NSW-PACL-V4": { + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0" + ] + } + }, + "VLAN_MEMBER": { + "Vlan100|Ethernet0": { + "tagging_mode": "untagged" + } + }, + "VLAN": { + "Vlan100": { + "vlanid": "100", + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + } +} diff --git a/tests/generic_config_updater/files/dpb_1_to_4.json-patch b/tests/generic_config_updater/files/dpb_1_to_4.json-patch new file mode 100644 index 0000000000..8eddd7a19d --- /dev/null +++ b/tests/generic_config_updater/files/dpb_1_to_4.json-patch @@ -0,0 +1,88 @@ +[ + { + "op": "add", + "path": "/PORT/Ethernet3", + "value": { + "alias": "Eth1/4", + "lanes": "68", + "description": "", + "speed": "10000" + } + }, + { + "op": "add", + "path": "/PORT/Ethernet1", + "value": { + "alias": "Eth1/2", + "lanes": "66", + "description": "", + "speed": "10000" + } + }, + { + "op": "add", + "path": "/PORT/Ethernet2", + "value": { + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": "10000" + } + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/lanes", + "value": "65" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/alias", + "value": "Eth1/1" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/description", + "value": "" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/speed", + "value": "10000" + }, + { + "op": "add", + "path": 
"/VLAN_MEMBER/Vlan100|Ethernet2", + "value": { + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/VLAN_MEMBER/Vlan100|Ethernet3", + "value": { + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/VLAN_MEMBER/Vlan100|Ethernet1", + "value": { + "tagging_mode": "untagged" + } + }, + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1", + "value": "Ethernet1" + }, + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/2", + "value": "Ethernet2" + }, + { + "op": "add", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/3", + "value": "Ethernet3" + } +] diff --git a/tests/generic_config_updater/files/dpb_4_splits_full_config.json b/tests/generic_config_updater/files/dpb_4_splits_full_config.json new file mode 100644 index 0000000000..23d1b9ecfc --- /dev/null +++ b/tests/generic_config_updater/files/dpb_4_splits_full_config.json @@ -0,0 +1,65 @@ +{ + "PORT": { + "Ethernet0": { + "alias": "Eth1/1", + "lanes": "65", + "description": "", + "speed": "10000" + }, + "Ethernet1": { + "alias": "Eth1/2", + "lanes": "66", + "description": "", + "speed": "10000" + }, + "Ethernet2": { + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": "10000" + }, + "Ethernet3": { + "alias": "Eth1/4", + "lanes": "68", + "description": "", + "speed": "10000" + } + }, + "ACL_TABLE": { + "NO-NSW-PACL-V4": { + "type": "L3", + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0", + "Ethernet1", + "Ethernet2", + "Ethernet3" + ] + } + }, + "VLAN_MEMBER": { + "Vlan100|Ethernet0": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet1": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet2": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet3": { + "tagging_mode": "untagged" + } + }, + "VLAN": { + "Vlan100": { + "vlanid": "100", + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ] + } + } +} diff --git a/tests/generic_config_updater/files/dpb_4_to_1.json-patch b/tests/generic_config_updater/files/dpb_4_to_1.json-patch new file mode 100644 index 0000000000..33addd290d --- /dev/null +++ b/tests/generic_config_updater/files/dpb_4_to_1.json-patch @@ -0,0 +1,58 @@ +[ + { + "op": "remove", + "path": "/PORT/Ethernet2" + }, + { + "op": "remove", + "path": "/PORT/Ethernet1" + }, + { + "op": "remove", + "path": "/PORT/Ethernet3" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/alias", + "value": "Eth1" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/lanes", + "value": "65, 66, 67, 68" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/description", + "value": "Ethernet0 100G link" + }, + { + "op": "replace", + "path": "/PORT/Ethernet0/speed", + "value": "100000" + }, + { + "op": "remove", + "path": "/VLAN_MEMBER/Vlan100|Ethernet1" + }, + { + "op": "remove", + "path": "/VLAN_MEMBER/Vlan100|Ethernet3" + }, + { + "op": "remove", + "path": "/VLAN_MEMBER/Vlan100|Ethernet2" + }, + { + "op": "remove", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" + }, + { + "op": "remove", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" + }, + { + "op": "remove", + "path": "/ACL_TABLE/NO-NSW-PACL-V4/ports/1" + } +] diff --git a/tests/generic_config_updater/files/empty_config_db.json b/tests/generic_config_updater/files/empty_config_db.json new file mode 100644 index 0000000000..2c63c08510 --- /dev/null +++ b/tests/generic_config_updater/files/empty_config_db.json @@ -0,0 +1,2 @@ +{ +} diff --git a/tests/generic_config_updater/files/simple_config_db_inc_deps.json b/tests/generic_config_updater/files/simple_config_db_inc_deps.json new file mode 
100644 index 0000000000..4554582103 --- /dev/null +++ b/tests/generic_config_updater/files/simple_config_db_inc_deps.json @@ -0,0 +1,20 @@ +{ + "ACL_TABLE": { + "EVERFLOW": { + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet0" + ], + "stage": "ingress", + "type": "MIRROR" + } + }, + "PORT": { + "Ethernet0": { + "alias": "Eth1", + "lanes": "65, 66, 67, 68", + "description": "Ethernet0 100G link", + "speed": "100000" + } + } +} diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index f18ad45799..f69ec08030 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -1,15 +1,12 @@ import json import jsonpatch +import sonic_yang import unittest from unittest.mock import MagicMock, Mock -from .gutest_helpers import create_side_effect_dict, Files +from .gutest_helpers import create_side_effect_dict, Files import generic_config_updater.gu_common as gu_common -# import sys -# sys.path.insert(0,'../../generic_config_updater') -# import gu_common - class TestConfigWrapper(unittest.TestCase): def setUp(self): self.config_wrapper_mock = gu_common.ConfigWrapper() @@ -333,3 +330,306 @@ def __assert_same_patch(self, config_db_patch, sonic_yang_patch, config_wrapper, config_wrapper.convert_sonic_yang_to_config_db(after_update_sonic_yang) self.assertTrue(patch_wrapper.verify_same_json(after_update_config_db_cropped, after_update_sonic_yang_as_config_db)) + +class TestPathAddressing(unittest.TestCase): + def setUp(self): + self.path_addressing = gu_common.PathAddressing() + self.sy_only_models = sonic_yang.SonicYang(gu_common.YANG_DIR) + self.sy_only_models.loadYangModel() + + def test_get_path_tokens(self): + def check(path, tokens): + expected=tokens + actual=self.path_addressing.get_path_tokens(path) + self.assertEqual(expected, actual) + + check("", []) + check("/", [""]) + check("/token", ["token"]) + check("/more/than/one/token", ["more", "than", "one", "token"]) + check("/has/numbers/0/and/symbols/^", ["has", "numbers", "0", "and", "symbols", "^"]) + check("/~0/this/is/telda", ["~", "this", "is", "telda"]) + check("/~1/this/is/forward-slash", ["/", "this", "is", "forward-slash"]) + check("/\\\\/no-escaping", ["\\\\", "no-escaping"]) + check("////empty/tokens/are/ok", ["", "", "", "empty", "tokens", "are", "ok"]) + + def test_create_path(self): + def check(tokens, path): + expected=path + actual=self.path_addressing.create_path(tokens) + self.assertEqual(expected, actual) + + check([], "",) + check([""], "/",) + check(["token"], "/token") + check(["more", "than", "one", "token"], "/more/than/one/token") + check(["has", "numbers", "0", "and", "symbols", "^"], "/has/numbers/0/and/symbols/^") + check(["~", "this", "is", "telda"], "/~0/this/is/telda") + check(["/", "this", "is", "forward-slash"], "/~1/this/is/forward-slash") + check(["\\\\", "no-escaping"], "/\\\\/no-escaping") + check(["", "", "", "empty", "tokens", "are", "ok"], "////empty/tokens/are/ok") + check(["~token", "telda-not-followed-by-0-or-1"], "/~0token/telda-not-followed-by-0-or-1") + + def test_get_xpath_tokens(self): + def check(path, tokens): + expected=tokens + actual=self.path_addressing.get_xpath_tokens(path) + self.assertEqual(expected, actual) + + self.assertRaises(ValueError, check, "", []) + check("/", []) + check("/token", ["token"]) + check("/more/than/one/token", ["more", "than", "one", "token"]) + check("/multi/tokens/with/empty/last/token/", ["multi", "tokens", "with", "empty", "last", "token", ""]) + 
check("/has/numbers/0/and/symbols/^", ["has", "numbers", "0", "and", "symbols", "^"]) + check("/has[a='predicate']/in/the/beginning", ["has[a='predicate']", "in", "the", "beginning"]) + check("/ha/s[a='predicate']/in/the/middle", ["ha", "s[a='predicate']", "in", "the", "middle"]) + check("/ha/s[a='predicate-in-the-end']", ["ha", "s[a='predicate-in-the-end']"]) + check("/it/has[more='than'][one='predicate']/somewhere", ["it", "has[more='than'][one='predicate']", "somewhere"]) + check("/ha/s[a='predicate\"with']/double-quotes/inside", ["ha", "s[a='predicate\"with']", "double-quotes", "inside"]) + check('/a/predicate[with="double"]/quotes', ["a", 'predicate[with="double"]', "quotes"]) + check('/multiple["predicate"][with="double"]/quotes', ['multiple["predicate"][with="double"]', "quotes"]) + check('/multiple["predicate"][with="double"]/quotes', ['multiple["predicate"][with="double"]', "quotes"]) + check('/ha/s[a="predicate\'with"]/single-quote/inside', ["ha", 's[a="predicate\'with"]', "single-quote", "inside"]) + # XPATH 1.0 does not support single-quote within single-quoted string. str literal can be '[^']*' + # Not validating no single-quote within single-quoted string + check("/a/mix['of''quotes\"does']/not/work/well", ["a", "mix['of''quotes\"does']", "not", "work", "well"]) + # XPATH 1.0 does not support double-quotes within double-quoted string. str literal can be "[^"]*" + # Not validating no double-quotes within double-quoted string + check('/a/mix["of""quotes\'does"]/not/work/well', ["a", 'mix["of""quotes\'does"]', "not", "work", "well"]) + + def test_create_xpath(self): + def check(tokens, xpath): + expected=xpath + actual=self.path_addressing.create_xpath(tokens) + self.assertEqual(expected, actual) + + check([], "/") + check(["token"], "/token") + check(["more", "than", "one", "token"], "/more/than/one/token") + check(["multi", "tokens", "with", "empty", "last", "token", ""], "/multi/tokens/with/empty/last/token/") + check(["has", "numbers", "0", "and", "symbols", "^"], "/has/numbers/0/and/symbols/^") + check(["has[a='predicate']", "in", "the", "beginning"], "/has[a='predicate']/in/the/beginning") + check(["ha", "s[a='predicate']", "in", "the", "middle"], "/ha/s[a='predicate']/in/the/middle") + check(["ha", "s[a='predicate-in-the-end']"], "/ha/s[a='predicate-in-the-end']") + check(["it", "has[more='than'][one='predicate']", "somewhere"], "/it/has[more='than'][one='predicate']/somewhere") + check(["ha", "s[a='predicate\"with']", "double-quotes", "inside"], "/ha/s[a='predicate\"with']/double-quotes/inside") + check(["a", 'predicate[with="double"]', "quotes"], '/a/predicate[with="double"]/quotes') + check(['multiple["predicate"][with="double"]', "quotes"], '/multiple["predicate"][with="double"]/quotes') + check(['multiple["predicate"][with="double"]', "quotes"], '/multiple["predicate"][with="double"]/quotes') + check(["ha", 's[a="predicate\'with"]', "single-quote", "inside"], '/ha/s[a="predicate\'with"]/single-quote/inside') + # XPATH 1.0 does not support single-quote within single-quoted string. str literal can be '[^']*' + # Not validating no single-quote within single-quoted string + check(["a", "mix['of''quotes\"does']", "not", "work", "well"], "/a/mix['of''quotes\"does']/not/work/well", ) + # XPATH 1.0 does not support double-quotes within double-quoted string. 
str literal can be "[^"]*" + # Not validating no double-quotes within double-quoted string + check(["a", 'mix["of""quotes\'does"]', "not", "work", "well"], '/a/mix["of""quotes\'does"]/not/work/well') + + def test_find_ref_paths__ref_is_the_whole_key__returns_ref_paths(self): + # Arrange + path = "/PORT/Ethernet0" + expected = [ + "/ACL_TABLE/NO-NSW-PACL-V4/ports/0", + "/VLAN_MEMBER/Vlan1000|Ethernet0", + ] + + # Act + actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) + + # Assert + self.assertCountEqual(expected, actual) + + def test_find_ref_paths__ref_is_a_part_of_key__returns_ref_paths(self): + # Arrange + path = "/VLAN/Vlan1000" + expected = [ + "/VLAN_MEMBER/Vlan1000|Ethernet0", + "/VLAN_MEMBER/Vlan1000|Ethernet4", + "/VLAN_MEMBER/Vlan1000|Ethernet8", + ] + + # Act + actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) + + # Assert + self.assertCountEqual(expected, actual) + + def test_find_ref_paths__ref_is_in_multilist__returns_ref_paths(self): + # Arrange + path = "/PORT/Ethernet8" + expected = [ + "/INTERFACE/Ethernet8", + "/INTERFACE/Ethernet8|10.0.0.1~130", + ] + + # Act + actual = self.path_addressing.find_ref_paths(path, Files.CONFIG_DB_WITH_INTERFACE) + + # Assert + self.assertCountEqual(expected, actual) + + def test_find_ref_paths__ref_is_in_leafref_union__returns_ref_paths(self): + # Arrange + path = "/PORTCHANNEL/PortChannel0001" + expected = [ + "/ACL_TABLE/NO-NSW-PACL-V4/ports/1", + ] + + # Act + actual = self.path_addressing.find_ref_paths(path, Files.CONFIG_DB_WITH_PORTCHANNEL_AND_ACL) + + # Assert + self.assertCountEqual(expected, actual) + + def test_find_ref_paths__path_is_table__returns_ref_paths(self): + # Arrange + path = "/PORT" + expected = [ + "/ACL_TABLE/DATAACL/ports/0", + "/ACL_TABLE/EVERFLOW/ports/0", + "/ACL_TABLE/EVERFLOWV6/ports/0", + "/ACL_TABLE/EVERFLOWV6/ports/1", + "/ACL_TABLE/NO-NSW-PACL-V4/ports/0", + "/VLAN_MEMBER/Vlan1000|Ethernet0", + "/VLAN_MEMBER/Vlan1000|Ethernet4", + "/VLAN_MEMBER/Vlan1000|Ethernet8", + ] + + # Act + actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) + + # Assert + self.assertCountEqual(expected, actual) + + def test_find_ref_paths__whole_config_path__returns_all_refs(self): + # Arrange + path = "" + expected = [ + "/ACL_TABLE/DATAACL/ports/0", + "/ACL_TABLE/EVERFLOW/ports/0", + "/ACL_TABLE/EVERFLOWV6/ports/0", + "/ACL_TABLE/EVERFLOWV6/ports/1", + "/ACL_TABLE/NO-NSW-PACL-V4/ports/0", + "/VLAN_MEMBER/Vlan1000|Ethernet0", + "/VLAN_MEMBER/Vlan1000|Ethernet4", + "/VLAN_MEMBER/Vlan1000|Ethernet8", + ] + + # Act + actual = self.path_addressing.find_ref_paths(path, Files.CROPPED_CONFIG_DB_AS_JSON) + + # Assert + self.assertCountEqual(expected, actual) + + def test_convert_path_to_xpath(self): + def check(path, xpath, config=None): + if not config: + config = Files.CROPPED_CONFIG_DB_AS_JSON + + expected=xpath + actual=self.path_addressing.convert_path_to_xpath(path, config, self.sy_only_models) + self.assertEqual(expected, actual) + + check(path="", xpath="/") + check(path="/VLAN_MEMBER", xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER") + check(path="/VLAN/Vlan1000/dhcp_servers", + xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers") + check(path="/VLAN/Vlan1000/dhcp_servers/0", + xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers[.='192.0.0.1']") + check(path="/PORT/Ethernet0/lanes", xpath="/sonic-port:sonic-port/PORT/PORT_LIST[name='Ethernet0']/lanes") + 
check(path="/ACL_TABLE/NO-NSW-PACL-V4/ports/0", + xpath="/sonic-acl:sonic-acl/ACL_TABLE/ACL_TABLE_LIST[ACL_TABLE_NAME='NO-NSW-PACL-V4']/ports[.='Ethernet0']") + check(path="/ACL_TABLE/NO-NSW-PACL-V4/ports/0", + xpath="/sonic-acl:sonic-acl/ACL_TABLE/ACL_TABLE_LIST[ACL_TABLE_NAME='NO-NSW-PACL-V4']/ports[.='Ethernet0']") + check(path="/VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode", + xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode") + check(path="/VLAN_MEMBER/Vlan1000|Ethernet8", + xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']") + check(path="/DEVICE_METADATA/localhost/hwsku", + xpath="/sonic-device_metadata:sonic-device_metadata/DEVICE_METADATA/localhost/hwsku", + config=Files.CONFIG_DB_WITH_DEVICE_METADATA) + check(path="/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", + xpath="/sonic-flex_counter:sonic-flex_counter/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", + config=Files.CONTRAINER_WITH_CONTAINER_CONFIG_DB) + check(path="/ACL_RULE/SSH_ONLY|RULE1/L4_SRC_PORT", + xpath="/sonic-acl:sonic-acl/ACL_RULE/ACL_RULE_LIST[ACL_TABLE_NAME='SSH_ONLY'][RULE_NAME='RULE1']/L4_SRC_PORT", + config=Files.CONFIG_DB_CHOICE) + check(path="/INTERFACE/Ethernet8", + xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_LIST[name='Ethernet8']", + config=Files.CONFIG_DB_WITH_INTERFACE) + check(path="/INTERFACE/Ethernet8|10.0.0.1~130", + xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']", + config=Files.CONFIG_DB_WITH_INTERFACE) + check(path="/INTERFACE/Ethernet8|10.0.0.1~130/scope", + xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']/scope", + config=Files.CONFIG_DB_WITH_INTERFACE) + check(path="/PORTCHANNEL_INTERFACE", + xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE", + config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) + check(path="/PORTCHANNEL_INTERFACE/PortChannel0001|1.1.1.1~124", + xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE/PORTCHANNEL_INTERFACE_IPPREFIX_LIST[name='PortChannel0001'][ip_prefix='1.1.1.1/24']", + config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) + + def test_convert_xpath_to_path(self): + def check(xpath, path, config=None): + if not config: + config = Files.CROPPED_CONFIG_DB_AS_JSON + + expected=path + actual=self.path_addressing.convert_xpath_to_path(xpath, config, self.sy_only_models) + self.assertEqual(expected, actual) + + check(xpath="/",path="") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER", path="/VLAN_MEMBER") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST",path="/VLAN_MEMBER") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']", + path="/VLAN_MEMBER/Vlan1000|Ethernet8") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/name", + path="/VLAN_MEMBER/Vlan1000|Ethernet8") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/port", + path="/VLAN_MEMBER/Vlan1000|Ethernet8") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode", + path="/VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode") + check(xpath="/sonic-vlan:sonic-acl/ACL_RULE", path="/ACL_RULE") + 
check(xpath="/sonic-vlan:sonic-acl/ACL_RULE/ACL_RULE_LIST[ACL_TABLE_NAME='SSH_ONLY'][RULE_NAME='RULE1']", + path="/ACL_RULE/SSH_ONLY|RULE1", + config=Files.CONFIG_DB_CHOICE) + check(xpath="/sonic-acl:sonic-acl/ACL_RULE/ACL_RULE_LIST[ACL_TABLE_NAME='SSH_ONLY'][RULE_NAME='RULE1']/L4_SRC_PORT", + path="/ACL_RULE/SSH_ONLY|RULE1/L4_SRC_PORT", + config=Files.CONFIG_DB_CHOICE) + check(xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers", + path="/VLAN/Vlan1000/dhcp_servers") + check(xpath="/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST[name='Vlan1000']/dhcp_servers[.='192.0.0.1']", + path="/VLAN/Vlan1000/dhcp_servers/0") + check(xpath="/sonic-port:sonic-port/PORT/PORT_LIST[name='Ethernet0']/lanes", path="/PORT/Ethernet0/lanes") + check(xpath="/sonic-acl:sonic-acl/ACL_TABLE/ACL_TABLE_LIST[ACL_TABLE_NAME='NO-NSW-PACL-V4']/ports[.='Ethernet0']", + path="/ACL_TABLE/NO-NSW-PACL-V4/ports/0") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']/tagging_mode", + path="/VLAN_MEMBER/Vlan1000|Ethernet8/tagging_mode") + check(xpath="/sonic-vlan:sonic-vlan/VLAN_MEMBER/VLAN_MEMBER_LIST[name='Vlan1000'][port='Ethernet8']", + path="/VLAN_MEMBER/Vlan1000|Ethernet8") + check(xpath="/sonic-device_metadata:sonic-device_metadata/DEVICE_METADATA/localhost/hwsku", + path="/DEVICE_METADATA/localhost/hwsku", + config=Files.CONFIG_DB_WITH_DEVICE_METADATA) + check(xpath="/sonic-flex_counter:sonic-flex_counter/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK", + path="/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK", + config=Files.CONTRAINER_WITH_CONTAINER_CONFIG_DB) + check(xpath="/sonic-flex_counter:sonic-flex_counter/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", + path="/FLEX_COUNTER_TABLE/BUFFER_POOL_WATERMARK/FLEX_COUNTER_STATUS", + config=Files.CONTRAINER_WITH_CONTAINER_CONFIG_DB) + check(xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_LIST[name='Ethernet8']", + path="/INTERFACE/Ethernet8", + config=Files.CONFIG_DB_WITH_INTERFACE) + check(xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']", + path="/INTERFACE/Ethernet8|10.0.0.1~130", + config=Files.CONFIG_DB_WITH_INTERFACE) + check(xpath="/sonic-interface:sonic-interface/INTERFACE/INTERFACE_IPPREFIX_LIST[name='Ethernet8'][ip-prefix='10.0.0.1/30']/scope", + path="/INTERFACE/Ethernet8|10.0.0.1~130/scope", + config=Files.CONFIG_DB_WITH_INTERFACE) + check(xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE", + path="/PORTCHANNEL_INTERFACE", + config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) + check(xpath="/sonic-portchannel:sonic-portchannel/PORTCHANNEL_INTERFACE/PORTCHANNEL_INTERFACE_IPPREFIX_LIST[name='PortChannel0001'][ip_prefix='1.1.1.1/24']", + path="/PORTCHANNEL_INTERFACE/PortChannel0001|1.1.1.1~124", + config=Files.CONFIG_DB_WITH_PORTCHANNEL_INTERFACE) + diff --git a/tests/generic_config_updater/patch_sorter_test.py b/tests/generic_config_updater/patch_sorter_test.py new file mode 100644 index 0000000000..4da9fb901b --- /dev/null +++ b/tests/generic_config_updater/patch_sorter_test.py @@ -0,0 +1,1730 @@ +import jsonpatch +import unittest +from unittest.mock import MagicMock, Mock + +import generic_config_updater.patch_sorter as ps +from .gutest_helpers import Files, create_side_effect_dict +from generic_config_updater.gu_common import ConfigWrapper, PatchWrapper, OperationWrapper, \ + GenericConfigUpdaterError, OperationType, JsonChange, PathAddressing + +class TestDiff(unittest.TestCase): + def 
test_apply_move__updates_current_config(self): + # Arrange + diff = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, target_config=Files.ANY_CONFIG_DB) + move = ps.JsonMove.from_patch(Files.SINGLE_OPERATION_CONFIG_DB_PATCH) + + expected = ps.Diff(current_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION, target_config=Files.ANY_CONFIG_DB) + + # Act + actual = diff.apply_move(move) + + # Assert + self.assertEqual(expected.current_config, actual.current_config) + self.assertEqual(expected.target_config, actual.target_config) + + def test_has_no_diff__diff_exists__returns_false(self): + # Arrange + diff = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, + target_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION) + + # Act and Assert + self.assertFalse(diff.has_no_diff()) + + def test_has_no_diff__no_diff__returns_true(self): + # Arrange + diff = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, + target_config=Files.CROPPED_CONFIG_DB_AS_JSON) + + # Act and Assert + self.assertTrue(diff.has_no_diff()) + + def test_hash__different_current_config__different_hashes(self): + # Arrange + diff1 = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, target_config=Files.ANY_CONFIG_DB) + diff2 = ps.Diff(current_config=Files.CROPPED_CONFIG_DB_AS_JSON, target_config=Files.ANY_CONFIG_DB) + diff3 = ps.Diff(current_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION, target_config=Files.ANY_CONFIG_DB) + + # Act + hash1 = hash(diff1) + hash2 = hash(diff2) + hash3 = hash(diff3) + + # Assert + self.assertEqual(hash1, hash2) # same current config + self.assertNotEqual(hash1, hash3) + + def test_hash__different_target_config__different_hashes(self): + # Arrange + diff1 = ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.CROPPED_CONFIG_DB_AS_JSON) + diff2 = ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.CROPPED_CONFIG_DB_AS_JSON) + diff3 = ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.CONFIG_DB_AFTER_SINGLE_OPERATION) + + # Act + hash1 = hash(diff1) + hash2 = hash(diff2) + hash3 = hash(diff3) + + # Assert + self.assertEqual(hash1, hash2) # same target config + self.assertNotEqual(hash1, hash3) + + def test_hash__swapped_current_and_target_configs__different_hashes(self): + # Arrange + diff1 = ps.Diff(current_config=Files.ANY_CONFIG_DB, target_config=Files.ANY_OTHER_CONFIG_DB) + diff2 = ps.Diff(current_config=Files.ANY_OTHER_CONFIG_DB, target_config=Files.ANY_CONFIG_DB) + + # Act + hash1 = hash(diff1) + hash2 = hash(diff2) + + # Assert + self.assertNotEqual(hash1, hash2) + + def test_eq__different_current_config__returns_false(self): + # Arrange + diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) + other_diff = ps.Diff(Files.ANY_OTHER_CONFIG_DB, Files.ANY_CONFIG_DB) + + # Act and assert + self.assertNotEqual(diff, other_diff) + self.assertFalse(diff == other_diff) + + def test_eq__different_target_config__returns_false(self): + # Arrange + diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) + other_diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_OTHER_CONFIG_DB) + + # Act and assert + self.assertNotEqual(diff, other_diff) + self.assertFalse(diff == other_diff) + + def test_eq__same_current_and_target_config__returns_true(self): + # Arrange + diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) + other_diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB) + + # Act and assert + self.assertEqual(diff, other_diff) + self.assertTrue(diff == other_diff) + +class TestJsonMove(unittest.TestCase): + def setUp(self): + self.operation_wrapper = 
OperationWrapper() + self.any_op_type = OperationType.REPLACE + self.any_tokens = ["table1", "key11"] + self.any_path = "/table1/key11" + self.any_config = { + "table1": { + "key11": "value11" + } + } + self.any_value = "value11" + self.any_operation = self.operation_wrapper.create(self.any_op_type, self.any_path, self.any_value) + self.any_diff = ps.Diff(self.any_config, self.any_config) + + def test_ctor__delete_op_whole_config__none_value_and_empty_path(self): + # Arrange + path = "" + diff = ps.Diff(current_config={}, target_config=self.any_config) + + # Act + jsonmove = ps.JsonMove(diff, OperationType.REMOVE, []) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(OperationType.REMOVE, path), + OperationType.REMOVE, + [], + None, + jsonmove) + def test_ctor__remove_op__operation_created_directly(self): + # Arrange and Act + jsonmove = ps.JsonMove(self.any_diff, OperationType.REMOVE, self.any_tokens) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(OperationType.REMOVE, self.any_path), + OperationType.REMOVE, + self.any_tokens, + None, + jsonmove) + + def test_ctor__replace_op_whole_config__whole_config_value_and_empty_path(self): + # Arrange + path = "" + diff = ps.Diff(current_config={}, target_config=self.any_config) + + # Act + jsonmove = ps.JsonMove(diff, OperationType.REPLACE, [], []) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(OperationType.REPLACE, path, self.any_config), + OperationType.REPLACE, + [], + [], + jsonmove) + + def test_ctor__replace_op__operation_created_directly(self): + # Arrange and Act + jsonmove = ps.JsonMove(self.any_diff, OperationType.REPLACE, self.any_tokens, self.any_tokens) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(OperationType.REPLACE, self.any_path, self.any_value), + OperationType.REPLACE, + self.any_tokens, + self.any_tokens, + jsonmove) + + def test_ctor__add_op_whole_config__whole_config_value_and_empty_path(self): + # Arrange + path = "" + diff = ps.Diff(current_config={}, target_config=self.any_config) + + # Act + jsonmove = ps.JsonMove(diff, OperationType.ADD, [], []) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(OperationType.ADD, path, self.any_config), + OperationType.ADD, + [], + [], + jsonmove) + + def test_ctor__add_op_path_exist__same_value_and_path(self): + # Arrange and Act + jsonmove = ps.JsonMove(self.any_diff, OperationType.ADD, self.any_tokens, self.any_tokens) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(OperationType.ADD, self.any_path, self.any_value), + OperationType.ADD, + self.any_tokens, + self.any_tokens, + jsonmove) + + def test_ctor__add_op_path_exist_include_list__same_value_and_path(self): + # Arrange + current_config = { + "table1": { + "list1": ["value11", "value13"] + } + } + target_config = { + "table1": { + "list1": ["value11", "value12", "value13", "value14"] + } + } + diff = ps.Diff(current_config, target_config) + op_type = OperationType.ADD + current_config_tokens = ["table1", "list1", 1] # Index is 1 which does not exist in target + target_config_tokens = ["table1", "list1", 1] + expected_jsonpatch_path = "/table1/list1/1" + expected_jsonpatch_value = "value12" + # NOTE: the target config can contain more diff than the given move. 
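+ # Editor's illustrative sketch (assumption, not part of the original patch): the move above should translate to the single JsonPatch operation + # {"op": "add", "path": "/table1/list1/1", "value": "value12"} + # i.e. only the one target list item at the referenced index is included, even though the target list holds further pending additions ("value14").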
+ + # Act + jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value), + op_type, + current_config_tokens, + target_config_tokens, + jsonmove) + + def test_ctor__add_op_path_exist_list_index_doesnot_exist_in_target___same_value_and_path(self): + # Arrange + current_config = { + "table1": { + "list1": ["value11"] + } + } + target_config = { + "table1": { + "list1": ["value12"] + } + } + diff = ps.Diff(current_config, target_config) + op_type = OperationType.ADD + current_config_tokens = ["table1", "list1", 1] # Index is 1 which does not exist in target + target_config_tokens = ["table1", "list1", 0] + expected_jsonpatch_path = "/table1/list1/1" + expected_jsonpatch_value = "value12" + # NOTE: the target config can contain more diff than the given move. + + # Act + jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value), + op_type, + current_config_tokens, + target_config_tokens, + jsonmove) + + def test_ctor__add_op_path_doesnot_exist__value_and_path_of_parent(self): + # Arrange + current_config = { + } + target_config = { + "table1": { + "key11": { + "key111": "value111" + } + } + } + diff = ps.Diff(current_config, target_config) + op_type = OperationType.ADD + current_config_tokens = ["table1", "key11", "key111"] + target_config_tokens = ["table1", "key11", "key111"] + expected_jsonpatch_path = "/table1" + expected_jsonpatch_value = { + "key11": { + "key111": "value111" + } + } + # NOTE: the target config can contain more diff than the given move. + + # Act + jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value), + op_type, + current_config_tokens, + target_config_tokens, + jsonmove) + + def test_ctor__add_op_path_doesnot_exist_include_list__value_and_path_of_parent(self): + # Arrange + current_config = { + } + target_config = { + "table1": { + "list1": ["value11", "value12", "value13", "value14"] + } + } + diff = ps.Diff(current_config, target_config) + op_type = OperationType.ADD + current_config_tokens = ["table1", "list1", 0] + target_config_tokens = ["table1", "list1", 1] + expected_jsonpatch_path = "/table1" + expected_jsonpatch_value = { + "list1": ["value12"] + } + # NOTE: the target config can contain more diff than the given move. 
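+ # Editor's illustrative sketch (assumption, not part of the original patch): since "table1" is absent from the current config, the operation is lifted to the nearest missing ancestor, yielding + # {"op": "add", "path": "/table1", "value": {"list1": ["value12"]}} + # with only the single referenced target item included in the list.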
+ + # Act + jsonmove = ps.JsonMove(diff, op_type, current_config_tokens, target_config_tokens) + + # Assert + self.verify_jsonmove(self.operation_wrapper.create(op_type, expected_jsonpatch_path, expected_jsonpatch_value), + op_type, + current_config_tokens, + target_config_tokens, + jsonmove) + + def test_from_patch__more_than_1_op__failure(self): + # Arrange + patch = jsonpatch.JsonPatch([self.any_operation, self.any_operation]) + + # Act and Assert + self.assertRaises(GenericConfigUpdaterError, ps.JsonMove.from_patch, patch) + + def test_from_patch__delete_op__delete_jsonmove(self): + # Arrange + operation = self.operation_wrapper.create(OperationType.REMOVE, self.any_path) + patch = jsonpatch.JsonPatch([operation]) + + # Act + jsonmove = ps.JsonMove.from_patch(patch) + + # Assert + self.verify_jsonmove(operation, + OperationType.REMOVE, + self.any_tokens, + None, + jsonmove) + + def test_from_patch__replace_op__replace_jsonmove(self): + # Arrange + operation = self.operation_wrapper.create(OperationType.REPLACE, self.any_path, self.any_value) + patch = jsonpatch.JsonPatch([operation]) + + # Act + jsonmove = ps.JsonMove.from_patch(patch) + + # Assert + self.verify_jsonmove(operation, + OperationType.REPLACE, + self.any_tokens, + self.any_tokens, + jsonmove) + + def test_from_patch__add_op__add_jsonmove(self): + # Arrange + operation = self.operation_wrapper.create(OperationType.ADD, self.any_path, self.any_value) + patch = jsonpatch.JsonPatch([operation]) + + # Act + jsonmove = ps.JsonMove.from_patch(patch) + + # Assert + self.verify_jsonmove(operation, + OperationType.ADD, + self.any_tokens, + self.any_tokens, + jsonmove) + + def test_from_patch__add_op_with_list_indexes__add_jsonmove(self): + # Arrange + path = "/table1/key11/list1111/3" + value = "value11111" + # From a JsonPatch it is not possible to figure out if the '3' is an item in a list or a dictionary, + # will assume by default a dictionary for simplicity. 
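+ # Editor's illustrative sketch (assumption, not part of the original patch): a JSON Pointer such as "/table1/key11/list1111/3" is ambiguous on its own, so from_patch is expected to keep the trailing token as the string "3" rather than the integer index 3.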
+ tokens = ["table1", "key11", "list1111", "3"] + operation = self.operation_wrapper.create(OperationType.ADD, path, value) + patch = jsonpatch.JsonPatch([operation]) + + # Act + jsonmove = ps.JsonMove.from_patch(patch) + + # Assert + self.verify_jsonmove(operation, + OperationType.ADD, + tokens, + tokens, + jsonmove) + + def test_from_patch__replace_whole_config__whole_config_jsonmove(self): + # Arrange + tokens = [] + path = "" + value = {"table1": {"key1": "value1"} } + operation = self.operation_wrapper.create(OperationType.REPLACE, path, value) + patch = jsonpatch.JsonPatch([operation]) + + # Act + jsonmove = ps.JsonMove.from_patch(patch) + + # Assert + self.verify_jsonmove(operation, + OperationType.REPLACE, + tokens, + tokens, + jsonmove) + + def verify_jsonmove(self, + expected_operation, + expected_op_type, + expected_current_config_tokens, + expected_target_config_tokens, + jsonmove): + expected_patch = jsonpatch.JsonPatch([expected_operation]) + self.assertEqual(expected_patch, jsonmove.patch) + self.assertEqual(expected_op_type, jsonmove.op_type) + self.assertListEqual(expected_current_config_tokens, jsonmove.current_config_tokens) + self.assertEqual(expected_target_config_tokens, jsonmove.target_config_tokens) + +class TestMoveWrapper(unittest.TestCase): + def setUp(self): + self.any_current_config = {} + self.any_target_config = {} + self.any_diff = ps.Diff(self.any_current_config, self.any_target_config) + self.any_move = Mock() + self.any_other_move1 = Mock() + self.any_other_move2 = Mock() + self.any_extended_move = Mock() + self.any_other_extended_move1 = Mock() + self.any_other_extended_move2 = Mock() + + self.single_move_generator = Mock() + self.single_move_generator.generate.side_effect = \ + create_side_effect_dict({(str(self.any_diff),): [self.any_move]}) + + self.another_single_move_generator = Mock() + self.another_single_move_generator.generate.side_effect = \ + create_side_effect_dict({(str(self.any_diff),): [self.any_other_move1]}) + + self.multiple_move_generator = Mock() + self.multiple_move_generator.generate.side_effect = create_side_effect_dict( + {(str(self.any_diff),): [self.any_move, self.any_other_move1, self.any_other_move2]}) + + self.single_move_extender = Mock() + self.single_move_extender.extend.side_effect = create_side_effect_dict( + { + (str(self.any_move), str(self.any_diff)): [self.any_extended_move], + (str(self.any_extended_move), str(self.any_diff)): [], # As first extended move will be extended + (str(self.any_other_extended_move1), str(self.any_diff)): [] # Needed when mixed with other extenders + }) + + self.another_single_move_extender = Mock() + self.another_single_move_extender.extend.side_effect = create_side_effect_dict( + { + (str(self.any_move), str(self.any_diff)): [self.any_other_extended_move1], + (str(self.any_other_extended_move1), str(self.any_diff)): [], # As first extended move will be extended + (str(self.any_extended_move), str(self.any_diff)): [] # Needed when mixed with other extenders + }) + + self.multiple_move_extender = Mock() + self.multiple_move_extender.extend.side_effect = create_side_effect_dict( + { + (str(self.any_move), str(self.any_diff)): \ + [self.any_extended_move, self.any_other_extended_move1, self.any_other_extended_move2], + # All extended moves will be extended + (str(self.any_extended_move), str(self.any_diff)): [], + (str(self.any_other_extended_move1), str(self.any_diff)): [], + (str(self.any_other_extended_move2), str(self.any_diff)): [], + }) + + self.mixed_move_extender = Mock() + 
self.mixed_move_extender.extend.side_effect = create_side_effect_dict( + { + (str(self.any_move), str(self.any_diff)): [self.any_extended_move], + (str(self.any_other_move1), str(self.any_diff)): [self.any_other_extended_move1], + (str(self.any_extended_move), str(self.any_diff)): \ + [self.any_other_extended_move1, self.any_other_extended_move2], + # All extended moves will be extended + (str(self.any_other_extended_move1), str(self.any_diff)): [], + (str(self.any_other_extended_move2), str(self.any_diff)): [], + }) + + self.fail_move_validator = Mock() + self.fail_move_validator.validate.side_effect = create_side_effect_dict( + {(str(self.any_move), str(self.any_diff)): False}) + + self.success_move_validator = Mock() + self.success_move_validator.validate.side_effect = create_side_effect_dict( + {(str(self.any_move), str(self.any_diff)): True}) + + def test_ctor__assigns_values_correctly(self): + # Arrange + move_generators = Mock() + move_extenders = Mock() + move_validators = Mock() + + # Act + move_wrapper = ps.MoveWrapper(move_generators, move_extenders, move_validators) + + # Assert + self.assertIs(move_generators, move_wrapper.move_generators) + self.assertIs(move_extenders, move_wrapper.move_extenders) + self.assertIs(move_validators, move_wrapper.move_validators) + + def test_generate__single_move_generator__single_move_returned(self): + # Arrange + move_generators = [self.single_move_generator] + move_wrapper = ps.MoveWrapper(move_generators, [], []) + expected = [self.any_move] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__multiple_move_generator__multiple_move_returned(self): + # Arrange + move_generators = [self.multiple_move_generator] + move_wrapper = ps.MoveWrapper(move_generators, [], []) + expected = [self.any_move, self.any_other_move1, self.any_other_move2] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__different_move_generators__different_moves_returned(self): + # Arrange + move_generators = [self.single_move_generator, self.another_single_move_generator] + move_wrapper = ps.MoveWrapper(move_generators, [], []) + expected = [self.any_move, self.any_other_move1] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__duplicate_generated_moves__unique_moves_returned(self): + # Arrange + move_generators = [self.single_move_generator, self.single_move_generator] + move_wrapper = ps.MoveWrapper(move_generators, [], []) + expected = [self.any_move] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__single_move_extender__one_extended_move_returned(self): + # Arrange + move_generators = [self.single_move_generator] + move_extenders = [self.single_move_extender] + move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + expected = [self.any_move, self.any_extended_move] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__multiple_move_extender__multiple_extended_move_returned(self): + # Arrange + move_generators = [self.single_move_generator] + move_extenders = [self.multiple_move_extender] + move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + expected = [self.any_move, self.any_extended_move, 
self.any_other_extended_move1, self.any_other_extended_move2] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__different_move_extenders__different_extended_moves_returned(self): + # Arrange + move_generators = [self.single_move_generator] + move_extenders = [self.single_move_extender, self.another_single_move_extender] + move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + expected = [self.any_move, self.any_extended_move, self.any_other_extended_move1] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__duplicate_extended_moves__unique_moves_returned(self): + # Arrange + move_generators = [self.single_move_generator] + move_extenders = [self.single_move_extender, self.single_move_extender] + move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + expected = [self.any_move, self.any_extended_move] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_generate__mixed_extended_moves__unique_moves_returned(self): + # Arrange + move_generators = [self.single_move_generator, self.another_single_move_generator] + move_extenders = [self.mixed_move_extender] + move_wrapper = ps.MoveWrapper(move_generators, move_extenders, []) + expected = [self.any_move, + self.any_other_move1, + self.any_extended_move, + self.any_other_extended_move1, + self.any_other_extended_move2] + + # Act + actual = list(move_wrapper.generate(self.any_diff)) + + # Assert + self.assertListEqual(expected, actual) + + def test_validate__validation_fail__false_returned(self): + # Arrange + move_validators = [self.fail_move_validator] + move_wrapper = ps.MoveWrapper([], [], move_validators) + + # Act and assert + self.assertFalse(move_wrapper.validate(self.any_move, self.any_diff)) + + def test_validate__validation_succeed__true_returned(self): + # Arrange + move_validators = [self.success_move_validator] + move_wrapper = ps.MoveWrapper([], [], move_validators) + + # Act and assert + self.assertTrue(move_wrapper.validate(self.any_move, self.any_diff)) + + def test_validate__multiple_validators_last_fail___false_returned(self): + # Arrange + move_validators = [self.success_move_validator, self.success_move_validator, self.fail_move_validator] + move_wrapper = ps.MoveWrapper([], [], move_validators) + + # Act and assert + self.assertFalse(move_wrapper.validate(self.any_move, self.any_diff)) + + def test_validate__multiple_validators_succeed___true_returned(self): + # Arrange + move_validators = [self.success_move_validator, self.success_move_validator, self.success_move_validator] + move_wrapper = ps.MoveWrapper([], [], move_validators) + + # Act and assert + self.assertTrue(move_wrapper.validate(self.any_move, self.any_diff)) + + def test_simulate__applies_move(self): + # Arrange + diff = Mock() + diff.apply_move.side_effect = create_side_effect_dict({(str(self.any_move), ): self.any_diff}) + move_wrapper = ps.MoveWrapper(None, None, None) + + # Act + actual = move_wrapper.simulate(self.any_move, diff) + + # Assert + self.assertIs(self.any_diff, actual) + +class TestDeleteWholeConfigMoveValidator(unittest.TestCase): + def setUp(self): + self.operation_wrapper = OperationWrapper() + self.validator = ps.DeleteWholeConfigMoveValidator() + self.any_diff = Mock() + self.any_non_whole_config_path = "/table1" + self.whole_config_path = "" + + def 
test_validate__non_remove_op_non_whole_config__success(self): + self.verify(OperationType.REPLACE, self.any_non_whole_config_path, True) + self.verify(OperationType.ADD, self.any_non_whole_config_path, True) + + def test_validate__remove_op_non_whole_config__success(self): + self.verify(OperationType.REMOVE, self.any_non_whole_config_path, True) + + def test_validate__non_remove_op_whole_config__success(self): + self.verify(OperationType.REPLACE, self.whole_config_path, True) + self.verify(OperationType.ADD, self.whole_config_path, True) + + def test_validate__remove_op_whole_config__failure(self): + self.verify(OperationType.REMOVE, self.whole_config_path, False) + + def verify(self, operation_type, path, expected): + # Arrange + value = None + if operation_type in [OperationType.ADD, OperationType.REPLACE]: + value = Mock() + + operation = self.operation_wrapper.create(operation_type, path, value) + move = ps.JsonMove.from_operation(operation) + + # Act + actual = self.validator.validate(move, self.any_diff) + + # Assert + self.assertEqual(expected, actual) + +class TestUniqueLanesMoveValidator(unittest.TestCase): + def setUp(self): + self.validator = ps.UniqueLanesMoveValidator() + + def test_validate__no_port_table__success(self): + config = {"ACL_TABLE": {}} + self.validate_target_config(config) + + def test_validate__empty_port_table__success(self): + config = {"PORT": {}} + self.validate_target_config(config) + + def test_validate__single_lane__success(self): + config = {"PORT": {"Ethernet0": {"lanes": "66", "speed":"10000"}}} + self.validate_target_config(config) + + def test_validate__different_lanes_single_port___success(self): + config = {"PORT": {"Ethernet0": {"lanes": "66, 67, 68", "speed":"10000"}}} + self.validate_target_config(config) + + def test_validate__different_lanes_multi_ports___success(self): + config = {"PORT": { + "Ethernet0": {"lanes": "64, 65", "speed":"10000"}, + "Ethernet1": {"lanes": "66, 67, 68", "speed":"10000"}, + }} + self.validate_target_config(config) + + def test_validate__same_lanes_single_port___failure(self): + config = {"PORT": {"Ethernet0": {"lanes": "65, 65", "speed":"10000"}}} + self.validate_target_config(config, False) + + def validate_target_config(self, target_config, expected=True): + # Arrange + current_config = {} + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + + # Act + actual = self.validator.validate(move, diff) + + # Assert + self.assertEqual(expected, actual) + +class TestFullConfigMoveValidator(unittest.TestCase): + def setUp(self): + self.any_current_config = Mock() + self.any_target_config = Mock() + self.any_simulated_config = Mock() + self.any_diff = ps.Diff(self.any_current_config, self.any_target_config) + self.any_move = Mock() + self.any_move.apply.side_effect = \ + create_side_effect_dict({(str(self.any_current_config),): self.any_simulated_config}) + + def test_validate__invalid_config_db_after_applying_move__failure(self): + # Arrange + config_wrapper = Mock() + config_wrapper.validate_config_db_config.side_effect = \ + create_side_effect_dict({(str(self.any_simulated_config),): False}) + validator = ps.FullConfigMoveValidator(config_wrapper) + + # Act and assert + self.assertFalse(validator.validate(self.any_move, self.any_diff)) + + def test_validate__valid_config_db_after_applying_move__success(self): + # Arrange + config_wrapper = Mock() + config_wrapper.validate_config_db_config.side_effect = \ + create_side_effect_dict({(str(self.any_simulated_config),): 
True})
+        validator = ps.FullConfigMoveValidator(config_wrapper)
+
+        # Act and assert
+        self.assertTrue(validator.validate(self.any_move, self.any_diff))
+
+class TestCreateOnlyMoveValidator(unittest.TestCase):
+    def setUp(self):
+        self.validator = ps.CreateOnlyMoveValidator(ps.PathAddressing())
+        self.any_diff = ps.Diff({}, {})
+
+    def test_validate__non_replace_operation__success(self):
+        # Assert
+        self.assertTrue(self.validator.validate( \
+            ps.JsonMove(self.any_diff, OperationType.ADD, [], []), self.any_diff))
+        self.assertTrue(self.validator.validate( \
+            ps.JsonMove(self.any_diff, OperationType.REMOVE, [], []), self.any_diff))
+
+    def test_validate__no_create_only_field__success(self):
+        current_config = {"PORT": {}}
+        target_config = {"PORT": {}, "ACL_TABLE": {}}
+        self.verify_diff(current_config, target_config)
+
+    def test_validate__same_create_only_field__success(self):
+        current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}}
+        target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}}
+        self.verify_diff(current_config, target_config)
+
+    def test_validate__different_create_only_field__failure(self):
+        current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}}
+        target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}}
+        self.verify_diff(current_config, target_config, expected=False)
+
+    def test_validate__different_create_only_field_directly_updated__failure(self):
+        current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}}
+        target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}}
+        self.verify_diff(current_config,
+                         target_config,
+                         ["PORT", "Ethernet0", "lanes"],
+                         ["PORT", "Ethernet0", "lanes"],
+                         False)
+
+    def test_validate__different_create_only_field_updating_parent__failure(self):
+        current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}}
+        target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}}
+        self.verify_diff(current_config,
+                         target_config,
+                         ["PORT", "Ethernet0"],
+                         ["PORT", "Ethernet0"],
+                         False)
+
+    def test_validate__different_create_only_field_updating_grandparent__failure(self):
+        current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}}
+        target_config = {"PORT": {"Ethernet0":{"lanes":"66"}}, "ACL_TABLE": {}}
+        self.verify_diff(current_config,
+                         target_config,
+                         ["PORT"],
+                         ["PORT"],
+                         False)
+
+    def test_validate__same_create_only_field_directly_updated__success(self):
+        current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}}
+        target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}}
+        self.verify_diff(current_config,
+                         target_config,
+                         ["PORT", "Ethernet0", "lanes"],
+                         ["PORT", "Ethernet0", "lanes"])
+
+    def test_validate__same_create_only_field_updating_parent__success(self):
+        current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}}
+        target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}}
+        self.verify_diff(current_config,
+                         target_config,
+                         ["PORT", "Ethernet0"],
+                         ["PORT", "Ethernet0"])
+
+    def test_validate__same_create_only_field_updating_grandparent__success(self):
+        current_config = {"PORT": {"Ethernet0":{"lanes":"65"}}}
+        target_config = {"PORT": {"Ethernet0":{"lanes":"65"}}, "ACL_TABLE": {}}
+        self.verify_diff(current_config,
+                         target_config,
+                         ["PORT"],
+                         ["PORT"])
+
+    def verify_diff(self, current_config, target_config, current_config_tokens=None, target_config_tokens=None, expected=True):
+        # Arrange
+        current_config_tokens = current_config_tokens if current_config_tokens else []
+        target_config_tokens = target_config_tokens if target_config_tokens else []
+
diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REPLACE, current_config_tokens, target_config_tokens) + + # Act + actual = self.validator.validate(move, diff) + + # Assert + self.assertEqual(expected, actual) + +class TestNoDependencyMoveValidator(unittest.TestCase): + def setUp(self): + path_addressing = ps.PathAddressing() + config_wrapper = ConfigWrapper() + self.validator = ps.NoDependencyMoveValidator(path_addressing, config_wrapper) + + def test_validate__add_full_config_has_dependencies__failure(self): + # Arrange + # CROPPED_CONFIG_DB_AS_JSON has dependencies between PORT and ACL_TABLE + diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CROPPED_CONFIG_DB_AS_JSON) + move = ps.JsonMove(diff, OperationType.ADD, [], []) + + # Act and assert + self.assertFalse(self.validator.validate(move, diff)) + + def test_validate__add_full_config_no_dependencies__success(self): + # Arrange + diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CONFIG_DB_NO_DEPENDENCIES) + move = ps.JsonMove(diff, OperationType.ADD, [], []) + + # Act and assert + self.assertTrue(self.validator.validate(move, diff)) + + def test_validate__add_table_has_no_dependencies__success(self): + # Arrange + target_config = Files.CROPPED_CONFIG_DB_AS_JSON + # prepare current config by removing ACL_TABLE from current config + current_config = self.prepare_config(target_config, jsonpatch.JsonPatch([ + {"op": "remove", "path":"/ACL_TABLE"} + ])) + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.ADD, ["ACL_TABLE"], ["ACL_TABLE"]) + + # Act and assert + self.assertTrue(self.validator.validate(move, diff)) + + def test_validate__remove_full_config_has_dependencies__failure(self): + # Arrange + # CROPPED_CONFIG_DB_AS_JSON has dependencies between PORT and ACL_TABLE + diff = ps.Diff(Files.CROPPED_CONFIG_DB_AS_JSON, Files.EMPTY_CONFIG_DB) + move = ps.JsonMove(diff, OperationType.REMOVE, [], []) + + # Act and assert + self.assertFalse(self.validator.validate(move, diff)) + + def test_validate__remove_full_config_no_dependencies__success(self): + # Arrange + diff = ps.Diff(Files.EMPTY_CONFIG_DB, Files.CONFIG_DB_NO_DEPENDENCIES) + move = ps.JsonMove(diff, OperationType.REMOVE, [], []) + + # Act and assert + self.assertTrue(self.validator.validate(move, diff)) + + def test_validate__remove_table_has_no_dependencies__success(self): + # Arrange + current_config = Files.CROPPED_CONFIG_DB_AS_JSON + target_config = self.prepare_config(current_config, jsonpatch.JsonPatch([ + {"op": "remove", "path":"/ACL_TABLE"} + ])) + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REMOVE, ["ACL_TABLE"]) + + # Act and assert + self.assertTrue(self.validator.validate(move, diff)) + + def test_validate__replace_whole_config_item_added_ref_added__failure(self): + # Arrange + target_config = Files.SIMPLE_CONFIG_DB_INC_DEPS + # prepare current config by removing an item and its ref from target config + current_config = self.prepare_config(target_config, jsonpatch.JsonPatch([ + {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""}, + {"op": "remove", "path":"/PORT/Ethernet0"} + ])) + + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + + # Act and assert + self.assertFalse(self.validator.validate(move, diff)) + + def test_validate__replace_whole_config_item_removed_ref_removed__false(self): + # Arrange + current_config = Files.SIMPLE_CONFIG_DB_INC_DEPS + # prepare target config by removing an item 
and its ref from current config + target_config = self.prepare_config(current_config, jsonpatch.JsonPatch([ + {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""}, + {"op": "remove", "path":"/PORT/Ethernet0"} + ])) + + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + + # Act and assert + self.assertFalse(self.validator.validate(move, diff)) + + def test_validate__replace_whole_config_item_same_ref_added__true(self): + # Arrange + target_config = Files.SIMPLE_CONFIG_DB_INC_DEPS + # prepare current config by removing ref from target config + current_config = self.prepare_config(target_config, jsonpatch.JsonPatch([ + {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""} + ])) + + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + + # Act and assert + self.assertTrue(self.validator.validate(move, diff)) + + def test_validate__replace_whole_config_item_same_ref_removed__true(self): + # Arrange + current_config= Files.SIMPLE_CONFIG_DB_INC_DEPS + # prepare target config by removing ref from current config + target_config = self.prepare_config(current_config, jsonpatch.JsonPatch([ + {"op": "replace", "path":"/ACL_TABLE/EVERFLOW/ports/0", "value":""} + ])) + + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + + # Act and assert + self.assertTrue(self.validator.validate(move, diff)) + + def test_validate__replace_whole_config_item_same_ref_same__true(self): + # Arrange + current_config= Files.SIMPLE_CONFIG_DB_INC_DEPS + # prepare target config by removing ref from current config + target_config = current_config + + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, OperationType.REPLACE, [], []) + + # Act and assert + self.assertTrue(self.validator.validate(move, diff)) + + def prepare_config(self, config, patch): + return patch.apply(config) + +class TestLowLevelMoveGenerator(unittest.TestCase): + def setUp(self): + path_addressing = PathAddressing() + self.generator = ps.LowLevelMoveGenerator(path_addressing) + + def test_generate__no_diff__no_moves(self): + self.verify() + + def test_generate__replace_key__replace_move(self): + self.verify(tc_ops=[{"op": "replace", 'path': '/PORT/Ethernet0/description', 'value':'any-desc'}]) + + def test_generate__leaf_key_missing__add_move(self): + self.verify( + cc_ops=[{"op": "remove", 'path': '/ACL_TABLE/EVERFLOW/policy_desc'}], + ex_ops=[{"op": "add", 'path': '/ACL_TABLE/EVERFLOW/policy_desc', 'value':'EVERFLOW'}] + ) + + def test_generate__leaf_key_additional__remove_move(self): + self.verify( + tc_ops=[{"op": "remove", 'path': '/ACL_TABLE/EVERFLOW/policy_desc'}] + ) + + def test_generate__table_missing__add_leafs_moves(self): + self.verify( + cc_ops=[{"op": "remove", 'path': '/VLAN'}], + ex_ops=[{'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'vlanid': '1000'}}}, + {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.1']}}}, + {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.2']}}}, + {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.3']}}}, + {'op': 'add', 'path': '/VLAN', 'value': {'Vlan1000': {'dhcp_servers': ['192.0.0.4']}}}] + ) + + def test_generate__table_additional__remove_leafs_moves(self): + self.verify( + tc_ops=[{"op": "remove", 'path': '/VLAN'}], + ex_ops=[{'op': 'remove', 'path': '/VLAN/Vlan1000/vlanid'}, + {'op': 'remove', 'path': 
'/VLAN/Vlan1000/dhcp_servers/0'}, + {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/1'}, + {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/2'}, + {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/3'}] + ) + + def test_generate__leaf_table_missing__add_table(self): + self.verify( + tc_ops=[{"op": "add", 'path': '/NEW_TABLE', 'value':{}}] + ) + + def test_generate__leaf_table_additional__remove_table(self): + self.verify( + cc_ops=[{"op": "add", 'path': '/NEW_TABLE', 'value':{}}], + ex_ops=[{"op": "remove", 'path': '/NEW_TABLE'}] + ) + + def test_generate__replace_list_item__remove_add_replace_moves(self): + self.verify( + tc_ops=[{"op": "replace", 'path': '/ACL_TABLE/EVERFLOW/ports/0', 'value':'Ethernet0'}], + ex_ops=[ + {"op": "remove", 'path': '/ACL_TABLE/EVERFLOW/ports/0'}, + {"op": "add", 'path': '/ACL_TABLE/EVERFLOW/ports/0', 'value':'Ethernet0'}, + {"op": "replace", 'path': '/ACL_TABLE/EVERFLOW/ports/0', 'value':'Ethernet0'}, + ]) + + def test_generate__remove_list_item__remove_move(self): + self.verify( + tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}]) + + def test_generate__remove_multiple_list_items__multiple_remove_moves(self): + self.verify( + tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, + {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}], + ex_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, + {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/1'}] + ) + + def test_generate__remove_all_list_items__multiple_remove_moves(self): + self.verify( + tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], + ex_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, + {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/2'}, + {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/3'}, + {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/1'}] + ) + + def test_generate__add_list_items__add_move(self): + self.verify( + tc_ops=[{"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}] + ) + + def test_generate__add_multiple_list_items__multiple_add_moves(self): + self.verify( + tc_ops=[{"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, + {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.6'}] + ) + + def test_generate__add_all_list_items__multiple_add_moves(self): + self.verify( + cc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], + ex_ops=[{"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.1'}, + {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.2'}, + {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.3'}, + {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.0.0.4'}] + ) + + def test_generate__replace_multiple_list_items__multiple_remove_add_replace_moves(self): + self.verify( + tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, + {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.6'}], + ex_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, + {"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers/3'}, + {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, + {"op": "add", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.6'}, + {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.5'}, + {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 
'value':'192.168.1.6'}, + {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/3', 'value':'192.168.1.5'}, + {"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers/0', 'value':'192.168.1.6'}] + ) + + def test_generate__different_order_list_items__whole_list_replace_move(self): + self.verify( + tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[ + "192.0.0.4", + "192.0.0.3", + "192.0.0.2", + "192.0.0.1" + ]}]) + + def test_generate__whole_list_missing__add_items_moves(self): + self.verify( + cc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], + ex_ops=[{'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.1']}, + {'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.2']}, + {'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.3']}, + {'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value': ['192.0.0.4']}]) + + def test_generate__whole_list_additional__remove_items_moves(self): + self.verify( + tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], + ex_ops=[{'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/0'}, + {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/1'}, + {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/2'}, + {'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers/3'}]) + + def test_generate__empty_list_missing__add_whole_list(self): + self.verify( + tc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], + cc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], + ex_ops=[{'op': 'add', 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}]) + + def test_generate__empty_list_additional__remove_whole_list(self): + self.verify( + tc_ops=[{"op": "remove", 'path': '/VLAN/Vlan1000/dhcp_servers'}], + cc_ops=[{"op": "replace", 'path': '/VLAN/Vlan1000/dhcp_servers', 'value':[]}], + ex_ops=[{'op': 'remove', 'path': '/VLAN/Vlan1000/dhcp_servers'}]) + + def test_generate__dpb_1_to_4_example(self): + # Arrange + diff = ps.Diff(Files.DPB_1_SPLIT_FULL_CONFIG, Files.DPB_4_SPLITS_FULL_CONFIG) + + # Act + moves = list(self.generator.generate(diff)) + + # Assert + self.verify_moves([{'op': 'replace', 'path': '/PORT/Ethernet0/alias', 'value': 'Eth1/1'}, + {'op': 'replace', 'path': '/PORT/Ethernet0/lanes', 'value': '65'}, + {'op': 'replace', 'path': '/PORT/Ethernet0/description', 'value': ''}, + {'op': 'replace', 'path': '/PORT/Ethernet0/speed', 'value': '10000'}, + {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'alias': 'Eth1/2'}}, + {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'lanes': '66'}}, + {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'description': ''}}, + {'op': 'add', 'path': '/PORT/Ethernet1', 'value': {'speed': '10000'}}, + {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'alias': 'Eth1/3'}}, + {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'lanes': '67'}}, + {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'description': ''}}, + {'op': 'add', 'path': '/PORT/Ethernet2', 'value': {'speed': '10000'}}, + {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'alias': 'Eth1/4'}}, + {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'lanes': '68'}}, + {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'description': ''}}, + {'op': 'add', 'path': '/PORT/Ethernet3', 'value': {'speed': '10000'}}, + {'op': 'add', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1', 'value': 'Ethernet1'}, + {'op': 'add', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1', 'value': 'Ethernet2'}, + {'op': 'add', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1', 
'value': 'Ethernet3'},
+                           {'op': 'add', 'path': '/VLAN_MEMBER/Vlan100|Ethernet1', 'value': {'tagging_mode': 'untagged'}},
+                           {'op': 'add', 'path': '/VLAN_MEMBER/Vlan100|Ethernet2', 'value': {'tagging_mode': 'untagged'}},
+                           {'op': 'add', 'path': '/VLAN_MEMBER/Vlan100|Ethernet3', 'value': {'tagging_mode': 'untagged'}}],
+                          moves)
+
+    def test_generate__dpb_4_to_1_example(self):
+        # Arrange
+        diff = ps.Diff(Files.DPB_4_SPLITS_FULL_CONFIG, Files.DPB_1_SPLIT_FULL_CONFIG)
+
+        # Act
+        moves = list(self.generator.generate(diff))
+
+        # Assert
+        self.verify_moves([{'op': 'replace', 'path': '/PORT/Ethernet0/alias', 'value': 'Eth1'},
+                           {'op': 'replace', 'path': '/PORT/Ethernet0/lanes', 'value': '65, 66, 67, 68'},
+                           {'op': 'replace', 'path': '/PORT/Ethernet0/description', 'value': 'Ethernet0 100G link'},
+                           {'op': 'replace', 'path': '/PORT/Ethernet0/speed', 'value': '100000'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet1/alias'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet1/lanes'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet1/description'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet1/speed'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet2/alias'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet2/lanes'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet2/description'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet2/speed'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet3/alias'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet3/lanes'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet3/description'},
+                           {'op': 'remove', 'path': '/PORT/Ethernet3/speed'},
+                           {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/1'},
+                           {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/2'},
+                           {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/3'},
+                           {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan100|Ethernet1/tagging_mode'},
+                           {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan100|Ethernet2/tagging_mode'},
+                           {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan100|Ethernet3/tagging_mode'}],
+                          moves)
+
+    def verify(self, tc_ops=None, cc_ops=None, ex_ops=None):
+        """
+        Generate a diff where target config is modified using the given tc_ops.
+        The expected low level moves should match ex_ops if it is not None, otherwise tc_ops
+        """
+        # Arrange
+        diff = self.get_diff(target_config_ops=tc_ops, current_config_ops=cc_ops)
+        expected = ex_ops if ex_ops is not None else \
+                   tc_ops if tc_ops is not None else \
+                   []
+
+        # Act
+        actual = self.generator.generate(diff)
+
+        # Assert
+        self.verify_moves(expected, actual)
+
+    def verify_moves(self, ops, moves):
+        moves_ops = [list(move.patch)[0] for move in moves]
+        self.assertCountEqual(ops, moves_ops)
+
+    def get_diff(self, target_config_ops = None, current_config_ops = None):
+        current_config = Files.CROPPED_CONFIG_DB_AS_JSON
+        if current_config_ops:
+            cc_patch = jsonpatch.JsonPatch(current_config_ops)
+            current_config = cc_patch.apply(current_config)
+
+        target_config = Files.CROPPED_CONFIG_DB_AS_JSON
+        if target_config_ops:
+            tc_patch = jsonpatch.JsonPatch(target_config_ops)
+            target_config = tc_patch.apply(target_config)
+
+        return ps.Diff(current_config, target_config)
+
+class TestUpperLevelMoveExtender(unittest.TestCase):
+    def setUp(self):
+        self.extender = ps.UpperLevelMoveExtender()
+        self.any_diff = ps.Diff(Files.ANY_CONFIG_DB, Files.ANY_CONFIG_DB)
+
+    def test_extend__root_level_move__no_extended_moves(self):
+        self.verify(OperationType.REMOVE, [])
+        self.verify(OperationType.ADD, [], [])
+        self.verify(OperationType.REPLACE, [], [])
+
+    def test_extend__remove_key_upper_level_does_not_exist__remove_upper_level(self):
+        self.verify(OperationType.REMOVE,
+                    ["ACL_TABLE", "EVERFLOW", "policy_desc"],
+                    tc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}],
+                    ex_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}])
+
+    def test_extend__remove_key_upper_level_does_exist__replace_upper_level(self):
+        self.verify(OperationType.REMOVE,
+                    ["ACL_TABLE", "EVERFLOW", "policy_desc"],
+                    tc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW/policy_desc'}],
+                    ex_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW', 'value':{
+                        "ports": [
+                            "Ethernet8"
+                        ],
+                        "stage": "ingress",
+                        "type": "MIRROR"
+                    }}])
+
+    def test_extend__remove_list_item_upper_level_does_not_exist__remove_upper_level(self):
+        self.verify(OperationType.REMOVE,
+                    ["VLAN", "Vlan1000", "dhcp_servers", 1],
+                    tc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers'}],
+                    ex_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers'}])
+
+    def test_extend__remove_list_item_upper_level_does_exist__replace_upper_level(self):
+        self.verify(OperationType.REMOVE,
+                    ["VLAN", "Vlan1000", "dhcp_servers", 1],
+                    tc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers/1'}],
+                    ex_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[
+                        "192.0.0.1",
+                        "192.0.0.3",
+                        "192.0.0.4"
+                    ]}])
+
+    def test_extend__add_key_upper_level_missing__add_upper_level(self):
+        self.verify(OperationType.ADD,
+                    ["ACL_TABLE", "EVERFLOW", "policy_desc"],
+                    ["ACL_TABLE", "EVERFLOW", "policy_desc"],
+                    cc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}],
+                    ex_ops=[{'op':'add', 'path':'/ACL_TABLE/EVERFLOW', 'value':{
+                        "policy_desc": "EVERFLOW",
+                        "ports": [
+                            "Ethernet8"
+                        ],
+                        "stage": "ingress",
+                        "type": "MIRROR"
+                    }}])
+
+    def test_extend__add_key_upper_level_exist__replace_upper_level(self):
+        self.verify(OperationType.ADD,
+                    ["ACL_TABLE", "EVERFLOW", "policy_desc"],
+                    ["ACL_TABLE", "EVERFLOW", "policy_desc"],
+                    cc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW/policy_desc'}],
+                    ex_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW', 'value':{
+                        "policy_desc": "EVERFLOW",
+                        "ports": [
+                            "Ethernet8"
+                        ],
+                        "stage": "ingress",
+                        "type": "MIRROR"
+
}}]) + + def test_extend__add_list_item_upper_level_missing__add_upper_level(self): + self.verify(OperationType.ADD, + ["VLAN", "Vlan1000", "dhcp_servers", 1], + ["VLAN", "Vlan1000", "dhcp_servers", 1], + cc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers'}], + ex_ops=[{'op':'add', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ]}]) + + def test_extend__add_list_item_upper_level_exist__replace_upper_level(self): + self.verify(OperationType.ADD, + ["VLAN", "Vlan1000", "dhcp_servers", 1], + ["VLAN", "Vlan1000", "dhcp_servers", 1], + cc_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers/1'}], + ex_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ]}]) + + def test_extend__add_table__replace_whole_config(self): + self.verify(OperationType.ADD, + ["ACL_TABLE"], + ["ACL_TABLE"], + cc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], + ex_ops=[{'op':'replace', 'path':'', 'value':Files.CROPPED_CONFIG_DB_AS_JSON}]) + + def test_extend__replace_key__replace_upper_level(self): + self.verify(OperationType.REPLACE, + ["ACL_TABLE", "EVERFLOW", "policy_desc"], + ["ACL_TABLE", "EVERFLOW", "policy_desc"], + cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], + ex_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW', 'value':{ + "policy_desc": "EVERFLOW", + "ports": [ + "Ethernet8" + ], + "stage": "ingress", + "type": "MIRROR" + }}]) + + def test_extend__replace_list_item__replace_upper_level(self): + self.verify(OperationType.REPLACE, + ["VLAN", "Vlan1000", "dhcp_servers", 1], + ["VLAN", "Vlan1000", "dhcp_servers", 1], + cc_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers/1', 'value':'192.0.0.7'}], + ex_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers', 'value':[ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4" + ]}]) + + def test_extend__replace_table__replace_whole_config(self): + self.verify(OperationType.REPLACE, + ["VLAN"], + ["VLAN"], + cc_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers/1', 'value':'192.0.0.7'}], + ex_ops=[{'op':'replace', 'path':'', 'value':Files.CROPPED_CONFIG_DB_AS_JSON}]) + + def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[]): + """ + cc_ops, tc_ops are used to build the diff object. + diff, op_type, ctokens, ttokens are used to build the move. + move is extended and the result should match ex_ops. 
+ """ + # Arrange + current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, op_type, ctokens, ttokens) + + # Act + moves = self.extender.extend(move, diff) + + # Assert + self.verify_moves(ex_ops, moves) + + def verify_moves(self, ex_ops, moves): + moves_ops = [list(move.patch)[0] for move in moves] + self.assertCountEqual(ex_ops, moves_ops) + +class TestDeleteInsteadOfReplaceMoveExtender(unittest.TestCase): + def setUp(self): + self.extender = ps.DeleteInsteadOfReplaceMoveExtender() + + def test_extend__non_replace__no_extended_moves(self): + self.verify(OperationType.REMOVE, + ["ACL_TABLE"], + tc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], + ex_ops=[]) + self.verify(OperationType.ADD, + ["ACL_TABLE"], + ["ACL_TABLE"], + cc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], + ex_ops=[]) + + def test_extend__replace_key__delete_key(self): + self.verify(OperationType.REPLACE, + ["ACL_TABLE", "EVERFLOW", "policy_desc"], + ["ACL_TABLE", "EVERFLOW", "policy_desc"], + cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], + ex_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW/policy_desc'}]) + + def test_extend__replace_list_item__delete_list_item(self): + self.verify(OperationType.REPLACE, + ["VLAN", "Vlan1000", "dhcp_servers", 1], + ["VLAN", "Vlan1000", "dhcp_servers", 1], + cc_ops=[{'op':'replace', 'path':'/VLAN/Vlan1000/dhcp_servers/1', 'value':'192.0.0.7'}], + ex_ops=[{'op':'remove', 'path':'/VLAN/Vlan1000/dhcp_servers/1'}]) + + def test_extend__replace_table__delete_table(self): + self.verify(OperationType.REPLACE, + ["ACL_TABLE"], + ["ACL_TABLE"], + cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], + ex_ops=[{'op':'remove', 'path':'/ACL_TABLE'}]) + + def test_extend__replace_whole_config__delete_whole_config(self): + self.verify(OperationType.REPLACE, + [], + [], + cc_ops=[{'op':'replace', 'path':'/ACL_TABLE/EVERFLOW/policy_desc', 'value':'old_desc'}], + ex_ops=[{'op':'remove', 'path':''}]) + + def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[]): + """ + cc_ops, tc_ops are used to build the diff object. + diff, op_type, ctokens, ttokens are used to build the move. + move is extended and the result should match ex_ops. 
+ """ + # Arrange + current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, op_type, ctokens, ttokens) + + # Act + moves = self.extender.extend(move, diff) + + # Assert + self.verify_moves(ex_ops, moves) + + def verify_moves(self, ex_ops, moves): + moves_ops = [list(move.patch)[0] for move in moves] + self.assertCountEqual(ex_ops, moves_ops) + +class DeleteRefsMoveExtender(unittest.TestCase): + def setUp(self): + self.extender = ps.DeleteRefsMoveExtender(PathAddressing()) + + def test_extend__non_delete_ops__no_extended_moves(self): + self.verify(OperationType.ADD, + ["ACL_TABLE"], + ["ACL_TABLE"], + cc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], + ex_ops=[]) + self.verify(OperationType.REPLACE, + ["ACL_TABLE"], + ["ACL_TABLE"], + cc_ops=[{'op':'remove', 'path':'/ACL_TABLE/EVERFLOW'}], + ex_ops=[]) + + def test_extend__path_with_no_refs__no_extended_moves(self): + self.verify(OperationType.REMOVE, + ["ACL_TABLE"], + tc_ops=[{'op':'remove', 'path':'/ACL_TABLE'}], + ex_ops=[]) + + def test_extend__path_with_direct_refs__extended_moves(self): + self.verify(OperationType.REMOVE, + ["PORT", "Ethernet0"], + tc_ops=[{'op':'remove', 'path':'/PORT/Ethernet0'}], + ex_ops=[{'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet0'}, + {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/0'}]) + + def test_extend__path_with_refs_to_children__extended_moves(self): + self.verify(OperationType.REMOVE, + ["PORT"], + tc_ops=[{'op':'remove', 'path':'/PORT/Ethernet0'}], + ex_ops=[{'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet0'}, + {'op': 'remove', 'path': '/ACL_TABLE/NO-NSW-PACL-V4/ports/0'}, + {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet4'}, + {'op': 'remove', 'path': '/ACL_TABLE/DATAACL/ports/0'}, + {'op': 'remove', 'path': '/VLAN_MEMBER/Vlan1000|Ethernet8'}, + {'op': 'remove', 'path': '/ACL_TABLE/EVERFLOWV6/ports/0'}, + {'op': 'remove', 'path': '/ACL_TABLE/EVERFLOW/ports/0'}, + {'op': 'remove', 'path': '/ACL_TABLE/EVERFLOWV6/ports/1'}]) + + def verify(self, op_type, ctokens, ttokens=None, cc_ops=[], tc_ops=[], ex_ops=[]): + """ + cc_ops, tc_ops are used to build the diff object. + diff, op_type, ctokens, ttokens are used to build the move. + move is extended and the result should match ex_ops. 
+ """ + # Arrange + current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + diff = ps.Diff(current_config, target_config) + move = ps.JsonMove(diff, op_type, ctokens, ttokens) + + # Act + moves = self.extender.extend(move, diff) + + # Assert + self.verify_moves(ex_ops, moves) + + def verify_moves(self, ex_ops, moves): + moves_ops = [list(move.patch)[0] for move in moves] + self.assertCountEqual(ex_ops, moves_ops) + +class TestSortAlgorithmFactory(unittest.TestCase): + def test_dfs_sorter(self): + self.verify(ps.Algorithm.DFS, ps.DfsSorter) + + def test_bfs_sorter(self): + self.verify(ps.Algorithm.BFS, ps.BfsSorter) + + def test_memoization_sorter(self): + self.verify(ps.Algorithm.MEMOIZATION, ps.MemoizationSorter) + + def verify(self, algo, algo_class): + # Arrange + factory = ps.SortAlgorithmFactory(OperationWrapper(), ConfigWrapper(), PathAddressing()) + expected_generators = [ps.LowLevelMoveGenerator] + expected_extenders = [ps.UpperLevelMoveExtender, ps.DeleteInsteadOfReplaceMoveExtender, ps.DeleteRefsMoveExtender] + expected_validator = [ps.DeleteWholeConfigMoveValidator, + ps.FullConfigMoveValidator, + ps.NoDependencyMoveValidator, + ps.UniqueLanesMoveValidator, + ps.CreateOnlyMoveValidator] + + # Act + sorter = factory.create(algo) + actual_generators = [type(item) for item in sorter.move_wrapper.move_generators] + actual_extenders = [type(item) for item in sorter.move_wrapper.move_extenders] + actual_validators = [type(item) for item in sorter.move_wrapper.move_validators] + + # Assert + self.assertIsInstance(sorter, algo_class) + self.assertCountEqual(expected_generators, actual_generators) + self.assertCountEqual(expected_extenders, actual_extenders) + self.assertCountEqual(expected_validator, actual_validators) + +class TestPatchSorter(unittest.TestCase): + def create_patch_sorter(self, config=None): + if config is None: + config=Files.CROPPED_CONFIG_DB_AS_JSON + config_wrapper = ConfigWrapper() + config_wrapper.get_config_db_as_json = MagicMock(return_value=config) + patch_wrapper = PatchWrapper(config_wrapper) + operation_wrapper = OperationWrapper() + path_addressing= ps.PathAddressing() + sort_algorithm_factory = ps.SortAlgorithmFactory(operation_wrapper, config_wrapper, path_addressing) + + return ps.PatchSorter(config_wrapper, patch_wrapper, sort_algorithm_factory) + + def test_sort__empty_patch__returns_empty_changes_list(self): + # Arrange + patch = jsonpatch.JsonPatch([]) + expected = [] + + # Act + actual = self.create_patch_sorter().sort(patch) + + # Assert + self.assertCountEqual(expected, actual) + + def test_sort__patch_with_single_simple_operation__returns_one_change(self): + # Arrange + patch = jsonpatch.JsonPatch([{"op":"remove", "path":"/VLAN/Vlan1000/dhcp_servers/0"}]) + expected = [JsonChange(patch)] + + # Act + actual = self.create_patch_sorter().sort(patch) + + # Assert + self.assertCountEqual(expected, actual) + + def test_sort__replacing_create_only_field__success(self): + # Arrange + patch = jsonpatch.JsonPatch([{"op":"replace", "path": "/PORT/Ethernet0/lanes", "value":"67"}]) + + # Act + actual = self.create_patch_sorter(Files.DPB_1_SPLIT_FULL_CONFIG).sort(patch) + + # Assert + self.assertNotEqual(None, actual) + + def test_sort__inter_dependency_within_same_table__success(self): + # Arrange + patch = jsonpatch.JsonPatch([{"op":"add", "path":"/VLAN_INTERFACE", "value": { + "Vlan1000|fc02:1000::1/64": {}, + "Vlan1000|192.168.0.1/21": {}, + 
"Vlan1000": {} + }}]) + expected = [ + JsonChange(jsonpatch.JsonPatch([{"op": "add", "path": "/VLAN_INTERFACE", "value": {"Vlan1000": {}}}])), + JsonChange(jsonpatch.JsonPatch([{"op": "add", "path": "/VLAN_INTERFACE/Vlan1000|fc02:1000::1~164", "value": {}}])), + JsonChange(jsonpatch.JsonPatch([{"op": "add", "path": "/VLAN_INTERFACE/Vlan1000|192.168.0.1~121", "value": {}}])) + ] + + # Act + actual = self.create_patch_sorter().sort(patch) + + # Assert + self.assertListEqual(expected, actual) + + def test_sort__add_table__success(self): + self.verify(cc_ops=[{"op":"remove", "path":"/ACL_TABLE"}]) + + def test_sort__remove_table__success(self): + self.verify(tc_ops=[{"op":"remove", "path":"/ACL_TABLE"}]) + + def test_sort__modify_value_in_existing_table__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"/ACL_TABLE/EVERFLOW/stage", "value":"egress"}]) + + def test_sort__modify_value_in_existing_array__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"/ACL_TABLE/EVERFLOWV6/ports/0", "value":"Ethernet0"}]) + + def test_sort__add_value_to_existing_array__success(self): + self.verify(tc_ops=[{"op":"add", "path":"/ACL_TABLE/EVERFLOWV6/ports/0", "value":"Ethernet0"}]) + + def test_sort__add_new_key_to_existing_table__success(self): + self.verify(cc_ops=[{"op":"remove", "path":"/ACL_TABLE/EVERFLOWV6"}]) + + def test_sort__remove_2_items_with_dependency_from_different_tables__success(self): + self.verify(tc_ops=[{"op":"remove", "path":"/PORT/Ethernet0"}, + {"op":"remove", "path":"/VLAN_MEMBER/Vlan1000|Ethernet0"}, + {"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}], # removing ACL from current and target + cc_ops=[{"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}]) + + def test_sort__add_2_items_with_dependency_from_different_tables__success(self): + self.verify(tc_ops=[{"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}], # removing ACL from current and target + cc_ops=[{"op":"remove", "path":"/PORT/Ethernet0"}, + {"op":"remove", "path":"/VLAN_MEMBER/Vlan1000|Ethernet0"}, + {"op":"remove", "path":"/ACL_TABLE/NO-NSW-PACL-V4"}]) + + def test_sort__remove_2_items_with_dependency_from_same_table__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}, + {"op":"remove", "path":"/INTERFACE/Ethernet8"}, + {"op":"remove", "path":"/INTERFACE/Ethernet8|10.0.0.1~130"}], + cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}]) + + def test_sort__add_2_items_with_dependency_from_same_table__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}], + cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_INTERFACE}, + {"op":"remove", "path":"/INTERFACE/Ethernet8"}, + {"op":"remove", "path":"/INTERFACE/Ethernet8|10.0.0.1~130"}]) + + def test_sort__replace_mandatory_item__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"/ACL_TABLE/EVERFLOWV6/type", "value":"L2"}]) + + def test_sort__dpb_1_to_4__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.DPB_4_SPLITS_FULL_CONFIG}], + cc_ops=[{"op":"replace", "path":"", "value":Files.DPB_1_SPLIT_FULL_CONFIG}]) + + def test_sort__dpb_4_to_1__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.DPB_1_SPLIT_FULL_CONFIG}], + cc_ops=[{"op":"replace", "path":"", "value":Files.DPB_4_SPLITS_FULL_CONFIG}]) + + def test_sort__remove_an_item_with_default_value__success(self): + self.verify(tc_ops=[{"op":"remove", "path":"/ACL_TABLE/EVERFLOW/stage"}]) + + def 
test_sort__modify_items_with_dependencies_using_must__success(self): + self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}, + {"op":"replace", "path":"/CRM/Config/acl_counter_high_threshold", "value":"60"}, + {"op":"replace", "path":"/CRM/Config/acl_counter_low_threshold", "value":"50"}], + cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}]) + + # in the following example, it is possible to start with acl_counter_high_threshold + self.verify(tc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}, + {"op":"replace", "path":"/CRM/Config/acl_counter_high_threshold", "value":"80"}, + {"op":"replace", "path":"/CRM/Config/acl_counter_low_threshold", "value":"60"}], + cc_ops=[{"op":"replace", "path":"", "value":Files.CONFIG_DB_WITH_CRM}]) + + def verify(self, cc_ops=[], tc_ops=[]): + # Arrange + config_wrapper=ConfigWrapper() + target_config=jsonpatch.JsonPatch(tc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + current_config=jsonpatch.JsonPatch(cc_ops).apply(Files.CROPPED_CONFIG_DB_AS_JSON) + patch=jsonpatch.make_patch(current_config, target_config) + + # Act + actual = self.create_patch_sorter(current_config).sort(patch) + + # Assert + simulated_config = current_config + for move in actual: + simulated_config = move.apply(simulated_config) + self.assertTrue(config_wrapper.validate_config_db_config(simulated_config)) + self.assertEqual(target_config, simulated_config) From 16fa940646bd54f79f2c1c16a9b6f8bd52c4bdb1 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Thu, 19 Aug 2021 03:25:10 +0000 Subject: [PATCH 28/60] generate_dump updated Signed-off-by: Vivek Reddy Karri --- scripts/generate_dump | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/scripts/generate_dump b/scripts/generate_dump index 302571068f..713561e987 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -39,6 +39,14 @@ USER=${USER:-root} TIMEOUT_MIN="5" SKIP_BCMCMD=0 +handle_signal() +{ + echo "Generate Dump received interrupt" >&2 + $RM $V -rf $TARDIR + exit 1 +} +trap 'handle_signal' SIGINT + save_bcmcmd() { local start_t=$(date +%s%3N) local end_t=0 @@ -1010,14 +1018,6 @@ save_counter_snapshot() { save_cmd_all_ns "ifconfig -a" "ifconfig.counters_$idx" } -handle_error() { - if [ "$1" != "0" ]; then - echo "ERR: RC:-$1 observed on line $2" >&2 - RETURN_CODE=1 - fi - return $1 -} - ############################################################################### # Main generate_dump routine # Globals: @@ -1208,11 +1208,11 @@ main() { echo "WARNING: gzip operation appears to have failed." >&2 fi fi - + # Invoke the TechSupport Cleanup Hook setsid $(echo > /tmp/techsupport_cleanup.log; python3 /usr/local/bin/techsupport_cleanup.py ${TARFILE} &>> /tmp/techsupport_cleanup.log) & - + echo ${TARFILE} } @@ -1273,7 +1273,7 @@ OPTIONS EOF } -while getopts ":xnvhzas:t:" opt; do +while getopts ":xnvhzas:t:" opt; do case $opt in x) # enable bash debugging From 556756b410d4d1ac64f1dd3361945d0fc3ccb484 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Thu, 19 Aug 2021 03:27:32 +0000 Subject: [PATCH 29/60] setup.py updated Signed-off-by: Vivek Reddy Karri --- setup.py | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/setup.py b/setup.py index 7d89b643d4..d0eb2febcc 100644 --- a/setup.py +++ b/setup.py @@ -5,21 +5,9 @@ # under scripts/. 
Consider stop using scripts and use console_scripts instead # # https://stackoverflow.com/questions/18787036/difference-between-entry-points-console-scripts-and-scripts-in-setup-py -import fastentrypoints, sys +import fastentrypoints from setuptools import setup -from setuptools.command.test import test as TestCommand - -class PyTest(TestCommand): - user_options = [("pytest-args=", "a", "Arguments to pass to pytest")] - def initialize_options(self): - TestCommand.initialize_options(self) - self.pytest_args = "" - def run_tests(self): - import shlex - import pytest - errno = pytest.main(shlex.split(self.pytest_args)) - sys.exit(errno) setup( name='sonic-utilities', @@ -232,6 +220,5 @@ def run_tests(self): 'Topic :: Utilities', ], keywords='sonic SONiC utilities command line cli CLI', - cmdclass={"pytest": PyTest}, test_suite='setup.get_test_suite' ) From c08644f73392428887e31ef43287e78bfbfbd06d Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Fri, 20 Aug 2021 00:20:02 +0000 Subject: [PATCH 30/60] Beautifier changes Signed-off-by: Vivek Reddy Karri --- config/plugins/auto_techsupport.py | 80 +-------------------- scripts/coredump_gen_handler.py | 10 +-- show/plugins/auto_techsupport.py | 61 ++++++++-------- tests/coredump_gen_handler_test.py | 11 +-- tests/techsupport_cleanup_test.py | 4 +- utilities_common/auto_techsupport_helper.py | 11 +-- 6 files changed, 51 insertions(+), 126 deletions(-) diff --git a/config/plugins/auto_techsupport.py b/config/plugins/auto_techsupport.py index 7db3bf7fa5..7299c40e21 100644 --- a/config/plugins/auto_techsupport.py +++ b/config/plugins/auto_techsupport.py @@ -125,41 +125,6 @@ def clear_list_entry_validated(db, table, key, attr): update_entry_validated(db, table, key, {attr: None}) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - @click.group(name="auto-techsupport", cls=clicommon.AliasedGroup) def AUTO_TECHSUPPORT(): @@ -168,8 +133,6 @@ def AUTO_TECHSUPPORT(): pass - - @AUTO_TECHSUPPORT.group(name="global", cls=clicommon.AliasedGroup) @clicommon.pass_db @@ -179,10 +142,7 @@ def AUTO_TECHSUPPORT_global(db): pass - - @AUTO_TECHSUPPORT_global.command(name="auto-invoke-ts") - @click.argument( "auto-invoke-ts", nargs=1, @@ -203,9 +163,7 @@ def AUTO_TECHSUPPORT_global_auto_invoke_ts(db, auto_invoke_ts): exit_with_error(f"Error: {err}", fg="red") - @AUTO_TECHSUPPORT_global.command(name="coredump-cleanup") - @click.argument( "coredump-cleanup", nargs=1, @@ -226,9 +184,7 @@ def AUTO_TECHSUPPORT_global_coredump_cleanup(db, coredump_cleanup): exit_with_error(f"Error: {err}", fg="red") - @AUTO_TECHSUPPORT_global.command(name="techsupport-cleanup") - @click.argument( "techsupport-cleanup", nargs=1, @@ -249,9 +205,7 @@ def AUTO_TECHSUPPORT_global_techsupport_cleanup(db, techsupport_cleanup): exit_with_error(f"Error: {err}", fg="red") - @AUTO_TECHSUPPORT_global.command(name="cooloff") - @click.argument( "cooloff", nargs=1, @@ -272,9 +226,7 @@ def AUTO_TECHSUPPORT_global_cooloff(db, cooloff): exit_with_error(f"Error: {err}", fg="red") - @AUTO_TECHSUPPORT_global.command(name="max-techsupport-size") - @click.argument( "max-techsupport-size", nargs=1, @@ -295,9 +247,7 @@ def AUTO_TECHSUPPORT_global_max_techsupport_size(db, max_techsupport_size): exit_with_error(f"Error: {err}", fg="red") - @AUTO_TECHSUPPORT_global.command(name="core-usage") - @click.argument( "core-usage", nargs=1, @@ -318,9 +268,7 @@ def AUTO_TECHSUPPORT_global_core_usage(db, core_usage): exit_with_error(f"Error: {err}", fg="red") - @AUTO_TECHSUPPORT_global.command(name="since") 
- @click.argument( "since", nargs=1, @@ -341,34 +289,8 @@ def AUTO_TECHSUPPORT_global_since(db, since): exit_with_error(f"Error: {err}", fg="red") - - - - - - - - - - - - - - - - - - - - - - - - - - def register(cli): cli_node = AUTO_TECHSUPPORT if cli_node.name in cli.commands: raise Exception(f"{cli_node.name} already exists in CLI") - cli.add_command(AUTO_TECHSUPPORT) \ No newline at end of file + cli.add_command(AUTO_TECHSUPPORT) diff --git a/scripts/coredump_gen_handler.py b/scripts/coredump_gen_handler.py index 2bbfa58815..5f6d1a2d02 100644 --- a/scripts/coredump_gen_handler.py +++ b/scripts/coredump_gen_handler.py @@ -31,13 +31,14 @@ def handle_coredump_cleanup(dump_name, db): syslog.syslog(syslog.LOG_INFO, "No Cleanup is performed, current size occupied: {}".format(pretty_size(num_bytes))) return - cleanup_process(core_usage, CORE_DUMP_PTRN, CORE_DUMP_DIR) + cleanup_process(core_usage, CORE_DUMP_PTRN, CORE_DUMP_DIR) class CriticalProcCoreDumpHandle(): """ Class to handle coredump creation event for critical processes """ + def __init__(self, core_name, db): self.core_name = core_name self.db = db @@ -71,12 +72,12 @@ def handle_core_dump_creation_event(self): try: global_cooloff = float(global_cooloff) - except: + except BaseException: global_cooloff = 0.0 try: proc_cooloff = float(proc_cooloff) - except: + except BaseException: proc_cooloff = 0.0 cooloff_passed = self.verify_cooloff(global_cooloff, proc_cooloff, process_name) @@ -181,5 +182,6 @@ def main(): cls.handle_core_dump_creation_event() handle_coredump_cleanup(args.name, db) + if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/show/plugins/auto_techsupport.py b/show/plugins/auto_techsupport.py index 18b7b2954d..58a9b1a9f3 100644 --- a/show/plugins/auto_techsupport.py +++ b/show/plugins/auto_techsupport.py @@ -42,7 +42,6 @@ def format_group_value(entry, attrs): return tabulate.tabulate(data, tablefmt="plain") - @click.group(name="auto-techsupport", cls=clicommon.AliasedGroup) def AUTO_TECHSUPPORT(): @@ -68,35 +67,35 @@ def AUTO_TECHSUPPORT_global(db): table = db.cfgdb.get_table("AUTO_TECHSUPPORT") entry = table.get("global", {}) row = [ - format_attr_value( - entry, - {'name': 'auto_invoke_ts', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} - ), - format_attr_value( - entry, - {'name': 'coredump_cleanup', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} - ), - format_attr_value( - entry, - {'name': 'techsupport_cleanup', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} - ), - format_attr_value( - entry, - {'name': 'cooloff', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} - ), - format_attr_value( - entry, - {'name': 'max_techsupport_size', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} - ), - format_attr_value( - entry, - {'name': 'core_usage', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} - ), - format_attr_value( - entry, - {'name': 'since', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} - ), -] + format_attr_value( + entry, + {'name': 'auto_invoke_ts', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'coredump_cleanup', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'techsupport_cleanup', 'description': '', 'is-leaf-list': 
False, 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'cooloff', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'max_techsupport_size', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'core_usage', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'since', 'description': '', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + ] body.append(row) click.echo(tabulate.tabulate(body, header)) @@ -117,4 +116,4 @@ def register(cli): cli_node = AUTO_TECHSUPPORT if cli_node.name in cli.commands: raise Exception(f"{cli_node.name} already exists in CLI") - cli.add_command(AUTO_TECHSUPPORT) \ No newline at end of file + cli.add_command(AUTO_TECHSUPPORT) diff --git a/tests/coredump_gen_handler_test.py b/tests/coredump_gen_handler_test.py index af777ae919..e05d03432a 100644 --- a/tests/coredump_gen_handler_test.py +++ b/tests/coredump_gen_handler_test.py @@ -1,4 +1,5 @@ -import os, time +import os +import time import sys import pyfakefs import unittest @@ -13,10 +14,10 @@ def set_auto_ts_cfg(redis_mock, auto_invoke_ts="disabled", - core_cleanup="disabled", - cooloff="0", - core_usage="0", - since_cfg="None"): + core_cleanup="disabled", + cooloff="0", + core_usage="0", + since_cfg="None"): redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.CFG_INVOC_TS, auto_invoke_ts) redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.COOLOFF, cooloff) redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.CFG_CORE_USAGE, core_usage) diff --git a/tests/techsupport_cleanup_test.py b/tests/techsupport_cleanup_test.py index 1fc82336c8..4f5a66965f 100644 --- a/tests/techsupport_cleanup_test.py +++ b/tests/techsupport_cleanup_test.py @@ -1,3 +1,4 @@ +import techsupport_cleanup as ts_mod import os import sys import pyfakefs @@ -9,7 +10,6 @@ from .mock_tables import dbconnector sys.path.append("scripts") -import techsupport_cleanup as ts_mod def set_auto_ts_cfg(redis_mock, ts_cleanup="disabled", max_ts="0"): @@ -105,4 +105,4 @@ def test_state_db_update(self): assert "sonic_dump_random3.tar.gz" in current_fs final_state = redis_mock.get_all(ts_mod.STATE_DB, ts_mod.TS_MAP) assert "sonic_dump_random2.tar.gz" in final_state - assert "sonic_dump_random1.tar.gz" not in final_state \ No newline at end of file + assert "sonic_dump_random1.tar.gz" not in final_state diff --git a/utilities_common/auto_techsupport_helper.py b/utilities_common/auto_techsupport_helper.py index 456be71cc4..f8983aea95 100644 --- a/utilities_common/auto_techsupport_helper.py +++ b/utilities_common/auto_techsupport_helper.py @@ -1,4 +1,5 @@ -import os, re +import os +import re import sys import glob import time @@ -56,7 +57,7 @@ SLEEP_FOR = 5 -##### Helper methods +# Helper methods def subprocess_exec(cmd, env=None): output = subprocess.run( cmd, @@ -81,7 +82,7 @@ def verify_recent_file_creation(file_path, in_last_sec=TIME_BUF): curr = time.time() try: was_created_on = os.path.getmtime(file_path) - except: + except BaseException: return False if curr - was_created_on < in_last_sec: return True @@ -141,7 +142,7 @@ def cleanup_process(limit, file_ptrn, dir): fs_stats, curr_size = get_stats(os.path.join(dir, file_ptrn)) orig_dumps = len(fs_stats) disk_stats = shutil.disk_usage(dir) - max_limit_bytes = math.floor((limit*disk_stats.total/100)) + max_limit_bytes = 
math.floor((limit * disk_stats.total / 100)) if curr_size <= max_limit_bytes: return @@ -159,4 +160,4 @@ def cleanup_process(limit, file_ptrn, dir): continue num_deleted += stat[1] syslog.syslog(syslog.LOG_INFO, "{} deleted from {}".format(pretty_size(num_deleted), dir)) - return removed_files \ No newline at end of file + return removed_files From f4071714f5f313491c4dfec19ac2f3b759ee8781 Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Fri, 20 Aug 2021 00:50:52 +0000 Subject: [PATCH 31/60] Minor Changes Signed-off-by: Vivek Reddy Karri --- scripts/coredump_gen_handler.py | 13 +++++++++++-- scripts/techsupport_cleanup.py | 7 +++++-- utilities_common/auto_techsupport_helper.py | 12 ++++++------ 3 files changed, 22 insertions(+), 10 deletions(-) diff --git a/scripts/coredump_gen_handler.py b/scripts/coredump_gen_handler.py index 5f6d1a2d02..7f316dd641 100644 --- a/scripts/coredump_gen_handler.py +++ b/scripts/coredump_gen_handler.py @@ -17,7 +17,15 @@ def handle_coredump_cleanup(dump_name, db): + file_path = os.path.join(CORE_DUMP_DIR, dump_name) + if not verify_recent_file_creation(file_path): + return + + _, num_bytes = get_stats(os.path.join(CORE_DUMP_DIR, CORE_DUMP_PTRN)) + if db.get(CFG_DB, AUTO_TS, CFG_CORE_CLEANUP) != "enabled": + msg = "coredump_cleanup is disabled. No cleanup is performed. current size occupied : {}" + syslog.syslog(syslog.LOG_NOTICE, msg.format(pretty_size(num_bytes))) return core_usage = db.get(CFG_DB, AUTO_TS, CFG_CORE_USAGE) @@ -27,8 +35,8 @@ def handle_coredump_cleanup(dump_name, db): core_usage = 0.0 if not core_usage: - _, num_bytes = get_stats(os.path.join(CORE_DUMP_DIR, CORE_DUMP_PTRN)) - syslog.syslog(syslog.LOG_INFO, "No Cleanup is performed, current size occupied: {}".format(pretty_size(num_bytes))) + msg = "core-usage argument is not set. No cleanup is performed, current size occupied: {}" + syslog.syslog(syslog.LOG_NOTICE, msg.format(pretty_size(num_bytes))) return cleanup_process(core_usage, CORE_DUMP_PTRN, CORE_DUMP_DIR) @@ -53,6 +61,7 @@ def handle_core_dump_creation_event(self): return if self.db.get(CFG_DB, AUTO_TS, CFG_INVOC_TS) != "enabled": + syslog.syslog(syslog.LOG_NOTICE, "auto_invoke_ts is disabled. No cleanup is performed: core {}".format(self.core_name)) return container_name, process_name = self.fetch_exit_event() diff --git a/scripts/techsupport_cleanup.py b/scripts/techsupport_cleanup.py index 9fa30a698a..aa0ca34606 100644 --- a/scripts/techsupport_cleanup.py +++ b/scripts/techsupport_cleanup.py @@ -28,8 +28,11 @@ def handle_techsupport_creation_event(dump_name, db): if not verify_recent_file_creation(file_path): return curr_list = get_ts_dumps() + _ , num_bytes = get_stats(os.path.join(TS_DIR, TS_PTRN)) if db.get(CFG_DB, AUTO_TS, CFG_TS_CLEANUP) != "enabled": + msg = "techsupport_cleanup is disabled. No cleanup is performed. current size occupied : {}" + syslog.syslog(syslog.LOG_NOTICE, msg.format(pretty_size(num_bytes))) return max_ts = db.get(CFG_DB, AUTO_TS, CFG_MAX_TS) @@ -39,8 +42,8 @@ def handle_techsupport_creation_event(dump_name, db): max_ts = 0.0 if not max_ts: - _ , num_bytes = get_stats(os.path.join(TS_DIR, TS_PTRN)) - syslog.syslog(syslog.LOG_INFO, "No Cleanup is performed, current size occupied: {}".format(pretty_size(num_bytes))) + msg = "max-techsupport-size argument is not set. 
No cleanup is performed, current size occupied: {}" + syslog.syslog(syslog.LOG_NOTICE, msg.format(pretty_size(num_bytes))) return removed_files = cleanup_process(max_ts, TS_PTRN, TS_DIR) diff --git a/utilities_common/auto_techsupport_helper.py b/utilities_common/auto_techsupport_helper.py index f8983aea95..d844c560a9 100644 --- a/utilities_common/auto_techsupport_helper.py +++ b/utilities_common/auto_techsupport_helper.py @@ -40,14 +40,14 @@ sonic_dump_sonic_20210405_202756 = python3.1617684247.17.core;1617684249;snmp-subagent """ -CRITICAL_PROC = "AUTO_TECHSUPPORT|PROC_EXIT_EVENTS" +CRITICAL_PROC = "AUTO_TECHSUPPORT|FEATURE_PROC_INFO" """ -key = "AUTO_TECHSUPPORT|PROC_EXIT_EVENTS" - = +key = "AUTO_TECHSUPPORT|FEATURE_PROC_INFO" + = Eg: - = "swss;orchagent" - = "snmp;snmp-subagent" - = "lldp;lldp_syncd" + = + = + = """ TIME_BUF = 20 From b2ff906bef89ae1bf93472b3e7ba11f2d213d1de Mon Sep 17 00:00:00 2001 From: Vivek Reddy Karri Date: Tue, 31 Aug 2021 00:46:45 +0000 Subject: [PATCH 32/60] Handled comments Signed-off-by: Vivek Reddy Karri --- scripts/coredump_gen_handler.py | 3 +- tests/coredump_gen_handler_test.py | 46 ++++++++++----------- utilities_common/auto_techsupport_helper.py | 31 ++++++++++---- 3 files changed, 47 insertions(+), 33 deletions(-) diff --git a/scripts/coredump_gen_handler.py b/scripts/coredump_gen_handler.py index 7f316dd641..790d6ea718 100644 --- a/scripts/coredump_gen_handler.py +++ b/scripts/coredump_gen_handler.py @@ -77,7 +77,7 @@ def handle_core_dump_creation_event(self): return global_cooloff = self.db.get(CFG_DB, AUTO_TS, COOLOFF) - proc_cooloff = self.db.get(CFG_DB, FEATURE_KEY, COOLOFF) + proc_cooloff = self.db.get(CFG_DB, AUTO_TS_RATE_INTV, container_name) try: global_cooloff = float(global_cooloff) @@ -133,7 +133,6 @@ def verify_cooloff(self, global_cooloff, proc_cooloff, proc): ts_map = self.db.get_all(STATE_DB, TS_MAP) self.parse_ts_map(ts_map) - print(self.core_ts_map) if proc_cooloff and proc in self.core_ts_map: last_creation_time = self.core_ts_map[proc][0][0] if time.time() - last_creation_time < proc_cooloff: diff --git a/tests/coredump_gen_handler_test.py b/tests/coredump_gen_handler_test.py index e05d03432a..224ba07ebe 100644 --- a/tests/coredump_gen_handler_test.py +++ b/tests/coredump_gen_handler_test.py @@ -15,19 +15,19 @@ def set_auto_ts_cfg(redis_mock, auto_invoke_ts="disabled", core_cleanup="disabled", - cooloff="0", - core_usage="0", + rate_limit_interval="0", + max_core_size="0", since_cfg="None"): redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.CFG_INVOC_TS, auto_invoke_ts) - redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.COOLOFF, cooloff) - redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.CFG_CORE_USAGE, core_usage) + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.COOLOFF, rate_limit_interval) + redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.CFG_CORE_USAGE, max_core_size) redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.CFG_CORE_CLEANUP, core_cleanup) redis_mock.set(cdump_mod.CFG_DB, cdump_mod.AUTO_TS, cdump_mod.CFG_SINCE, since_cfg) -def set_feature_table_cfg(redis_mock, ts="disabled", cooloff="0", container_name="swss"): +def set_feature_table_cfg(redis_mock, ts="disabled", rate_limit_interval="0", container_name="swss"): redis_mock.set(cdump_mod.CFG_DB, cdump_mod.FEATURE.format(container_name), cdump_mod.TS, ts) - redis_mock.set(cdump_mod.CFG_DB, cdump_mod.FEATURE.format(container_name), cdump_mod.COOLOFF, cooloff) + redis_mock.set(cdump_mod.CFG_DB, 
 
 def populate_state_db(redis_mock,
@@ -48,7 +48,7 @@ def setUp(self):
 
     def test_invoc_ts_state_db_update(self):
         """
-        Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled and no cooloff is provided
+        Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled and no rate_limit_interval is provided
         Check if techsupport is invoked, file is created and State DB is updated
         """
         db_wrap = Db()
@@ -80,14 +80,14 @@ def mock_cmd(cmd):
         assert "sonic_dump_random3.tar.gz" in final_state
         assert "orchagent" in final_state["sonic_dump_random3.tar.gz"]
 
-    def test_global_cooloff(self):
+    def test_global_rate_limit_interval(self):
         """
         Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is enabled
-        Global cooloff is not passed yet. Check if techsupport isn't invoked.
+        Global rate_limit_interval has not passed yet. Check if techsupport isn't invoked.
         """
         db_wrap = Db()
         redis_mock = db_wrap.db
-        set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled", cooloff="1")
+        set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled", rate_limit_interval="1")
         set_feature_table_cfg(redis_mock, ts="enabled")
         populate_state_db(redis_mock)
         with Patcher() as patcher:
@@ -113,15 +113,15 @@ def mock_cmd(cmd):
         assert "sonic_dump_random2.tar.gz" in final_state
         assert "sonic_dump_random3.tar.gz" not in final_state
 
-    def test_per_proc_cooloff(self):
+    def test_per_proc_rate_limit_interval(self):
         """
         Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled. Global Cooloff is passed
-        But Per Proc cooloff is not passed yet. Check if techsupport isn't invoked
+        But Per Proc rate_limit_interval has not passed yet. Check if techsupport isn't invoked
         """
         db_wrap = Db()
         redis_mock = db_wrap.db
-        set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled", cooloff="0.25")
-        set_feature_table_cfg(redis_mock, ts="enabled", cooloff="10")
+        set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled", rate_limit_interval="0.25")
+        set_feature_table_cfg(redis_mock, ts="enabled", rate_limit_interval="10")
         populate_state_db(redis_mock,
                           ts_map={"sonic_dump_random1.tar.gz":
                                   "orchagent;{};orchagent".format(int(time.time()))})
         with Patcher() as patcher:
@@ -136,7 +136,7 @@ def mock_cmd(cmd):
             patcher.fs.create_file("/var/dump/sonic_dump_random1.tar.gz")
             patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz")
             cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", redis_mock)
-            time.sleep(0.25) # wait for global cooloff to pass
+            time.sleep(0.25)  # wait for the global rate_limit_interval to pass
             cls.handle_core_dump_creation_event()
         assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR)
         assert "sonic_dump_random3.tar.gz" not in os.listdir(cdump_mod.TS_DIR)
@@ -144,15 +144,15 @@ def mock_cmd(cmd):
         assert "sonic_dump_random1.tar.gz" in final_state
         assert "sonic_dump_random3.tar.gz" not in final_state
 
-    def test_invoc_ts_after_cooloff(self):
+    def test_invoc_ts_after_rate_limit_interval(self):
         """
         Scenario: CFG_INVOC_TS is enabled. CFG_CORE_CLEANUP is disabled.
-        All the cooloff's are passed. Check if techsupport is invoked
+        All the rate_limit_intervals have passed. Check if techsupport is invoked
         """
         db_wrap = Db()
         redis_mock = db_wrap.db
-        set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled", cooloff="0.1")
-        set_feature_table_cfg(redis_mock, ts="enabled", cooloff="0.25")
+        set_auto_ts_cfg(redis_mock, auto_invoke_ts="enabled", rate_limit_interval="0.1")
+        set_feature_table_cfg(redis_mock, ts="enabled", rate_limit_interval="0.25")
         populate_state_db(redis_mock,
                           ts_map={"sonic_dump_random1.tar.gz":
                                   "orchagent;{};orchagent".format(int(time.time()))})
         with Patcher() as patcher:
@@ -168,7 +168,7 @@ def mock_cmd(cmd):
             patcher.fs.create_file("/var/dump/sonic_dump_random2.tar.gz")
             patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz")
             cls = cdump_mod.CriticalProcCoreDumpHandle("orchagent.12345.123.core.gz", redis_mock)
-            time.sleep(0.25) # wait for all the cooloff's to pass
+            time.sleep(0.25)  # wait for all the rate_limit_intervals to pass
             cls.handle_core_dump_creation_event()
         assert "sonic_dump_random1.tar.gz" in os.listdir(cdump_mod.TS_DIR)
         assert "sonic_dump_random3.tar.gz" in os.listdir(cdump_mod.TS_DIR)
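These tests pin down the timing contract: a techsupport run is suppressed until both the global interval (measured against the newest dump on disk) and the per-process interval (measured against the TS_MAP record in STATE_DB) have elapsed. The fixtures pack each TS_MAP value as "core_file;epoch;container". Below is a small sketch of the per-process half of that check; the field order is taken from the fixtures above, and the parse details in the real handler may differ.

    import time

    def per_proc_interval_passed(ts_map, proc, proc_interval):
        # Find the newest recorded techsupport triggered by this process.
        latest = 0
        for _dump, packed in ts_map.items():
            core_file, epoch, _container = packed.split(";")
            if core_file.split(".")[0] == proc:
                latest = max(latest, int(epoch))
        # No prior record, or the interval has elapsed: invocation is allowed.
        return latest == 0 or (time.time() - latest) >= proc_interval

    ts_map = {"sonic_dump_random1.tar.gz": "orchagent;{};orchagent".format(int(time.time()))}
    print(per_proc_interval_passed(ts_map, "orchagent", 10))   # False until 10s elapse
    print(per_proc_interval_passed(ts_map, "lldpmgrd", 10))    # True: no record for lldpmgrd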
@@ -337,7 +337,7 @@ def test_core_dump_cleanup(self):
         """
         db_wrap = Db()
         redis_mock = db_wrap.db
-        set_auto_ts_cfg(redis_mock, core_cleanup="enabled", core_usage="6.0")
+        set_auto_ts_cfg(redis_mock, core_cleanup="enabled", max_core_size="6.0")
         with Patcher() as patcher:
             patcher.fs.set_disk_usage(1000, path="/var/core/")
             patcher.fs.create_file("/var/core/orchagent.12345.123.core.gz", st_size=25)
@@ -350,14 +350,14 @@ def test_core_dump_cleanup(self):
         assert "lldpmgrd.12345.22.core.gz" in current_fs
         assert "python3.12345.21.core.gz" in current_fs
 
-    def test_core_usage_limit_not_crossed(self):
+    def test_max_core_size_limit_not_crossed(self):
         """
         Scenario: CFG_CORE_CLEANUP is enabled. core-dump limit is not crossed
         Verify whether cleanup is performed
         """
         db_wrap = Db()
         redis_mock = db_wrap.db
-        set_auto_ts_cfg(redis_mock, core_cleanup="enabled", core_usage="5.0")
+        set_auto_ts_cfg(redis_mock, core_cleanup="enabled", max_core_size="5.0")
         with Patcher() as patcher:
             def mock_cmd(cmd):
                 cmd_str = " ".join(cmd)
diff --git a/utilities_common/auto_techsupport_helper.py b/utilities_common/auto_techsupport_helper.py
index d844c560a9..0e489289c2 100644
--- a/utilities_common/auto_techsupport_helper.py
+++ b/utilities_common/auto_techsupport_helper.py
@@ -9,21 +9,36 @@
 import math
 import syslog
 
+# MISC
+CORE_DUMP_DIR = "/var/core"
+CORE_DUMP_PTRN = "*.core.gz"
+
+TS_DIR = "/var/dump"
+TS_PTRN = "sonic_dump_*.tar*"
+
+# CONFIG DB Attributes
 CFG_DB = "CONFIG_DB"
-AUTO_TS = "AUTO_TECHSUPPORT|global"
+
+# AUTO_TECHSUPPORT|GLOBAL table attributes
+AUTO_TS = "AUTO_TECHSUPPORT|GLOBAL"
 CFG_INVOC_TS = "auto_invoke_ts"
 CFG_CORE_CLEANUP = "coredump_cleanup"
 CFG_TS_CLEANUP = "techsupport_cleanup"
 CFG_MAX_TS = "max_techsupport_size"
-COOLOFF = "cooloff"
-CFG_CORE_USAGE = "core_usage"
+COOLOFF = "rate_limit_interval"
+CFG_CORE_USAGE = "max_core_size"
 CFG_SINCE = "since"
 
-CORE_DUMP_DIR = "/var/core"
-CORE_DUMP_PTRN = "*.core.gz"
-
-TS_DIR = "/var/dump"
-TS_PTRN = "sonic_dump_*.tar*"
+# AUTO_TECHSUPPORT|RATE_LIMIT_INTERVAL table attributes
+AUTO_TS_RATE_INTV = "AUTO_TECHSUPPORT|RATE_INTERVAL"
+"""
+key = "AUTO_TECHSUPPORT|RATE_INTERVAL"
+ =
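Taken together, the helper now exposes the renamed schema end to end: the table key is AUTO_TECHSUPPORT|GLOBAL (previously AUTO_TECHSUPPORT|global), and the fields cooloff and core_usage become rate_limit_interval and max_core_size, while the Python constant names (COOLOFF, CFG_CORE_USAGE) stay the same. A sketch of reading the renamed fields with the same SonicV2Connector pattern the handlers already use; connectivity is assumed and the returned values depend on the running device:

    from swsscommon.swsscommon import SonicV2Connector

    db = SonicV2Connector(host="127.0.0.1")
    db.connect("CONFIG_DB")

    # Field names after this patch; the constants COOLOFF/CFG_CORE_USAGE in
    # auto_techsupport_helper.py now resolve to these strings.
    state = db.get("CONFIG_DB", "AUTO_TECHSUPPORT|GLOBAL", "auto_invoke_ts")
    interval = db.get("CONFIG_DB", "AUTO_TECHSUPPORT|GLOBAL", "rate_limit_interval")
    max_core = db.get("CONFIG_DB", "AUTO_TECHSUPPORT|GLOBAL", "max_core_size")
    print(state, interval, max_core)

Keeping the constant names stable while renaming only the stored strings means the scripts and tests change in one place (the helper), which is exactly what the test churn in this patch reflects.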