diff --git a/calm/dsl/api/application.py b/calm/dsl/api/application.py index 7227aa64..2edd4af0 100644 --- a/calm/dsl/api/application.py +++ b/calm/dsl/api/application.py @@ -11,6 +11,10 @@ def __init__(self, connection): self.DOWNLOAD_RUNLOG = self.ITEM + "/app_runlogs/{}/output/download" self.ACTION_VARIABLE = self.ITEM + "/actions/{}/variables" self.RECOVERY_GROUPS_LIST = self.ITEM + "/recovery_groups/list" + self.BLUEPRINTS_ORIGINAL = self.ITEM + "/blueprints/original" + self.BLUEPRINT_ENTITIES_ESCRIPT_UPDATE = ( + self.ITEM + "/blueprints/entities/update" + ) def run_action(self, app_id, action_id, payload): return self.connection._call( @@ -20,6 +24,21 @@ def run_action(self, app_id, action_id, payload): method=REQUEST.METHOD.POST, ) + def blueprints_original(self, app_id): + return self.connection._call( + self.BLUEPRINTS_ORIGINAL.format(app_id), + verify=False, + method=REQUEST.METHOD.GET, + ) + + def blueprints_entities_update(self, app_id, payload): + return self.connection._call( + self.BLUEPRINT_ENTITIES_ESCRIPT_UPDATE.format(app_id), + request_json=payload, + verify=False, + method=REQUEST.METHOD.PUT, + ) + def run_patch(self, app_id, patch_id, payload): return self.connection._call( self.PATCH_RUN.format(app_id, patch_id), diff --git a/calm/dsl/builtins/models/calm_ref.py b/calm/dsl/builtins/models/calm_ref.py index c3bac886..9833f03f 100644 --- a/calm/dsl/builtins/models/calm_ref.py +++ b/calm/dsl/builtins/models/calm_ref.py @@ -647,7 +647,14 @@ def compile(cls, name=None, **kwargs): "uuid": cache_cluster_data["uuid"], } else: - cdict = AhvCluster(name).compile() + # set parent as it is used during cluster check for whitelisting + cluster_entity = AhvCluster(name) + try: + cluster_entity.__parent__ = cls.__parent__ + except: + pass + + cdict = cluster_entity.compile() return { "kind": "cluster", "name": cdict["name"], diff --git a/calm/dsl/builtins/models/package.py b/calm/dsl/builtins/models/package.py index 4364758b..f5f34596 100644 --- 
a/calm/dsl/builtins/models/package.py +++ b/calm/dsl/builtins/models/package.py @@ -147,7 +147,7 @@ def decompile(mcls, cdict, context=[], prefix=""): elif package_type == "SUBSTRATE_IMAGE": disk_pkg_data = { "name": cdict["name"], - "description": cdict["description"], + "description": cdict.get("description", ""), "options": cdict["options"], } types = EntityTypeBase.get_entity_types() diff --git a/calm/dsl/builtins/models/profile.py b/calm/dsl/builtins/models/profile.py index b57474d5..6bcfc4f9 100644 --- a/calm/dsl/builtins/models/profile.py +++ b/calm/dsl/builtins/models/profile.py @@ -38,6 +38,18 @@ def pre_decompile(mcls, cdict, context, prefix=""): {"kind": "environment", "uuid": _eid} for _eid in env_uuids ] + # Dont support decompilation for other providers + configs = {"snapshot_config_list": [], "restore_config_list": []} + for _config in cdict.get("snapshot_config_list", []): + if _config.get("type") == "AHV_SNAPSHOT": + configs["snapshot_config_list"].append(_config) + + for _config in cdict.get("restore_config_list", []): + if _config.get("type") == "AHV_RESTORE": + configs["restore_config_list"].append(_config) + + cdict.update(configs) + return cdict def compile(cls): diff --git a/calm/dsl/builtins/models/substrate.py b/calm/dsl/builtins/models/substrate.py index a6981ff8..49788d8a 100644 --- a/calm/dsl/builtins/models/substrate.py +++ b/calm/dsl/builtins/models/substrate.py @@ -119,7 +119,8 @@ def get_referenced_account_uuid(cls): if cls_deployment.substrate.name != str(cls): continue - environment = getattr(cls_profile, "environment", {}) + profile_envs = getattr(cls_profile, "environments", []) + environment = profile_envs[0].get_dict() if profile_envs else dict() if environment: LOG.debug( "Found environment {} associated to app-profile {}".format( diff --git a/calm/dsl/builtins/models/task.py b/calm/dsl/builtins/models/task.py index 980b16ec..49104eef 100644 --- a/calm/dsl/builtins/models/task.py +++ b/calm/dsl/builtins/models/task.py @@ -13,7 
+13,7 @@ from .helper import common as common_helper from calm.dsl.log import get_logging_handle -from calm.dsl.api.handle import get_api_client +from calm.dsl.store import Cache from calm.dsl.builtins.models.ndb import ( DatabaseServer, Database, @@ -22,6 +22,7 @@ Tag, ) from calm.dsl.builtins.models.constants import NutanixDB as NutanixDBConst +from calm.dsl.constants import CACHE LOG = get_logging_handle(__name__) @@ -119,6 +120,25 @@ def decompile(mcls, cdict, context=[], prefix=""): auth_cred, prefix=prefix ) + tunnel_data = attrs.get("tunnel_reference", {}) + if tunnel_data: + if not tunnel_data.get("name"): + cache_vpc_data = Cache.get_entity_data_using_uuid( + CACHE.ENTITY.AHV_VPC, None, tunnel_uuid=tunnel_data["uuid"] + ) + + # Decompile should not fail + if not cache_vpc_data: + LOG.info( + "tunnel(uuid={}) used in task (name={}) not found".format( + tunnel_data["uuid"], cdict["name"] + ) + ) + attrs.pop("tunnel_reference", None) + + else: + tunnel_data["name"] = cache_vpc_data.get("tunnel_name") + cdict["attrs"] = attrs return super().decompile(cdict, context=context, prefix=prefix) @@ -214,8 +234,8 @@ def _exec_create( "Only one of script or filename should be given for exec task " + (name or "") ) - if script_type != "static" and tunnel is not None: - raise ValueError("Tunnel is supported only for Escript script type") + if script_type not in ["static", "static_py3"] and tunnel is not None: + raise ValueError("Tunnel is supported only for Escript script types") if filename is not None: file_path = os.path.join( @@ -276,8 +296,8 @@ def _decision_create( "One of script or filename is required for decision task " + (name or "") ) - if script_type != "static" and tunnel is not None: - raise ValueError("Tunnel is support only for Escript script type") + if script_type not in ["static", "static_py3"] and tunnel is not None: + raise ValueError("Tunnel is support only for Escript script types") params = { "name": name, @@ -438,7 +458,13 @@ def exec_task_ssh( 
def exec_task_escript( - script=None, filename=None, name=None, target=None, depth=2, tunnel=None, **kwargs + script=None, + filename=None, + name=None, + target=None, + depth=2, + tunnel=None, + **kwargs, ): return _exec_create( "static", @@ -453,6 +479,28 @@ def exec_task_escript( ) +def exec_task_escript_py3( + script=None, + filename=None, + name=None, + target=None, + depth=2, + tunnel=None, + **kwargs, +): + return _exec_create( + "static_py3", + script=script, + filename=filename, + name=name, + target=target, + target_endpoint=None, + depth=depth, + tunnel=tunnel, + **kwargs, + ) + + def exec_task_powershell( script=None, filename=None, @@ -593,7 +641,6 @@ def decision_task_escript( filename=None, name=None, target=None, - cred=None, depth=2, tunnel=None, **kwargs, @@ -605,7 +652,6 @@ def decision_task_escript( filename(str): file which has script name(str): Task name target(Entity/Ref): Entity/Ref that is the target for this task - cred (Entity/Ref): Entity/Ref that is the cred for this task depth (int): Number of times to look back in call stack, will be used to locate filename specified tunnel (ref.Tunnel): Tunnel reference :keyword inherit_target (bool): True if target needs to be inherited. @@ -618,7 +664,40 @@ def decision_task_escript( filename=filename, name=name, target=target, - cred=cred, + depth=depth, + tunnel=tunnel, + **kwargs, + ) + + +def decision_task_escript_py3( + script=None, + filename=None, + name=None, + target=None, + depth=2, + tunnel=None, + **kwargs, +): + """ + This function is used to create decision task with escript(python3) target + Args: + script(str): Script which needs to be run + filename(str): file which has script + name(str): Task name + target(Entity/Ref): Entity/Ref that is the target for this task + depth (int): Number of times to look back in call stack, will be used to locate filename specified + tunnel (ref.Tunnel): Tunnel reference + :keyword inherit_target (bool): True if target needs to be inherited. 
+ Returns: + obj: Decision task object + """ + return _decision_create( + "static_py3", + script=script, + filename=filename, + name=name, + target=target, depth=depth, tunnel=tunnel, **kwargs, @@ -694,7 +773,6 @@ def set_variable_task_escript( filename(str): file which has script name(str): Task name target(Entity/Ref): Entity/Ref that is the target for this task - cred (Entity/Ref): Entity/Ref that is the cred for this task depth (int): Number of times to look back in call stack, will be used to locate filename specified tunnel (Ref.Tunnel): Tunnel reference :keyword inherit_target (bool): True if target needs to be inherited. @@ -713,6 +791,41 @@ def set_variable_task_escript( return _set_variable_create(task, variables) +def set_variable_task_escript_py3( + script=None, + filename=None, + name=None, + target=None, + variables=None, + depth=3, + tunnel=None, + **kwargs, +): + """ + This function is used to create set variable task with escript(python3) target + Args: + script(str): Script which needs to be run + filename(str): file which has script + name(str): Task name + target(Entity/Ref): Entity/Ref that is the target for this task + depth (int): Number of times to look back in call stack, will be used to locate filename specified + tunnel (Ref.Tunnel): Tunnel reference + :keyword inherit_target (bool): True if target needs to be inherited. 
+ Returns: + obj: Set variable task object + """ + task = exec_task_escript_py3( + script=script, + filename=filename, + name=name, + target=target, + depth=depth, + tunnel=tunnel, + **kwargs, + ) + return _set_variable_create(task, variables) + + def set_variable_task_powershell( script=None, filename=None, @@ -750,6 +863,82 @@ def set_variable_task_powershell( return _set_variable_create(task, variables) +class EscriptTaskType: + class ExecTask: + def __new__( + cls, + script=None, + filename=None, + name=None, + target=None, + depth=2, + tunnel=None, + **kwargs, + ): + return exec_task_escript( + script=script, + filename=filename, + name=name, + target=target, + depth=depth + 1, + tunnel=tunnel, + **kwargs, + ) + + py2 = exec_task_escript + py3 = exec_task_escript_py3 + + class DecisionTask: + def __new__( + cls, + script=None, + filename=None, + name=None, + target=None, + depth=2, + tunnel=None, + **kwargs, + ): + return decision_task_escript( + script=script, + filename=filename, + name=name, + target=target, + depth=depth + 1, + tunnel=tunnel, + **kwargs, + ) + + py2 = decision_task_escript + py3 = decision_task_escript_py3 + + class SetVariableTask: + def __new__( + cls, + script=None, + filename=None, + name=None, + target=None, + variables=None, + depth=3, + tunnel=None, + **kwargs, + ): + return set_variable_task_escript( + script=script, + filename=filename, + name=name, + target=target, + variables=variables, + depth=depth + 1, + tunnel=tunnel, + **kwargs, + ) + + py2 = set_variable_task_escript + py3 = set_variable_task_escript_py3 + + def http_task_on_endpoint( method, relative_url=None, @@ -1545,7 +1734,7 @@ def __new__( class SetVariable: ssh = set_variable_task_ssh powershell = set_variable_task_powershell - escript = set_variable_task_escript + escript = EscriptTaskType.SetVariableTask class Delay: def __new__(cls, delay_seconds=None, name=None, target=None): @@ -1563,7 +1752,7 @@ def __new__(cls, *args, **kwargs): ssh = exec_task_ssh powershell 
= exec_task_powershell - escript = exec_task_escript + escript = EscriptTaskType.ExecTask class ConfigExec: def __new__(cls, config, name=None): @@ -1582,7 +1771,7 @@ def __new__(cls, *args, **kwargs): ssh = decision_task_ssh powershell = decision_task_powershell - escript = decision_task_escript + escript = EscriptTaskType.DecisionTask class Exec: def __new__(cls, *args, **kwargs): @@ -1590,7 +1779,7 @@ def __new__(cls, *args, **kwargs): ssh = exec_task_ssh_runbook powershell = exec_task_powershell_runbook - escript = exec_task_escript + escript = EscriptTaskType.ExecTask class ResourceTypeOperationTask: def __new__( diff --git a/calm/dsl/cli/app_commands.py b/calm/dsl/cli/app_commands.py index 6bace98b..b8c2ef28 100644 --- a/calm/dsl/cli/app_commands.py +++ b/calm/dsl/cli/app_commands.py @@ -2,7 +2,18 @@ from calm.dsl.api import get_api_client -from .main import main, get, describe, delete, run, watch, download, create, update +from .main import ( + main, + get, + describe, + delete, + run, + watch, + download, + create, + update, + decompile, +) from .utils import Display, FeatureFlagGroup from .apps import ( get_apps, @@ -14,6 +25,9 @@ delete_app, download_runlog, create_app, + describe_app_actions_to_update, + decompile_app_migratable_bp, + update_app_migratable_bp, ) from calm.dsl.log import get_logging_handle @@ -104,6 +118,44 @@ def _get_apps(name, filter_by, limit, offset, quiet, all_items, out): get_apps(name, filter_by, limit, offset, quiet, all_items, out) +@describe.command("app-migratable-entities") +@click.argument("app_name") +def _describe_app(app_name): + """Display app-actions to be migrated""" + + describe_app_actions_to_update(app_name) + + +@decompile.command("app-migratable-bp") +@click.option( + "--dir", + "-d", + "bp_dir", + default=None, + help="Blueprint directory location used for placing decompiled entities", +) +@click.argument("app_name") +def _decompile_app_migratable_bp(app_name, bp_dir): + """Decompile app-blueprint app-actions to 
be migrated""" + + decompile_app_migratable_bp(app_name, bp_dir) + + +@update.command("app-migratable-bp") +@click.option( + "--file", + "-f", + "bp_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + help="Path to Blueprint file", +) +@click.argument("app_name") +def _update_app_migratable_bp(app_name, bp_file): + """update app-blueprint app-actions to be migrated""" + + update_app_migratable_bp(app_name, bp_file) + + @describe.command("app") @click.argument("app_name") @click.option( diff --git a/calm/dsl/cli/apps.py b/calm/dsl/cli/apps.py index b8ef2814..4fa4affd 100644 --- a/calm/dsl/cli/apps.py +++ b/calm/dsl/cli/apps.py @@ -23,6 +23,7 @@ get_app, parse_launch_runtime_vars, parse_launch_params_attribute, + _decompile_bp, ) from calm.dsl.log import get_logging_handle @@ -1563,3 +1564,374 @@ def download_runlog(runlog_id, app_name, file_name): click.echo("Runlogs saved as {}".format(highlight_text(file_name))) else: LOG.error("[{}] - {}".format(err["code"], err["error"])) + + +def update_app_migratable_bp(app_name, bp_file): + """ + Updates app migratable blueprint + """ + + client = get_api_client() + params = {"filter": "name=={}".format(app_name)} + app_name_uuid_map = client.application.get_name_uuid_map(params) + app_uuid = app_name_uuid_map.get(app_name) + if not app_uuid: + LOG.error("Application with name {} not found".format(app_name)) + sys.exit("Invalid app name") + + bp_payload = compile_blueprint(bp_file) + remove_non_escript_actions_variables(bp_payload["spec"]["resources"]) + + client = get_api_client() + res, err = client.application.blueprints_entities_update( + app_id=app_uuid, payload=bp_payload + ) + if err: + LOG.error("Application update failed for app {}".format(app_name)) + sys.exit("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + runlog_uuid = res["status"]["runlog_uuid"] + click.echo( + "Application update is successful. 
Got Action Runlog uuid: {}".format( + highlight_text(runlog_uuid) + ) + ) + if res["status"].get("message_list", []): + click.echo("Update messages:") + click.echo("\t", nl=False) + click.echo("\n\t".join(res["status"].get("message_list", []))) + + +def decompile_app_migratable_bp(app_name, bp_dir): + + client = get_api_client() + params = {"filter": "name=={}".format(app_name)} + app_name_uuid_map = client.application.get_name_uuid_map(params) + app_uuid = app_name_uuid_map.get(app_name) + if not app_uuid: + LOG.error("Application with name {} not found".format(app_name)) + sys.exit("Invalid app name") + + res, err = client.application.blueprints_original(app_uuid) + if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + sys.exit("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + fix_missing_name_in_reference(res["spec"]) + remove_non_escript_actions_variables(res["spec"]["resources"]) + _decompile_bp(bp_payload=res, with_secrets=False, bp_dir=bp_dir) + + +def fix_missing_name_in_reference(data): + """ + Adds name field in missing references + """ + + uuid_name_map = {} + update_uuid_name_map_from_payload(data, uuid_name_map) + fill_name_in_refs(data, uuid_name_map) + + +def update_uuid_name_map_from_payload(data, uuid_name_map): + if not isinstance(data, dict): + return + + if data.get("name") and data.get("uuid"): + uuid_name_map[data["uuid"]] = data["name"] + + for k, v in data.items(): + if isinstance(v, dict): + if "reference" not in k: + update_uuid_name_map_from_payload(v, uuid_name_map) + + elif isinstance(v, list): + for _lv in v: + update_uuid_name_map_from_payload(_lv, uuid_name_map) + + +def fill_name_in_refs(data, uuid_name_map, force=False): + """ + Fills the name in references using uuid_name_map. + Force parameter is used, if it object is ref object. ex: service_reference etc. 
+ """ + + if not isinstance(data, dict): + return + + if data.get("uuid") and not data.get("name"): + if data["uuid"] in uuid_name_map: + data["name"] = uuid_name_map[data["uuid"]] + elif force: + if data.get("uuid") and data["uuid"] in uuid_name_map: + data["name"] = uuid_name_map[data["uuid"]] + + for k, v in data.items(): + if isinstance(v, dict): + if "reference" in k: + fill_name_in_refs(v, uuid_name_map, True) + else: + fill_name_in_refs(v, uuid_name_map) + + elif isinstance(v, list): + for _lv in v: + fill_name_in_refs(_lv, uuid_name_map) + + +def get_runbook_payload_having_escript_task_vars_only(rb_payload): + escript_tasks = get_escript_tasks_in_runbook(rb_payload) + escript_variables = get_escript_vars_in_entity(rb_payload) + + dag_task = [ + _dag for _dag in rb_payload["task_definition_list"] if _dag["type"] == "DAG" + ] + dag_task = dag_task[0] + task_edges = [] + for ind, _task in enumerate(escript_tasks[1:]): + task_edges.append( + { + "from_task_reference": { + "kind": "app_task", + "name": escript_tasks[ind]["name"], + }, + "to_task_reference": { + "kind": "app_task", + "name": _task["name"], + }, + } + ) + + if escript_tasks[ind].get("uuid"): + task_edges[-1]["from_task_reference"]["uuid"] = escript_tasks[ind]["uuid"] + task_edges[-1]["to_task_reference"]["uuid"] = _task["uuid"] + + child_task_refs = [] + for _task in escript_tasks: + child_task_refs.append({"kind": "app_task", "name": _task["name"]}) + if _task.get("uuid"): + child_task_refs[-1]["uuid"] = _task["uuid"] + + dag_task["child_tasks_local_reference_list"] = child_task_refs + dag_task["attrs"]["edges"] = task_edges + escript_tasks.insert(0, dag_task) + rb_payload["task_definition_list"] = escript_tasks + rb_payload["variable_list"] = escript_variables + + return rb_payload, len(escript_tasks) > 1 or len(escript_variables) > 0 + + +def get_actions_having_escript_entities(action_list): + final_action_list = [] + for _a in action_list: + ( + _escript_runbook, + any_escript_entity_present, 
+ ) = get_runbook_payload_having_escript_task_vars_only(_a["runbook"]) + if not any_escript_entity_present: + continue + + _a["runbook"] = _escript_runbook + final_action_list.append(_a) + + return final_action_list + + +def remove_non_escript_actions_variables(bp_payload): + """ + Remove actions and variables that doesnt escript task/var + """ + + entity_list = [ + "app_profile_list", + "credential_definition_list", + "substrate_definition_list", + "package_definition_list", + "service_definition_list", + ] + for _el in entity_list: + for _e in bp_payload.get(_el, []): + if "action_list" in _e: + _e["action_list"] = get_actions_having_escript_entities( + _e["action_list"] + ) + + if "variable_list" in _e: + _e["variable_list"] = get_escript_vars_in_entity(_e) + + if _el == "package_definition_list" and _e.get("type", "") == "DEB": + for pkg_runbook_name in ["install_runbook", "uninstall_runbook"]: + ( + _e["options"][pkg_runbook_name], + _, + ) = get_runbook_payload_having_escript_task_vars_only( + _e["options"].get(pkg_runbook_name, {}) + ) + + +def get_escript_tasks_in_runbook(runbook_payload): + + task_list = [] + for _task in runbook_payload["task_definition_list"]: + if _task["type"] in ["EXEC", "SET_VARIABLE"] and _task.get("attrs", {}).get( + "script_type", "" + ) in ["static", "static_py3"]: + task_list.append(_task) + return task_list + + +def get_escript_vars_in_entity(runbook_payload): + final_variable_list = [] + for _v in runbook_payload.get("variable_list", []): + if _v.get("options", {}).get("attrs", {}).get("script_type", "") in [ + "static", + "static_py3", + ]: + final_variable_list.append(_v) + return final_variable_list + + +def get_runbook_dependencies(runbook_payload): + + dependencies = [] + for _task in runbook_payload["task_definition_list"]: + if _task["type"] == "CALL_RUNBOOK": + dependencies.append(_task["attrs"]["runbook_reference"]["uuid"]) + return dependencies + + +def describe_app_actions_to_update(app_name): + """Displays 
blueprint data""" + + DISPLAY_MAP = { + "service_definition_list": "Services", + "substrate_definition_list": "Substrates", + "app_profile_list": "Application Profiles", + "package_definition_list": "Packages", + } + + client = get_api_client() + params = {"filter": "name=={}".format(app_name)} + app_name_uuid_map = client.application.get_name_uuid_map(params) + app_uuid = app_name_uuid_map.get(app_name) + if not app_uuid: + LOG.error("Application with name {} not found".format(app_name)) + sys.exit("Invalid app name") + + res, err = client.application.blueprints_original(app_uuid) + if err: + LOG.exception("[{}] - {}".format(err["code"], err["error"])) + sys.exit("[{}] - {}".format(err["code"], err["error"])) + + res = res.json() + resources = res["status"]["resources"] + + dependencies = {} + runbook_uuid_context = {} + runbook_containing_migratable_entities = [] + for _key in resources.keys(): + if _key in [ + "service_definition_list", + "substrate_definition_list", + "app_profile_list", + "package_definition_list", + ]: + for _entity in resources[_key]: + if ( + _key == "package_definition_list" + and _entity.get("type", "") == "DEB" + ): + install_runbook = _entity["options"]["install_runbook"] + uninstall_runbook = _entity["options"]["uninstall_runbook"] + + _entity["action_list"] = [ + {"name": "install_runbook", "runbook": install_runbook}, + {"name": "uninstall_runbook", "runbook": uninstall_runbook}, + ] + + for _action in _entity.get("action_list", []): + runbook_uuid = _action["runbook"]["uuid"] + runbook_uuid_context[runbook_uuid] = "{}.{}.Action.{}".format( + DISPLAY_MAP[_key], _entity["name"], _action["name"] + ) + dependencies[runbook_uuid] = get_runbook_dependencies( + _action["runbook"] + ) + if get_escript_tasks_in_runbook(_action["runbook"]): + runbook_containing_migratable_entities.append(runbook_uuid) + elif get_escript_vars_in_entity(_action["runbook"]): + runbook_containing_migratable_entities.append(runbook_uuid) + + for _key in 
resources.keys(): + if _key in [ + "service_definition_list", + "substrate_definition_list", + "app_profile_list", + "package_definition_list", + ]: + print("\n{} [{}]:".format(DISPLAY_MAP[_key], len(resources[_key]))) + for _entity in resources[_key]: + print("\t-> {}".format(highlight_text(_entity["name"]))) + print("\t Actions:") + + any_action_to_be_modified = False + for _action in _entity.get("action_list", []): + + runbook_uuid = _action["runbook"]["uuid"] + has_migratable_entities = ( + runbook_uuid in runbook_containing_migratable_entities + ) + + dependable_migratable_actions = [] + for _run_uuid in dependencies.get(runbook_uuid, []): + if _run_uuid in runbook_containing_migratable_entities: + dependable_migratable_actions.append( + runbook_uuid_context[_run_uuid] + ) + + if has_migratable_entities or dependable_migratable_actions: + any_action_to_be_modified = True + print("\t\t-> {}".format(highlight_text(_action["name"]))) + if has_migratable_entities: + print("\t\t Tasks:") + task_list = get_escript_tasks_in_runbook(_action["runbook"]) + migratable_task_names = [ + _task["name"] for _task in task_list + ] + + if migratable_task_names: + for _ind, _tname in enumerate(migratable_task_names): + print( + "\t\t\t{}. {}".format( + _ind, highlight_text(_tname) + ) + ) + else: + print("\t\t\t No Tasks to be migrated") + + print("\t\t Variables:") + var_list = get_escript_vars_in_entity(_action["runbook"]) + migratable_var_names = [_var["name"] for _var in var_list] + if migratable_var_names: + for _ind, _tname in enumerate(migratable_var_names): + print( + "\t\t\t{}. {}".format( + _ind, highlight_text(_tname) + ) + ) + else: + print("\t\t\t No Variables to be migrated") + + if dependable_migratable_actions: + print("\t\t Dependable actions to be migrated:") + for _ind, _act_ctx in enumerate( + dependable_migratable_actions + ): + print( + "\t\t\t{}. 
{}".format( + _ind, highlight_text(_act_ctx) + ) + ) + + if not any_action_to_be_modified: + print("\t\t No actions found to be modified") diff --git a/calm/dsl/cli/bps.py b/calm/dsl/cli/bps.py index dc6a5bbe..1d8225d1 100644 --- a/calm/dsl/cli/bps.py +++ b/calm/dsl/cli/bps.py @@ -643,7 +643,7 @@ def _decompile_bp( metadata_obj = MetadataType.decompile(blueprint_metadata) # Copying dsl_name_map to global client_attrs - if bp_payload["spec"]["resources"]["client_attrs"].get("None", {}): + if bp_payload["spec"]["resources"].get("client_attrs", {}).get("None", {}): init_dsl_metadata_map(bp_payload["spec"]["resources"]["client_attrs"]["None"]) LOG.info("Decompiling blueprint {}".format(blueprint_name)) diff --git a/calm/dsl/cli/constants.py b/calm/dsl/cli/constants.py index f478bc2b..5e1636cb 100644 --- a/calm/dsl/cli/constants.py +++ b/calm/dsl/cli/constants.py @@ -96,6 +96,7 @@ class STATES: DISABLED = "DISABLED" DELETED = "DELETED" DRAFT = "DRAFT" + POLICY_EXEC = "POLICY_EXEC" class APPROVAL_REQUEST: @@ -214,6 +215,7 @@ class SCRIPT_TYPES: POWERSHELL = "npsscript" SHELL = "sh" ESCRIPT = "static" + ESCRIPT_PY3 = "static_py3" class STATES: ACTIVE = "ACTIVE" diff --git a/calm/dsl/cli/init_command.py b/calm/dsl/cli/init_command.py index 4f9fcebf..eba1e623 100644 --- a/calm/dsl/cli/init_command.py +++ b/calm/dsl/cli/init_command.py @@ -11,6 +11,7 @@ get_default_db_file, get_default_local_dir, get_default_connection_config, + get_default_log_config, init_context, ) from calm.dsl.db import init_db_handle @@ -22,9 +23,11 @@ from calm.dsl.constants import POLICY, STRATOS, DSL_CONFIG from .main import init, set -from calm.dsl.log import get_logging_handle +from calm.dsl.log import get_logging_handle, CustomLogging LOG = get_logging_handle(__name__) +DEFAULT_CONNECTION_CONFIG = get_default_connection_config() +DEFAULT_LOG_CONFIG = get_default_log_config() @init.command("dsl") @@ -89,6 +92,33 @@ envvar="CALM_DSL_DEFAULT_PROJECT", help="Default project name used for entities", ) 
+@click.option( + "--log_level", + "-l", + envvar="CALM_DSL_LOG_LEVEL", + default=DEFAULT_LOG_CONFIG["level"], + help="Default log level", +) +@click.option( + "--retries-enabled/--retries-disabled", + "-re/-rd", + default=DEFAULT_CONNECTION_CONFIG["retries_enabled"], + help="Retries enabled/disabled for api connections", +) +@click.option( + "--connection-timeout", + "-ct", + type=int, + default=DEFAULT_CONNECTION_CONFIG["connection_timeout"], + help="Connection timeout for api connections", +) +@click.option( + "--read-timeout", + "-rt", + type=int, + default=DEFAULT_CONNECTION_CONFIG["read_timeout"], + help="Read timeout for api connections", +) def initialize_engine( ip, port, @@ -98,6 +128,10 @@ def initialize_engine( db_file, local_dir, config_file, + log_level, + retries_enabled, + connection_timeout, + read_timeout, ): """ \b @@ -124,6 +158,10 @@ def initialize_engine( db_file=db_file, local_dir=local_dir, config_file=config_file, + log_level=log_level, + retries_enabled=retries_enabled, + connection_timeout=connection_timeout, + read_timeout=read_timeout, ) init_db() sync_cache() @@ -149,6 +187,10 @@ def set_server_details( db_file, local_dir, config_file, + log_level, + retries_enabled, + connection_timeout, + read_timeout, ): if not (ip and port and username and password and project_name): @@ -162,15 +204,6 @@ def set_server_details( "Project", default=DSL_CONFIG.EMPTY_PROJECT_NAME ) - # Default log-level - log_level = "INFO" - - # Default connection params - default_connection_config = get_default_connection_config() - retries_enabled = default_connection_config["retries_enabled"] - connection_timeout = default_connection_config["connection_timeout"] - read_timeout = default_connection_config["read_timeout"] - # Do not prompt for init config variables, Take default values for init.ini file config_file = config_file or get_default_config_file() local_dir = local_dir or get_default_local_dir() @@ -285,6 +318,9 @@ def set_server_details( LOG.info("Updating 
context for using latest config file data") init_context() + if log_level: + CustomLogging.set_verbose_level(getattr(CustomLogging, log_level)) + def init_db(): LOG.info("Creating local database") diff --git a/calm/dsl/cli/library_tasks.py b/calm/dsl/cli/library_tasks.py index 5619d070..bb5f2f5d 100644 --- a/calm/dsl/cli/library_tasks.py +++ b/calm/dsl/cli/library_tasks.py @@ -501,12 +501,16 @@ def import_task(task_file, name, description, out_vars, force): if ( task_file.endswith(".sh") or task_file.endswith(".escript") + or task_file.endswith(".escript.py2") + or task_file.endswith(".escript.py3") or task_file.endswith(".ps1") ): if task_file.endswith(".sh"): script_type = TASKS.SCRIPT_TYPES.SHELL - elif task_file.endswith(".escript"): + elif task_file.endswith(".escript") or task_file.endswith(".escript.py2"): script_type = TASKS.SCRIPT_TYPES.ESCRIPT + elif task_file.endswith(".escript.py3"): + script_type = TASKS.SCRIPT_TYPES.ESCRIPT_PY3 elif task_file.endswith(".ps1"): script_type = TASKS.SCRIPT_TYPES.POWERSHELL diff --git a/calm/dsl/cli/library_tasks_commands.py b/calm/dsl/cli/library_tasks_commands.py index 4e5b8c46..7e41b3e6 100644 --- a/calm/dsl/cli/library_tasks_commands.py +++ b/calm/dsl/cli/library_tasks_commands.py @@ -82,7 +82,7 @@ def _delete_task(task_names): "task_file", type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), required=True, - help="Path of task file (.json, .sh, .escript, .ps1)", + help="Path of task file (.json, .sh, .escript, .escript.py2, .escript.py3, .ps1)", ) @click.option("--name", "-n", default=None, help="Task Library item name (Optional)") @click.option( @@ -108,17 +108,22 @@ def _import_task(task_file, name, description, out_vars, force): (-f | --file) supports:\n - \t.sh - Shell script file\n - \t.escript - Escript file\n - \t.ps1 - Powershell Script File\n + \t.sh - Shell script file\n + \t.escript - Escript file(python2)\n + \t.escript.py2 - Escript file(python2)\n + \t.escript.py3 - Escript 
file(python3)\n + \t.ps1 - Powershell Script File\n Note:\n To import Set-Variable task, use --out-vars="OUT1,OUT2". + To import escript file with specific version, use file extension as .escript, .escript.py2; .escript.py3 Examples:\n calm import library task --name="Install IIS" -f Install_IIS.ps1\n calm import library task -f Install_Docker.sh\n - calm import library task -f Install_Docker.sh --out-vars="IP_ADDRESS,PORT" """ + calm import library task -f Install_Docker.sh --out-vars="IP_ADDRESS,PORT"\n + calm import library task --name="Escript-RUtil-Py2" -f request_util.escript.py2\n + calm import library task --name="Escript-RUtil-Py3" -f request_util.escript.py3\n""" import_task(task_file, name, description, out_vars, force) diff --git a/calm/dsl/config/__init__.py b/calm/dsl/config/__init__.py index e64e0311..b4495914 100644 --- a/calm/dsl/config/__init__.py +++ b/calm/dsl/config/__init__.py @@ -1,5 +1,10 @@ from .config import get_config_handle, set_dsl_config -from .context import get_context, init_context, get_default_connection_config +from .context import ( + get_context, + init_context, + get_default_connection_config, + get_default_log_config, +) from .init_config import ( get_default_config_file, get_default_db_file, @@ -16,4 +21,5 @@ "get_default_db_file", "get_default_local_dir", "get_default_connection_config", + "get_default_log_config", ] diff --git a/calm/dsl/config/config.py b/calm/dsl/config/config.py index 49c19664..cb13096e 100644 --- a/calm/dsl/config/config.py +++ b/calm/dsl/config/config.py @@ -4,6 +4,7 @@ from .schema import validate_config from .init_config import get_init_config_handle +from .constants import CONFIG from calm.dsl.tools import make_file_dir from calm.dsl.log import get_logging_handle @@ -79,7 +80,7 @@ def get_stratos_config(self): stratos_config = {} if "STRATOS" in self._CONFIG_PARSER_OBJECT: for k, v in self._CONFIG_PARSER_OBJECT.items("STRATOS"): - if k == "stratos_status": + if k == CONFIG.STRATOS.STATUS: 
stratos_config[k] = self._CONFIG_PARSER_OBJECT[ "STRATOS" ].getboolean(k) @@ -103,11 +104,14 @@ def get_connection_config(self): connection_config = {} if "CONNECTION" in self._CONFIG_PARSER_OBJECT: for k, v in self._CONFIG_PARSER_OBJECT.items("CONNECTION"): - if k == "retries_enabled": + if k == CONFIG.CONNECTION.RETRIES_ENABLED: connection_config[k] = self._CONFIG_PARSER_OBJECT[ "CONNECTION" ].getboolean(k) - elif k in ["connection_timeout", "read_timeout"]: + elif k in [ + CONFIG.CONNECTION.CONNECTION_TIMEOUT, + CONFIG.CONNECTION.READ_TIMEOUT, + ]: connection_config[k] = self._CONFIG_PARSER_OBJECT[ "CONNECTION" ].getint(k) diff --git a/calm/dsl/config/constants.py b/calm/dsl/config/constants.py new file mode 100644 index 00000000..cecc9328 --- /dev/null +++ b/calm/dsl/config/constants.py @@ -0,0 +1,71 @@ +class IterableConstants: + @classmethod + def ALL(cls): + """ + Schematics expect list or tuple strictly; not just iterable. + :returns: a list of all public attr + """ + attrs = [] + for field in vars(cls): + if not field.startswith("__"): + attrs.append(getattr(cls, field)) + return attrs + + +class CONFIG: + class CONNECTION(IterableConstants): + CONNECTION_TIMEOUT = "connection_timeout" + RETRIES_ENABLED = "retries_enabled" + READ_TIMEOUT = "read_timeout" + + class LOG(IterableConstants): + LEVEL = "level" + + class STRATOS(IterableConstants): + STATUS = "stratos_status" + + class APPROVAL_POLICY(IterableConstants): + STATUS = "approval_policy_status" + + class POLICY(IterableConstants): + STATUS = "policy_status" + + class SERVER(IterableConstants): + HOST = "pc_ip" + PORT = "pc_port" + USERNAME = "pc_username" + PASSWORD = "pc_password" + + class PROJECT(IterableConstants): + NAME = "name" + + class INIT_CONFIG(IterableConstants): + CONFIG_FILE_LOCATION = "config_file_location" + LOCAL_DIR_LOCATION = "local_dir_location" + DB_LOCATION = "db_location" + + +class ENV_CONFIG: + class CONNECTION(IterableConstants): + CONNECTION_TIMEOUT = 
"CALM_DSL_CONNECTION_TIMEOUT" + RETRIES_ENABLED = "CALM_DSL_RETRIES_ENABLED" + READ_TIMEOUT = "CALM_DSL_READ_TIMEOUT" + + class LOG(IterableConstants): + LEVEL = "CALM_DSL_LOG_LEVEL" + + class SERVER(IterableConstants): + HOST = "CALM_DSL_PC_IP" + PORT = "CALM_DSL_PC_PORT" + USERNAME = "CALM_DSL_PC_USERNAME" + PASSWORD = "CALM_DSL_PC_PASSWORD" + + class PROJECT(IterableConstants): + NAME = "CALM_DSL_DEFAULT_PROJECT" + + class INIT_CONFIG: + CONFIG_FILE_LOCATION = "CALM_DSL_CONFIG_FILE_LOCATION" + LOCAL_DIR_LOCATION = "CALM_DSL_LOCAL_DIR_LOCATION" + DB_LOCATION = "CALM_DSL_DB_LOCATION" + + COMPILE_SECRETS = "COMPILE_SECRETS" diff --git a/calm/dsl/config/context.py b/calm/dsl/config/context.py index 2407b03f..bd235458 100644 --- a/calm/dsl/config/context.py +++ b/calm/dsl/config/context.py @@ -3,14 +3,16 @@ from .env_config import EnvConfig from .config import get_config_handle +from .constants import CONFIG from calm.dsl.log import get_logging_handle from calm.dsl.constants import DSL_CONFIG LOG = get_logging_handle(__name__) DEFAULT_RETRIES_ENABLED = True -DEFAILT_CONNECTION_TIMEOUT = 5 +DEFAULT_CONNECTION_TIMEOUT = 5 DEFAULT_READ_TIMEOUT = 30 +DEFAULT_LOG_LEVEL = "INFO" class Context: @@ -39,6 +41,7 @@ def initialize_configuration(self): self.server_config.update(EnvConfig.get_server_config()) self.project_config.update(EnvConfig.get_project_config()) self.log_config.update(EnvConfig.get_log_config()) + self.connection_config.update(EnvConfig.get_connection_config()) init_config = config_handle.get_init_config() self._CONFIG_FILE = init_config["CONFIG"]["location"] @@ -66,25 +69,25 @@ def get_server_config(self): config = self.server_config try: # if all server variables are present either in env or some other way, not required to validate config file - if not config.get("pc_ip"): + if not config.get(CONFIG.SERVER.HOST): LOG.error( "Host IP not found. 
Please provide it in config file or set environment variable 'CALM_DSL_PC_IP'" ) sys.exit(-1) - if not config.get("pc_port"): + if not config.get(CONFIG.SERVER.PORT): LOG.error( "Host Port not found. Please provide it in config file or set environment variable 'CALM_DSL_PC_PORT'" ) sys.exit(-1) - if not config.get("pc_username"): + if not config.get(CONFIG.SERVER.USERNAME): LOG.error( "Host username not found. Please provide it in config file or set environment variable 'CALM_DSL_PC_USERNAME'" ) sys.exit(-1) - if not config.get("pc_password"): + if not config.get(CONFIG.SERVER.PASSWORD): LOG.error( "Host password not found. Please provide it in config file or set environment variable 'CALM_DSL_PC_PASSWORD'" ) @@ -100,7 +103,7 @@ def get_project_config(self): """returns project configuration""" config = self.project_config - if not config.get("name"): + if not config.get(CONFIG.PROJECT.NAME): config["name"] = DSL_CONFIG.EMPTY_PROJECT_NAME return config @@ -109,38 +112,34 @@ def get_connection_config(self): """returns connection configuration""" config = self.connection_config - if "retries_enabled" not in config: - config[ - "retries_enabled" - ] = DEFAULT_RETRIES_ENABLED # TODO check boolean is supported by ini - if "connection_timeout" not in config: - config["connection_timeout"] = DEFAILT_CONNECTION_TIMEOUT - if "read_timeout" not in config: - config["read_timeout"] = DEFAULT_READ_TIMEOUT + default_configuration = get_default_connection_config() + for _key in CONFIG.CONNECTION.ALL(): + if _key not in config: + config[_key] = default_configuration[_key] return config def get_policy_config(self): """returns policy configuration""" config = self.policy_config - if not config.get("policy_status"): - config["policy_status"] = False + if not config.get(CONFIG.POLICY.STATUS): + config[CONFIG.POLICY.STATUS] = False return config def get_approval_policy_config(self): """returns approval policy configuration""" config = self.approval_policy_config - if not 
config.get("approval_policy_status"): - config["approval_policy_status"] = False + if not config.get(CONFIG.APPROVAL_POLICY.STATUS): + config[CONFIG.APPROVAL_POLICY.STATUS] = False return config def get_stratos_config(self): """returns stratos configuration""" config = self.stratos_config - if not config.get("stratos_status"): - config["stratos_status"] = False + if not config.get(CONFIG.STRATOS.STATUS): + config[CONFIG.STRATOS.STATUS] = False return config @@ -148,11 +147,12 @@ def get_log_config(self): """returns logging configuration""" config = self.log_config - if not config.get("level"): + default_configuration = get_default_log_config() + if not config.get(CONFIG.LOG.LEVEL): LOG.warning( "Default log-level not found in config file or environment('CALM_DSL_LOG_LEVEL'). Setting it to 'INFO' level" ) - config["level"] = "INFO" + config["level"] = default_configuration["level"] return config @@ -201,18 +201,20 @@ def print_config(self): ConfigHandle = get_config_handle() config_str = ConfigHandle._render_config_template( - ip=server_config["pc_ip"], - port=server_config["pc_port"], - username=server_config["pc_username"], + ip=server_config[CONFIG.SERVER.HOST], + port=server_config[CONFIG.SERVER.PORT], + username=server_config[CONFIG.SERVER.USERNAME], password="xxxxxxxx", # Do not render password - project_name=project_config["name"], - log_level=log_config["level"], - retries_enabled=connection_config["retries_enabled"], - connection_timeout=connection_config["connection_timeout"], - read_timeout=connection_config["read_timeout"], - policy_status=policy_config["policy_status"], - approval_policy_status=approval_policy_config["approval_policy_status"], - stratos_status=stratos_status["stratos_status"], + project_name=project_config[CONFIG.PROJECT.NAME], + log_level=log_config[CONFIG.LOG.LEVEL], + policy_status=policy_config[CONFIG.POLICY.STATUS], + approval_policy_status=approval_policy_config[ + CONFIG.APPROVAL_POLICY.STATUS + ], + 
stratos_status=stratos_status[CONFIG.STRATOS.STATUS], + retries_enabled=connection_config[CONFIG.CONNECTION.RETRIES_ENABLED], + connection_timeout=connection_config[CONFIG.CONNECTION.CONNECTION_TIMEOUT], + read_timeout=connection_config[CONFIG.CONNECTION.READ_TIMEOUT], ) print(config_str) @@ -239,7 +241,13 @@ def get_default_connection_config(): """Returns default connection config""" return { - "connection_timeout": DEFAILT_CONNECTION_TIMEOUT, + "connection_timeout": DEFAULT_CONNECTION_TIMEOUT, "read_timeout": DEFAULT_READ_TIMEOUT, "retries_enabled": DEFAULT_RETRIES_ENABLED, } + + +def get_default_log_config(): + """Returns default log config""" + + return {"level": DEFAULT_LOG_LEVEL} diff --git a/calm/dsl/config/env_config.py b/calm/dsl/config/env_config.py index a17097f6..4a3dbda8 100644 --- a/calm/dsl/config/env_config.py +++ b/calm/dsl/config/env_config.py @@ -1,33 +1,46 @@ import os +from .constants import ENV_CONFIG, CONFIG + class EnvConfig: - pc_ip = os.environ.get("CALM_DSL_PC_IP") or "" - pc_port = os.environ.get("CALM_DSL_PC_PORT") or "" - pc_username = os.environ.get("CALM_DSL_PC_USERNAME") or "" - pc_password = os.environ.get("CALM_DSL_PC_PASSWORD") or "" - default_project = os.environ.get("CALM_DSL_DEFAULT_PROJECT") or "" - log_level = os.environ.get("CALM_DSL_LOG_LEVEL") or "" + pc_ip = os.environ.get(ENV_CONFIG.SERVER.HOST) or "" + pc_port = os.environ.get(ENV_CONFIG.SERVER.PORT) or "" + pc_username = os.environ.get(ENV_CONFIG.SERVER.USERNAME) or "" + pc_password = os.environ.get(ENV_CONFIG.SERVER.PASSWORD) or "" + default_project = os.environ.get(ENV_CONFIG.PROJECT.NAME) or "" + log_level = os.environ.get(ENV_CONFIG.LOG.LEVEL) or "" + + config_file_location = ( + os.environ.get(ENV_CONFIG.INIT_CONFIG.CONFIG_FILE_LOCATION) or "" + ) + local_dir_location = os.environ.get(ENV_CONFIG.INIT_CONFIG.LOCAL_DIR_LOCATION) or "" + db_location = os.environ.get(ENV_CONFIG.INIT_CONFIG.DB_LOCATION) + is_compile_secrets = os.environ.get(ENV_CONFIG.COMPILE_SECRETS) 
or "false" + + connection_timeout = os.environ.get(ENV_CONFIG.CONNECTION.CONNECTION_TIMEOUT) or "" + read_timeout = os.environ.get(ENV_CONFIG.CONNECTION.READ_TIMEOUT) or "" + retries_enabled = os.environ.get(ENV_CONFIG.CONNECTION.RETRIES_ENABLED) or "" - config_file_location = os.environ.get("CALM_DSL_CONFIG_FILE_LOCATION") or "" - local_dir_location = os.environ.get("CALM_DSL_LOCAL_DIR_LOCATION") or "" - db_location = os.environ.get("CALM_DSL_DB_LOCATION") + @classmethod + def is_compile_secret(cls): + return cls.is_compile_secrets.lower() == "true" @classmethod def get_server_config(cls): config = {} if cls.pc_ip: - config["pc_ip"] = cls.pc_ip + config[CONFIG.SERVER.HOST] = cls.pc_ip if cls.pc_port: - config["pc_port"] = cls.pc_port + config[CONFIG.SERVER.PORT] = cls.pc_port if cls.pc_username: - config["pc_username"] = cls.pc_username + config[CONFIG.SERVER.USERNAME] = cls.pc_username if cls.pc_password: - config["pc_password"] = cls.pc_password + config[CONFIG.SERVER.PASSWORD] = cls.pc_password return config @@ -36,7 +49,7 @@ def get_project_config(cls): config = {} if cls.default_project: - config["name"] = cls.default_project + config[CONFIG.PROJECT.NAME] = cls.default_project return config @@ -45,7 +58,7 @@ def get_log_config(cls): config = {} if cls.log_level: - config["level"] = cls.log_level + config[CONFIG.LOG.LEVEL] = cls.log_level return config @@ -54,12 +67,29 @@ def get_init_config(cls): config = {} if cls.config_file_location: - config["config_file_location"] = cls.config_file_location + config[CONFIG.INIT_CONFIG.CONFIG_FILE_LOCATION] = cls.config_file_location if cls.local_dir_location: - config["local_dir_location"] = cls.local_dir_location + config[CONFIG.INIT_CONFIG.LOCAL_DIR_LOCATION] = cls.local_dir_location if cls.db_location: - config["db_location"] = cls.db_location + config[CONFIG.INIT_CONFIG.DB_LOCATION] = cls.db_location + + return config + + @classmethod + def get_connection_config(cls): + + config = {} + if cls.connection_timeout: + 
config[CONFIG.CONNECTION.CONNECTION_TIMEOUT] = int(cls.connection_timeout) + + if cls.read_timeout: + config[CONFIG.CONNECTION.READ_TIMEOUT] = int(cls.read_timeout) + + if cls.retries_enabled: + config[CONFIG.CONNECTION.RETRIES_ENABLED] = ( + cls.retries_enabled.lower() == "true" + ) return config diff --git a/calm/dsl/constants.py b/calm/dsl/constants.py index 0cf13275..89b5ac6c 100644 --- a/calm/dsl/constants.py +++ b/calm/dsl/constants.py @@ -169,6 +169,9 @@ class TYPE: VMWARE = "VMWARE_VM" AWS = "AWS_VM" + class AHV: + VLAN_1211 = "vlan1211" + class QUOTA(object): class STATE(object): diff --git a/calm/dsl/db/table_config.py b/calm/dsl/db/table_config.py index 18c9ff6d..cb44ec50 100644 --- a/calm/dsl/db/table_config.py +++ b/calm/dsl/db/table_config.py @@ -1068,8 +1068,15 @@ def get_entity_data(cls, name, **kwargs): @classmethod def get_entity_data_using_uuid(cls, uuid, **kwargs): + query_obj = {} + if uuid: + query_obj["uuid"] = uuid + + if kwargs.get("tunnel_uuid", ""): + query_obj["tunnel_uuid"] = kwargs.get("tunnel_uuid") + try: - entity = super().get(cls.uuid == uuid) + entity = super().get(**query_obj) return entity.get_detail_dict() except DoesNotExist: diff --git a/calm/dsl/decompile/config_spec.py b/calm/dsl/decompile/config_spec.py index 36138157..e8ceab85 100644 --- a/calm/dsl/decompile/config_spec.py +++ b/calm/dsl/decompile/config_spec.py @@ -38,9 +38,11 @@ def render_snapshot_config_template(cls, entity_context, CONFIG_SPEC_MAP): attrs["target_any_local_reference"]["name"] ) user_attrs["num_of_replicas"] = attrs["num_of_replicas"] - if attrs.get("app_protection_policy_reference", None): - user_attrs["policy"] = attrs["app_protection_policy_reference"]["name"] - if attrs.get("app_protection_rule_reference", None): - user_attrs["rule"] = attrs["app_protection_rule_reference"]["name"] + + # TODO fix App Protection policy model, decompilation is wrong and in compilation also metadata project is not considered + # if 
attrs.get("app_protection_policy_reference", {}).get("name", {}): + # user_attrs["policy"] = attrs["app_protection_policy_reference"]["name"] + # if attrs.get("app_protection_rule_reference", {}).get("name", {}): + # user_attrs["rule"] = attrs["app_protection_rule_reference"]["name"] text = render_template(schema_file="snapshot_config.py.jinja2", obj=user_attrs) return text.strip() diff --git a/calm/dsl/decompile/schemas/metadata.py.jinja2 b/calm/dsl/decompile/schemas/metadata.py.jinja2 index 8eeda55b..cc2bd0e6 100644 --- a/calm/dsl/decompile/schemas/metadata.py.jinja2 +++ b/calm/dsl/decompile/schemas/metadata.py.jinja2 @@ -2,6 +2,8 @@ class BpMetadata(Metadata): {% if obj.categories %}categories={{obj.categories}}{% endif %} + {% if obj.project_name %}project=Ref.Project("{{obj.project_name}}"){% endif %} + {%- endmacro %} {{ metadata(obj) }} diff --git a/calm/dsl/decompile/schemas/task_decision_escript.py.jinja2 b/calm/dsl/decompile/schemas/task_decision_escript.py.jinja2 index 63711885..cc7bffb0 100644 --- a/calm/dsl/decompile/schemas/task_decision_escript.py.jinja2 +++ b/calm/dsl/decompile/schemas/task_decision_escript.py.jinja2 @@ -1,12 +1,12 @@ {%- macro decision_escript_task(obj) -%} {%- if obj.cred is not defined and obj.target is not defined %} -CalmTask.Decision.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +CalmTask.Decision.escript.py2(name='{{obj.name}}', filename={{obj.attrs.script_file}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) {%- elif obj.cred is not defined %} -CalmTask.Decision.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +CalmTask.Decision.escript.py2(name='{{obj.name}}', 
filename={{obj.attrs.script_file}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) {%- elif obj.target is not defined %} -CalmTask.Decision.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +CalmTask.Decision.escript.py2(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) {%- else %} -CalmTask.Decision.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +CalmTask.Decision.escript.py2(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) {%- endif %} {%- endmacro %} diff --git a/calm/dsl/decompile/schemas/task_decision_escript_py3.py.jinja2 b/calm/dsl/decompile/schemas/task_decision_escript_py3.py.jinja2 new file mode 100644 index 00000000..faec12f1 --- /dev/null +++ b/calm/dsl/decompile/schemas/task_decision_escript_py3.py.jinja2 @@ -0,0 +1,13 @@ +{%- macro decision_escript_py3_task(obj) -%} +{%- if obj.cred is not defined and obj.target is not defined %} +CalmTask.Decision.escript.py3(name='{{obj.name}}', filename={{obj.attrs.script_file}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.cred is not defined %} +CalmTask.Decision.escript.py3(name='{{obj.name}}', filename={{obj.attrs.script_file}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ 
obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.target is not defined %} +CalmTask.Decision.escript.py3(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- else %} +CalmTask.Decision.escript.py3(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- endif %} +{%- endmacro %} + +{{ decision_escript_py3_task(obj) }} \ No newline at end of file diff --git a/calm/dsl/decompile/schemas/task_exec_escript.py.jinja2 b/calm/dsl/decompile/schemas/task_exec_escript.py.jinja2 index a6765da8..1c63c17e 100644 --- a/calm/dsl/decompile/schemas/task_exec_escript.py.jinja2 +++ b/calm/dsl/decompile/schemas/task_exec_escript.py.jinja2 @@ -1,12 +1,12 @@ {%- macro exec_escript_task(obj) -%} {%- if obj.cred is not defined and obj.target is not defined %} -CalmTask.Exec.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +CalmTask.Exec.escript.py2(name='{{obj.name}}', filename={{obj.attrs.script_file}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) {%- elif obj.cred is not defined %} -CalmTask.Exec.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +CalmTask.Exec.escript.py2(name='{{obj.name}}', filename={{obj.attrs.script_file}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) {%- elif obj.target is not defined %} 
-CalmTask.Exec.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +CalmTask.Exec.escript.py2(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) {%- else %} -CalmTask.Exec.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +CalmTask.Exec.escript.py2(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) {%- endif %} {%- endmacro %} diff --git a/calm/dsl/decompile/schemas/task_exec_escript_py3.py.jinja2 b/calm/dsl/decompile/schemas/task_exec_escript_py3.py.jinja2 new file mode 100644 index 00000000..08d6de9e --- /dev/null +++ b/calm/dsl/decompile/schemas/task_exec_escript_py3.py.jinja2 @@ -0,0 +1,13 @@ +{%- macro exec_escript_py3_task(obj) -%} +{%- if obj.cred is not defined and obj.target is not defined %} +CalmTask.Exec.escript.py3(name='{{obj.name}}', filename={{obj.attrs.script_file}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.cred is not defined %} +CalmTask.Exec.escript.py3(name='{{obj.name}}', filename={{obj.attrs.script_file}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.target is not defined %} +CalmTask.Exec.escript.py3(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ 
obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- else %} +CalmTask.Exec.escript.py3(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- endif %} +{%- endmacro %} + +{{ exec_escript_py3_task(obj) }} diff --git a/calm/dsl/decompile/schemas/task_setvariable_escript.py.jinja2 b/calm/dsl/decompile/schemas/task_setvariable_escript.py.jinja2 index c343cd82..8784fac2 100644 --- a/calm/dsl/decompile/schemas/task_setvariable_escript.py.jinja2 +++ b/calm/dsl/decompile/schemas/task_setvariable_escript.py.jinja2 @@ -1,14 +1,14 @@ {%- macro setvariable_escript_task(obj) -%} {%- if obj.cred is not defined and obj.target and obj.variables is not defined %} -CalmTask.SetVariable.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +CalmTask.SetVariable.escript.py2(name='{{obj.name}}', filename={{obj.attrs.script_file}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) {%- elif obj.cred is not defined %} -CalmTask.SetVariable.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}} {%- if obj.target %}, target={{obj.target}}{% endif %}, variables={{obj.variables}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +CalmTask.SetVariable.escript.py2(name='{{obj.name}}', filename={{obj.attrs.script_file}} {%- if obj.target %}, target={{obj.target}}{% endif %}, variables={{obj.variables}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) {%- elif obj.target is not defined %} -CalmTask.SetVariable.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, 
variables={{obj.variables}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +CalmTask.SetVariable.escript.py2(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, variables={{obj.variables}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) {%- elif obj.variables is not defined %} -CalmTask.SetVariable.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +CalmTask.SetVariable.escript.py2(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) {%- else %} -CalmTask.SetVariable.escript(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}}, variables={{obj.variables}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +CalmTask.SetVariable.escript.py2(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}}, variables={{obj.variables}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) {%- endif %} {%- endmacro %} diff --git a/calm/dsl/decompile/schemas/task_setvariable_escript_py3.py.jinja2 b/calm/dsl/decompile/schemas/task_setvariable_escript_py3.py.jinja2 new file mode 100644 index 00000000..b4f93e5c --- /dev/null +++ b/calm/dsl/decompile/schemas/task_setvariable_escript_py3.py.jinja2 @@ -0,0 +1,15 @@ +{%- macro setvariable_escript_py3_task(obj) -%} +{%- if obj.cred is not defined and obj.target and obj.variables is not defined %} +CalmTask.SetVariable.escript.py3(name='{{obj.name}}', 
filename={{obj.attrs.script_file}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.cred is not defined %} +CalmTask.SetVariable.escript.py3(name='{{obj.name}}', filename={{obj.attrs.script_file}} {%- if obj.target %}, target={{obj.target}}{% endif %}, variables={{obj.variables}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.target is not defined %} +CalmTask.SetVariable.escript.py3(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, variables={{obj.variables}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- elif obj.variables is not defined %} +CalmTask.SetVariable.escript.py3(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- else %} +CalmTask.SetVariable.escript.py3(name='{{obj.name}}', filename={{obj.attrs.script_file}}, cred={{obj.cred}}, target={{obj.target}}, variables={{obj.variables}} {%- if obj.attrs.tunnel_reference%}, tunnel=Ref.Tunnel(name="{{ obj.attrs.tunnel_reference.name }}"){%- endif %}) +{%- endif %} +{%- endmacro %} + +{{ setvariable_escript_py3_task(obj) }} diff --git a/calm/dsl/decompile/task.py b/calm/dsl/decompile/task.py index c820ba74..20300c01 100644 --- a/calm/dsl/decompile/task.py +++ b/calm/dsl/decompile/task.py @@ -57,6 +57,9 @@ def render_task_template( elif script_type == "static": schema_file = "task_exec_escript.py.jinja2" + elif script_type == "static_py3": + schema_file = "task_exec_escript_py3.py.jinja2" + elif script_type == "npsscript": schema_file = "task_exec_powershell.py.jinja2" @@ -75,6 +78,9 @@ def render_task_template( elif script_type == "static": schema_file = "task_setvariable_escript.py.jinja2" + elif 
script_type == "static_py3": + schema_file = "task_setvariable_escript_py3.py.jinja2" + elif script_type == "npsscript": schema_file = "task_setvariable_powershell.py.jinja2" @@ -200,6 +206,9 @@ def render_task_template( elif script_type == "static": schema_file = "task_decision_escript.py.jinja2" + elif script_type == "static_py3": + schema_file = "task_decision_escript_py3.py.jinja2" + elif script_type == "npsscript": schema_file = "task_decision_powershell.py.jinja2" elif cls.type == "WHILE_LOOP": @@ -235,7 +244,7 @@ def create_script_file(script_type, script="", entity_context=""): elif script_type == "npsscript": file_name += ".ps1" - elif script_type == "static": + elif script_type in ["static", "static_py3"]: file_name += ".py" else: diff --git a/calm/dsl/providers/plugins/ahv_vm/ahv_vm_provider_spec.yaml.jinja2 b/calm/dsl/providers/plugins/ahv_vm/ahv_vm_provider_spec.yaml.jinja2 index fa110476..938086cb 100644 --- a/calm/dsl/providers/plugins/ahv_vm/ahv_vm_provider_spec.yaml.jinja2 +++ b/calm/dsl/providers/plugins/ahv_vm/ahv_vm_provider_spec.yaml.jinja2 @@ -475,6 +475,18 @@ properties: type: string type: type: string + recovery_point_reference: + type: [object, "null"] + properties: + uuid: + type: string + kind: + type: string + default: availability_zone + name: + type: string + type: + type: string availability_zone_reference: type: [object, "null"] properties: diff --git a/calm/dsl/providers/plugins/azure_vm/azure_vm_provider_spec.yaml.jinja2 b/calm/dsl/providers/plugins/azure_vm/azure_vm_provider_spec.yaml.jinja2 index 30d110b3..e8d54bbb 100644 --- a/calm/dsl/providers/plugins/azure_vm/azure_vm_provider_spec.yaml.jinja2 +++ b/calm/dsl/providers/plugins/azure_vm/azure_vm_provider_spec.yaml.jinja2 @@ -37,7 +37,7 @@ title: Azure Linux OS Configuration type: [object, "null"] properties: custom_data: - type: string + type: [string, "null"] disable_password_auth: type: boolean default: False @@ -286,6 +286,12 @@ properties: {{ azurePublicIpInfo() | indent(4) 
}} type: type: string + enable_ip_forwarding: + type: boolean + asg_list: + type: array + items: + type: string {%- endmacro %} diff --git a/calm/dsl/store/cache.py b/calm/dsl/store/cache.py index e4861592..8b58ff74 100644 --- a/calm/dsl/store/cache.py +++ b/calm/dsl/store/cache.py @@ -160,7 +160,17 @@ def sync(cls): def sync_tables(tables): for table in tables: - table.sync() + try: + LOG.debug( + "Syncing cache for '{}' table".format(table.get_cache_type()) + ) + table.sync() + except Exception as exp: + LOG.error( + "Cache sync failed for '{}' table".format( + table.get_cache_type() + ) + ) click.echo(".", nl=False, err=True) cache_table_map = cls.get_cache_tables(sync_version=True) diff --git a/calm/dsl/store/version.py b/calm/dsl/store/version.py index fc9fb046..2efaeb90 100644 --- a/calm/dsl/store/version.py +++ b/calm/dsl/store/version.py @@ -49,3 +49,7 @@ def sync(cls): res = res.json() pc_version = res["version"] cls.create("PC", pc_version) + + @classmethod + def get_cache_type(cls): + return "Version" diff --git a/tests/ahv_vm_overlay_subnet/test_overlay_subnet_blueprint.json b/tests/ahv_vm_overlay_subnet/test_overlay_subnet_blueprint.json index dcc90b01..eece0c0b 100644 --- a/tests/ahv_vm_overlay_subnet/test_overlay_subnet_blueprint.json +++ b/tests/ahv_vm_overlay_subnet/test_overlay_subnet_blueprint.json @@ -418,7 +418,7 @@ "nic_list": [ { "network_function_nic_type": "INGRESS", - "nic_type": "DIRECT_NIC", + "nic_type": "NORMAL_NIC", "subnet_reference": { "kind": "subnet", "name": "vpc_subnet_1", diff --git a/tests/ahv_vm_overlay_subnet/test_overlay_subnet_blueprint.py b/tests/ahv_vm_overlay_subnet/test_overlay_subnet_blueprint.py index b2787836..d10c70be 100644 --- a/tests/ahv_vm_overlay_subnet/test_overlay_subnet_blueprint.py +++ b/tests/ahv_vm_overlay_subnet/test_overlay_subnet_blueprint.py @@ -10,38 +10,11 @@ from calm.dsl.builtins import vm_disk_package, AhvVmDisk, AhvVmNic from calm.dsl.builtins import AhvVmGC, AhvVmResources, AhvVm, Ref -from 
tests.utils import get_vpc_project +from tests.utils import get_vpc_project, get_local_az_overlay_details_from_dsl_config DSL_CONFIG = json.loads(read_local_file(".tests/config.json")) VPC_PROJECT = get_vpc_project(DSL_CONFIG) - -def get_local_az_overlay_details_from_dsl_config(config): - networks = config["ACCOUNTS"]["NUTANIX_PC"] - local_az_account = None - for account in networks: - if account.get("NAME") == "NTNX_LOCAL_AZ": - local_az_account = account - break - overlay_subnets_list = local_az_account.get("OVERLAY_SUBNETS", []) - vlan_subnets_list = local_az_account.get("SUBNETS", []) - - cluster = "" - vpc = "" - overlay_subnet = "" - - for subnet in overlay_subnets_list: - if subnet["NAME"] == "vpc_subnet_1" and subnet["VPC"] == "vpc_name_1": - overlay_subnet = subnet["NAME"] - vpc = subnet["VPC"] - - for subnet in vlan_subnets_list: - if subnet["NAME"] == config["AHV"]["NETWORK"]["VLAN1211"]: - cluster = subnet["CLUSTER"] - break - return overlay_subnet, vpc, cluster - - NETWORK1, VPC1, CLUSTER1 = get_local_az_overlay_details_from_dsl_config(DSL_CONFIG) # SSH Credentials diff --git a/tests/api_interface/test_runbooks/test_exec_task.py b/tests/api_interface/test_runbooks/test_exec_task.py index 015377b9..fd2f4ef6 100644 --- a/tests/api_interface/test_runbooks/test_exec_task.py +++ b/tests/api_interface/test_runbooks/test_exec_task.py @@ -32,6 +32,7 @@ # TODO: Add validation for macro test values: EscriptMacroTask class TestExecTasks: + @pytest.mark.escript @pytest.mark.runbook @pytest.mark.regression @pytest.mark.parametrize( @@ -133,6 +134,7 @@ def test_script_run(self, Runbook): if err: pytest.fail("[{}] - {}".format(err["code"], err["error"])) + @pytest.mark.escript @pytest.mark.runbook @pytest.mark.regression @pytest.mark.parametrize( @@ -172,6 +174,7 @@ def test_exec_validations(self, Runbook): else: print("runbook {} deleted".format(rb_name)) + @pytest.mark.escript @pytest.mark.runbook @pytest.mark.regression @pytest.mark.parametrize( @@ -254,6 +257,7 
@@ def test_macro_in_script(self, Runbook): if err: pytest.fail("[{}] - {}".format(err["code"], err["error"])) + @pytest.mark.escript @pytest.mark.runbook @pytest.mark.regression @pytest.mark.parametrize( diff --git a/tests/api_interface/test_runbooks/test_runbook_mpi.py b/tests/api_interface/test_runbooks/test_runbook_mpi.py index dc414c1d..13a9174a 100644 --- a/tests/api_interface/test_runbooks/test_runbook_mpi.py +++ b/tests/api_interface/test_runbooks/test_runbook_mpi.py @@ -1256,6 +1256,7 @@ def test_mpi_while_decision(self): if err: pytest.fail("[{}] - {}".format(err["code"], err["error"])) + @pytest.mark.escript @pytest.mark.runbook @pytest.mark.regression @pytest.mark.skipif( diff --git a/tests/blueprint_example/test_aws_bp/blueprint.py b/tests/blueprint_example/test_aws_bp/blueprint.py index 30c6c560..219b7ea1 100644 --- a/tests/blueprint_example/test_aws_bp/blueprint.py +++ b/tests/blueprint_example/test_aws_bp/blueprint.py @@ -12,6 +12,8 @@ DSL_CONFIG = json.loads(read_local_file(".tests/config.json")) POLICY_PROJECT = get_approval_project(DSL_CONFIG) +POLICY_PROJECT_CONFIG = DSL_CONFIG["POLICY_PROJECTS"]["PROJECT1"] +AWS_ACCOUNT = POLICY_PROJECT_CONFIG["ACCOUNTS"]["AWS"][0] # Secret Variables BP_CRED_root_PASSWORD = read_local_file(".tests/password") @@ -36,6 +38,7 @@ class VM1(Substrate): os_type = "Linux" provider_type = "AWS_VM" provider_spec = read_provider_spec(os.path.join("specs", "VM1_provider_spec.yaml")) + provider_spec.spec.get("resources", {})["account_uuid"] = AWS_ACCOUNT.get("UUID") provider_spec_editables = read_spec( os.path.join("specs", "VM1_create_spec_editables.yaml") ) diff --git a/tests/blueprint_example/test_vmware_bp/blueprint.py b/tests/blueprint_example/test_vmware_bp/blueprint.py index e8ff630a..17611bae 100644 --- a/tests/blueprint_example/test_vmware_bp/blueprint.py +++ b/tests/blueprint_example/test_vmware_bp/blueprint.py @@ -12,6 +12,8 @@ DSL_CONFIG = json.loads(read_local_file(".tests/config.json")) POLICY_PROJECT = 
get_approval_project(DSL_CONFIG) +POLICY_PROJECT_CONFIG = DSL_CONFIG["POLICY_PROJECTS"]["PROJECT1"] +VMWARE_ACCOUNT = POLICY_PROJECT_CONFIG["ACCOUNTS"]["VMWARE"][0] # Secret Variables BP_CRED_root_PASSWORD = read_local_file(".tests/password") @@ -36,6 +38,7 @@ class VM1(Substrate): os_type = "Linux" provider_type = "VMWARE_VM" provider_spec = read_vmw_spec(os.path.join("specs", "VM1_provider_spec.yaml")) + provider_spec.spec.get("resources", {})["account_uuid"] = VMWARE_ACCOUNT.get("UUID") provider_spec_editables = read_spec( os.path.join("specs", "VM1_create_spec_editables.yaml") ) diff --git a/tests/cli/runtime_helpers/ahv/app_edit_overlay_blueprint.py b/tests/cli/runtime_helpers/ahv/app_edit_overlay_blueprint.py index 53f97fe1..6972a702 100644 --- a/tests/cli/runtime_helpers/ahv/app_edit_overlay_blueprint.py +++ b/tests/cli/runtime_helpers/ahv/app_edit_overlay_blueprint.py @@ -9,7 +9,7 @@ from calm.dsl.builtins import read_local_file, read_spec from calm.dsl.builtins import readiness_probe, Ref, Metadata from calm.dsl.builtins import AppEdit, PatchField, AhvUpdateConfigAttrs -from tests.utils import get_vpc_project +from tests.utils import get_vpc_project, get_local_az_overlay_details_from_dsl_config CRED_USERNAME = read_local_file(".tests/username") CRED_PASSWORD = read_local_file(".tests/password") @@ -21,33 +21,6 @@ CLUSTER = DSL_CONFIG["ACCOUNTS"]["NUTANIX_PC"][0]["SUBNETS"][0]["CLUSTER"] NETWORK1 = DSL_CONFIG["AHV"]["NETWORK"]["VLAN1211"] - -def get_local_az_overlay_details_from_dsl_config(config): - networks = config["ACCOUNTS"]["NUTANIX_PC"] - local_az_account = None - for account in networks: - if account.get("NAME") == "NTNX_LOCAL_AZ": - local_az_account = account - break - overlay_subnets_list = local_az_account.get("OVERLAY_SUBNETS", []) - vlan_subnets_list = local_az_account.get("SUBNETS", []) - - cluster = "" - vpc = "" - overlay_subnet = "" - - for subnet in overlay_subnets_list: - if subnet["NAME"] == "vpc_subnet_1" and subnet["VPC"] == 
"vpc_name_1": - overlay_subnet = subnet["NAME"] - vpc = subnet["VPC"] - - for subnet in vlan_subnets_list: - if subnet["NAME"] == config["AHV"]["NETWORK"]["VLAN1211"]: - cluster = subnet["CLUSTER"] - break - return overlay_subnet, vpc, cluster - - NETWORK1, VPC1, CLUSTER1 = get_local_az_overlay_details_from_dsl_config(DSL_CONFIG) VPC_PROJECT = get_vpc_project(DSL_CONFIG) diff --git a/tests/cli/runtime_helpers/ahv/test_app_edit_overlay_json.py b/tests/cli/runtime_helpers/ahv/test_app_edit_overlay_json.py index 3ed90b4b..a71c3bfd 100644 --- a/tests/cli/runtime_helpers/ahv/test_app_edit_overlay_json.py +++ b/tests/cli/runtime_helpers/ahv/test_app_edit_overlay_json.py @@ -11,6 +11,7 @@ ) from calm.dsl.store import Version from distutils.version import LooseVersion as LV +from tests.utils import get_local_az_overlay_details_from_dsl_config CRED_USERNAME = read_local_file(".tests/username") CRED_PASSWORD = read_local_file(".tests/password") @@ -26,33 +27,6 @@ LOG = get_logging_handle(__name__) - -def get_local_az_overlay_details_from_dsl_config(config): - networks = config["ACCOUNTS"]["NUTANIX_PC"] - local_az_account = None - for account in networks: - if account.get("NAME") == "NTNX_LOCAL_AZ": - local_az_account = account - break - overlay_subnets_list = local_az_account.get("OVERLAY_SUBNETS", []) - vlan_subnets_list = local_az_account.get("SUBNETS", []) - - cluster = "" - vpc = "" - overlay_subnet = "" - - for subnet in overlay_subnets_list: - if subnet["NAME"] == "vpc_subnet_1" and subnet["VPC"] == "vpc_name_1": - overlay_subnet = subnet["NAME"] - vpc = subnet["VPC"] - - for subnet in vlan_subnets_list: - if subnet["NAME"] == config["AHV"]["NETWORK"]["VLAN1211"]: - cluster = subnet["CLUSTER"] - break - return overlay_subnet, vpc, cluster - - NETWORK1, VPC1, CLUSTER1 = get_local_az_overlay_details_from_dsl_config(DSL_CONFIG) diff --git a/tests/cli/test_policy_commands.py b/tests/cli/test_policy_commands.py index 9be4a461..b083d8c5 100644 --- 
a/tests/cli/test_policy_commands.py +++ b/tests/cli/test_policy_commands.py @@ -12,6 +12,10 @@ from calm.dsl.log import get_logging_handle from calm.dsl.store import Version from calm.dsl.builtins import read_local_file +from calm.dsl.cli.apps import _get_app +from calm.dsl.api import get_api_client +from calm.dsl.cli.constants import POLICY +from tests.utils import poll_runlog_status_policy DSL_CONFIG = json.loads(read_local_file(".tests/config.json")) POLICY_PROJECT = get_approval_project(DSL_CONFIG) @@ -214,6 +218,7 @@ def test_policy_approval_day_two_action(self): self._test_dsl_policy_enable() self._create_and_launch_app(DSL_DAY2_BP) self._run_day2_action() + self._watch_patch_update_app() self._test_get_approval_requests() self._test_approve_policy() @@ -666,4 +671,40 @@ def _run_day2_action(self): "--ignore_runtime_variables", ], ) + if result.exit_code: + cli_res_dict = {"Output": result.output, "Exception": str(result.exception)} + LOG.info(result.output) + LOG.debug( + "Cli Response: {}".format( + json.dumps(cli_res_dict, indent=4, separators=(",", ": ")) + ) + ) + pytest.fail("Update app failure") + self.approval_request_name = "Day_two_operation App " + self.created_app_name + + def _watch_patch_update_app(self): + """ + This helper watches and does polling on policy status until it reaches to POLICY_EXEC stated + """ + client = get_api_client() + app = _get_app(client, self.created_app_name) + app_id = app["metadata"]["uuid"] + url = client.application.ITEM.format(app_id) + "/app_runlogs/list" + payload = {"filter": "application_reference=={}".format(app_id)} + res, err = client.application.poll_action_run(url, payload) + if err: + pytest.fail("[{}] - {}".format(err["code"], err["error"])) + + response = res.json() + for entity in response.get("entities"): + if entity.get("status").get("name") == "patch_update1": + action_uuid = entity.get("metadata").get("uuid") + break + + payload = {"filter": "root_reference=={}".format(action_uuid)} + + state, 
reasons = poll_runlog_status_policy( + client, [POLICY.STATES.POLICY_EXEC], url, payload + ) + LOG.info("POLICY Run state: {}\n{}".format(state, reasons)) diff --git a/tests/escript/scripts/escript4.py.out b/tests/escript/scripts/escript4.py.out deleted file mode 100644 index 86b5f111..00000000 --- a/tests/escript/scripts/escript4.py.out +++ /dev/null @@ -1,4 +0,0 @@ -python3 -u'"A. Random Developer" ' -False -True diff --git a/tests/escript/scripts/escript5.py.out b/tests/escript/scripts/escript5.py.out deleted file mode 100644 index 2bd2ce86..00000000 --- a/tests/escript/scripts/escript5.py.out +++ /dev/null @@ -1 +0,0 @@ -('cost of', 'Belgian Waffles', 'is', '$5.95', 'with', '650', 'calories') diff --git a/tests/escript/scripts/escript_ast.py.out b/tests/escript/scripts/escript_ast.py.out new file mode 100644 index 00000000..56e419b6 --- /dev/null +++ b/tests/escript/scripts/escript_ast.py.out @@ -0,0 +1,35 @@ +Module( + body=[ + FunctionDef( + name='add', + args=arguments( + posonlyargs=[], + args=[ + arg(arg='a'), + arg(arg='b')], + kwonlyargs=[], + kw_defaults=[], + defaults=[]), + body=[ + Return( + value=BinOp( + left=Name(id='a', ctx=Load()), + op=Add(), + right=Name(id='b', ctx=Load())))], + decorator_list=[]), + Assign( + targets=[ + Name(id='result', ctx=Store())], + value=Call( + func=Name(id='add', ctx=Load()), + args=[ + Constant(value=1), + Constant(value=2)], + keywords=[])), + Expr( + value=Call( + func=Name(id='print', ctx=Load()), + args=[ + Name(id='result', ctx=Load())], + keywords=[]))], + type_ignores=[]) \ No newline at end of file diff --git a/tests/escript/scripts/escript_ast.py.txt b/tests/escript/scripts/escript_ast.py.txt new file mode 100644 index 00000000..4fdbdd30 --- /dev/null +++ b/tests/escript/scripts/escript_ast.py.txt @@ -0,0 +1,12 @@ +# python3;success + +import ast +code_string = """ +def add(a, b): + return a + b + +result = add(1, 2) +print(result) +""" +tree = ast.parse(code_string) +print(ast.dump(tree, indent=2)) \ No 
newline at end of file diff --git a/tests/escript/scripts/escript_aws.py b/tests/escript/scripts/escript_aws.py new file mode 100644 index 00000000..f5790b21 --- /dev/null +++ b/tests/escript/scripts/escript_aws.py @@ -0,0 +1,10 @@ +# python3;success + +import boto3 +ec2 = boto3.client('ec2', aws_access_key_id='accessKey', aws_secret_access_key='secretKey', region_name='us-east-1') +regions = ec2.describe_regions() +region_names = [] +for i in regions['Regions']: + region_names.append(i['RegionName']) + +print("us-east-2" in region_names) \ No newline at end of file diff --git a/tests/escript/scripts/escript_aws.py.out b/tests/escript/scripts/escript_aws.py.out new file mode 100644 index 00000000..0ca95142 --- /dev/null +++ b/tests/escript/scripts/escript_aws.py.out @@ -0,0 +1 @@ +True diff --git a/tests/escript/scripts/escript_azure.py b/tests/escript/scripts/escript_azure.py new file mode 100644 index 00000000..12818541 --- /dev/null +++ b/tests/escript/scripts/escript_azure.py @@ -0,0 +1,10 @@ +# python3;success + +from azure.identity import ClientSecretCredential +from azure.mgmt.resource import SubscriptionClient +creds = ClientSecretCredential(client_id="client_id_a", client_secret="client_key",tenant_id="tenant_id_a") +subscription_client = SubscriptionClient(credential=creds) +locations = subscription_client.subscriptions.list_locations("subscription_id") +locations = [i.name for i in locations] +sleep(5) +print("eastus2" in locations) \ No newline at end of file diff --git a/tests/escript/scripts/escript_azure.py.out b/tests/escript/scripts/escript_azure.py.out new file mode 100644 index 00000000..0ca95142 --- /dev/null +++ b/tests/escript/scripts/escript_azure.py.out @@ -0,0 +1 @@ +True diff --git a/tests/escript/scripts/escript_base64.py b/tests/escript/scripts/escript_base64.py new file mode 100644 index 00000000..ec4c5b55 --- /dev/null +++ b/tests/escript/scripts/escript_base64.py @@ -0,0 +1,16 @@ +# python3;success + +import base64 + +# Encoding +data 
= "Python 3 supported script" +data_bytes = data.encode('ascii') +base64_bytes = base64.b64encode(data_bytes) +base64_string = base64_bytes.decode('ascii') +print(base64_string == "UHl0aG9uIDMgc3VwcG9ydGVkIHNjcmlwdA==") + +# Decoding +base64_bytes_decode = base64_string.encode('ascii') +data_bytes = base64.b64decode(base64_bytes_decode) +decoded_data = data_bytes.decode('ascii') +pprint(decoded_data == "Python 3 supported script") \ No newline at end of file diff --git a/tests/escript/scripts/escript_base64.py.out b/tests/escript/scripts/escript_base64.py.out new file mode 100644 index 00000000..dbde4226 --- /dev/null +++ b/tests/escript/scripts/escript_base64.py.out @@ -0,0 +1,2 @@ +True +True diff --git a/tests/escript/scripts/escript_datetime.py b/tests/escript/scripts/escript_datetime.py new file mode 100644 index 00000000..1eafe2bb --- /dev/null +++ b/tests/escript/scripts/escript_datetime.py @@ -0,0 +1,6 @@ +# python3;success + +#Date time module check +from datetime import date +my_date = date(1996, 12, 11) +pprint(str(my_date) == "1996-12-11") \ No newline at end of file diff --git a/tests/escript/scripts/escript_datetime.py.out b/tests/escript/scripts/escript_datetime.py.out new file mode 100644 index 00000000..0ca95142 --- /dev/null +++ b/tests/escript/scripts/escript_datetime.py.out @@ -0,0 +1 @@ +True diff --git a/tests/escript/scripts/escript_difflib.py b/tests/escript/scripts/escript_difflib.py new file mode 100644 index 00000000..f803b4f5 --- /dev/null +++ b/tests/escript/scripts/escript_difflib.py @@ -0,0 +1,12 @@ +# python3;success + +import difflib +from difflib import SequenceMatcher + +# defining the strings +str_1 = "Welcome to" +str_2 = "Welcome to" + +# using the SequenceMatcher() function +my_seq = SequenceMatcher(a = str_1, b = str_2) +print(my_seq.ratio()) \ No newline at end of file diff --git a/tests/escript/scripts/escript_difflib.py.out b/tests/escript/scripts/escript_difflib.py.out new file mode 100644 index 00000000..d3827e75 --- 
/dev/null +++ b/tests/escript/scripts/escript_difflib.py.out @@ -0,0 +1 @@ +1.0 diff --git a/tests/escript/scripts/escript_invalid_import.py b/tests/escript/scripts/escript_invalid_import.py new file mode 100644 index 00000000..7b43a91f --- /dev/null +++ b/tests/escript/scripts/escript_invalid_import.py @@ -0,0 +1,6 @@ +# python3;failure + +from bs4 import BeautifulSoup +URL = "https://realpython.github.io/fake-jobs/" +page = urlreq(url, verb='GET') +soup = BeautifulSoup(page.content, "html.parser") \ No newline at end of file diff --git a/tests/escript/scripts/escript_invalid_import.py.out b/tests/escript/scripts/escript_invalid_import.py.out new file mode 100644 index 00000000..e0e868cf --- /dev/null +++ b/tests/escript/scripts/escript_invalid_import.py.out @@ -0,0 +1 @@ +Error in script: Syntax Error at line 4 - import from module 'bs4' not allowed diff --git a/tests/escript/scripts/escript_kubernetes.py b/tests/escript/scripts/escript_kubernetes.py new file mode 100644 index 00000000..0dc891a8 --- /dev/null +++ b/tests/escript/scripts/escript_kubernetes.py @@ -0,0 +1,13 @@ +# python3;success + +from kubernetes import client as k8client +aToken = 'token_a' +configuration=k8client.Configuration() +configuration.host="https://{}:{}".format('server', 'kube_port') +configuration.verify_ssl=False +configuration.debug=True +configuration.api_key={"authorization":"Bearer "+ aToken} +k8client.Configuration.set_default(configuration) +v1=k8client.CoreV1Api() +nodes=v1.list_node(watch=False) +print(nodes.items[0].metadata.name == "master0") \ No newline at end of file diff --git a/tests/escript/scripts/escript_kubernetes.py.out b/tests/escript/scripts/escript_kubernetes.py.out new file mode 100644 index 00000000..0ca95142 --- /dev/null +++ b/tests/escript/scripts/escript_kubernetes.py.out @@ -0,0 +1 @@ +True diff --git a/tests/escript/scripts/escript_re.py b/tests/escript/scripts/escript_re.py new file mode 100644 index 00000000..a3c6687e --- /dev/null +++ 
b/tests/escript/scripts/escript_re.py @@ -0,0 +1,11 @@ +# python3;success + +import re + +#Check if the string starts with "The" and ends with "Spain": +txt = "The rain in Spain" +x = re.search("^The.*Spain$", txt) +if x: + print("YES! We have a match!") +else: + print("No match") \ No newline at end of file diff --git a/tests/escript/scripts/escript_re.py.out b/tests/escript/scripts/escript_re.py.out new file mode 100644 index 00000000..f89b4473 --- /dev/null +++ b/tests/escript/scripts/escript_re.py.out @@ -0,0 +1 @@ +YES! We have a match! diff --git a/tests/escript/scripts/escript4.py b/tests/escript/scripts/escript_requests.py similarity index 99% rename from tests/escript/scripts/escript4.py rename to tests/escript/scripts/escript_requests.py index ad2bd060..9c9abbe7 100644 --- a/tests/escript/scripts/escript4.py +++ b/tests/escript/scripts/escript_requests.py @@ -1,5 +1,4 @@ # python3;success - print("python3") output = urlreq("https://pypi.org/pypi/sampleproject/json", verb="GET") email = output.json()["info"]["author_email"] diff --git a/tests/escript/scripts/escript_requests.py.out b/tests/escript/scripts/escript_requests.py.out new file mode 100644 index 00000000..d251278c --- /dev/null +++ b/tests/escript/scripts/escript_requests.py.out @@ -0,0 +1,4 @@ +python3 +'"A. 
Random Developer" ' +False +True diff --git a/tests/escript/scripts/escript_simplejson.py b/tests/escript/scripts/escript_simplejson.py new file mode 100644 index 00000000..9f49a854 --- /dev/null +++ b/tests/escript/scripts/escript_simplejson.py @@ -0,0 +1,2 @@ +# python3;success +print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4 * ' ')) \ No newline at end of file diff --git a/tests/escript/scripts/escript_simplejson.py.out b/tests/escript/scripts/escript_simplejson.py.out new file mode 100644 index 00000000..e8a7cf67 --- /dev/null +++ b/tests/escript/scripts/escript_simplejson.py.out @@ -0,0 +1,4 @@ +{ + "4": 5, + "6": 7 +} diff --git a/tests/escript/scripts/escript_sql_handle.py b/tests/escript/scripts/escript_sql_handle.py new file mode 100644 index 00000000..c8912c83 --- /dev/null +++ b/tests/escript/scripts/escript_sql_handle.py @@ -0,0 +1,16 @@ +# python3;success + +username="xxx" +password="xxxx" +server="xxx" +port="1433" +try: + cnxn = get_sql_handle(server, username, password, port=port, autocommit=True) + cursor = cnxn.cursor() + # List all databases + cursor.execute("""SELECT Name from sys.Databases;""") + for row in cursor: + print(row[0]) + cnxn.close() +except Exception as exp: + print("TODO") \ No newline at end of file diff --git a/tests/escript/scripts/escript_sql_handle.py.out b/tests/escript/scripts/escript_sql_handle.py.out new file mode 100644 index 00000000..1333ed77 --- /dev/null +++ b/tests/escript/scripts/escript_sql_handle.py.out @@ -0,0 +1 @@ +TODO diff --git a/tests/escript/scripts/escript_ujson.py b/tests/escript/scripts/escript_ujson.py new file mode 100644 index 00000000..56bad354 --- /dev/null +++ b/tests/escript/scripts/escript_ujson.py @@ -0,0 +1,2 @@ +# python3;success +print(ujson.dumps({"foo": "bar"}, indent=4)) \ No newline at end of file diff --git a/tests/escript/scripts/escript_ujson.py.out b/tests/escript/scripts/escript_ujson.py.out new file mode 100644 index 00000000..e63d37b6 --- /dev/null +++ 
b/tests/escript/scripts/escript_ujson.py.out @@ -0,0 +1,3 @@ +{ + "foo": "bar" +} diff --git a/tests/escript/scripts/escript5.py b/tests/escript/scripts/escript_xml.py similarity index 99% rename from tests/escript/scripts/escript5.py rename to tests/escript/scripts/escript_xml.py index 1dd85fbb..2df87943 100644 --- a/tests/escript/scripts/escript5.py +++ b/tests/escript/scripts/escript_xml.py @@ -1,5 +1,4 @@ # python3;success - xml_val = """ Belgian Waffles diff --git a/tests/escript/scripts/escript_xml.py.out b/tests/escript/scripts/escript_xml.py.out new file mode 100644 index 00000000..b2ca8f57 --- /dev/null +++ b/tests/escript/scripts/escript_xml.py.out @@ -0,0 +1 @@ +cost of Belgian Waffles is $5.95 with 650 calories diff --git a/tests/escript/scripts/escript_yaml.py b/tests/escript/scripts/escript_yaml.py new file mode 100644 index 00000000..41e2c1c4 --- /dev/null +++ b/tests/escript/scripts/escript_yaml.py @@ -0,0 +1,8 @@ +# python3;success +names_yaml = """ +- 'eric' +- 'justin' +- 'mary-kate' +""" +names = yaml.safe_load(names_yaml) +print(names) \ No newline at end of file diff --git a/tests/escript/scripts/escript_yaml.py.out b/tests/escript/scripts/escript_yaml.py.out new file mode 100644 index 00000000..214270c7 --- /dev/null +++ b/tests/escript/scripts/escript_yaml.py.out @@ -0,0 +1 @@ +['eric', 'justin', 'mary-kate'] diff --git a/tests/escript/scripts/escript6.py b/tests/escript/scripts/escript_z6.py similarity index 100% rename from tests/escript/scripts/escript6.py rename to tests/escript/scripts/escript_z6.py diff --git a/tests/escript/scripts/escript6.py.out b/tests/escript/scripts/escript_z6.py.out similarity index 100% rename from tests/escript/scripts/escript6.py.out rename to tests/escript/scripts/escript_z6.py.out diff --git a/tests/escript/scripts/parallel_escript_py2.py b/tests/escript/scripts/parallel_escript_py2.py new file mode 100644 index 00000000..206d77e8 --- /dev/null +++ b/tests/escript/scripts/parallel_escript_py2.py @@ -0,0 
+1,2 @@ +# python2;success;30 +print "just printing..." diff --git a/tests/escript/scripts/parallel_escript_py2.py.out b/tests/escript/scripts/parallel_escript_py2.py.out new file mode 100644 index 00000000..2a12b6bf --- /dev/null +++ b/tests/escript/scripts/parallel_escript_py2.py.out @@ -0,0 +1 @@ +just printing... diff --git a/tests/escript/scripts/parallel_escript_py3.py b/tests/escript/scripts/parallel_escript_py3.py new file mode 100644 index 00000000..0dc92748 --- /dev/null +++ b/tests/escript/scripts/parallel_escript_py3.py @@ -0,0 +1,2 @@ +# python3;success;30 +print("just printing...") diff --git a/tests/escript/scripts/parallel_escript_py3.py.out b/tests/escript/scripts/parallel_escript_py3.py.out new file mode 100644 index 00000000..2a12b6bf --- /dev/null +++ b/tests/escript/scripts/parallel_escript_py3.py.out @@ -0,0 +1 @@ +just printing... diff --git a/tests/escript/test_escript.py b/tests/escript/test_escript.py index d8e5f18c..9d0e9e10 100644 --- a/tests/escript/test_escript.py +++ b/tests/escript/test_escript.py @@ -11,11 +11,16 @@ from calm.dsl.cli import runbooks from calm.dsl.api import get_api_client from calm.dsl.cli.constants import RUNLOG +import json +from calm.dsl.runbooks import read_local_file +from tests.utils import get_escript_language_from_version + +DSL_CONFIG = json.loads(read_local_file(".tests/config.json")) LOG = get_logging_handle(__name__) -RUNBOOK_DSL_FILE_NAME = "dsl_escript_runbook.py" +RUNBOOK_DSL_FILE_NAME_PREFIX = "dsl_escript_runbook" def poll_runlog_status( @@ -70,17 +75,48 @@ def poll_runlog_status( # ) +def get_actual_script_status(client, runlog_uuid, script_name): + """parse given runbook execution status and provide + actual status map + """ + res, err = client.runbook.list_runlogs(runlog_uuid) + if err: + return (RUNLOG.STATUS.FAILURE, err) + response = res.json() + entities = sorted( + response["entities"], key=lambda x: int(x["metadata"]["creation_time"]) + ) + exp_entity = None + for entity in entities: + if 
script_name in entity.get("status", {}).get("task_reference", {}).get( + "name", "" + ): + exp_entity = entity + break + if not exp_entity: + return (RUNLOG.STATUS.FAILURE, err) + script_status = exp_entity.get("status", {}).get("state", RUNLOG.STATUS.FAILURE) + LOG.info("Checking status and output for escript: {}".format(script_name)) + res, err = client.runbook.runlog_output(runlog_uuid, exp_entity["metadata"]["uuid"]) + response = res.json() + script_output = response["status"]["output_list"][0]["output"] + return (script_status, script_output) + + +@pytest.mark.escript class TestEscript: def setup_class(self): """setup class method""" folder_path = os.path.dirname(os.path.abspath(__file__)) escript_folder = os.path.join(folder_path, "scripts/") escript_files_list = sorted(os.listdir(escript_folder)) - # {'scipt_file_name': (script_content, script_version, script_output, script_pass)} Eg:- {'escript1':('#python2;success\nprint "hi"', 'py3', 'hi\n', True)} - self.script_map = {} + # {'dsl_file_path': (script_name, script_content, script_version, script_output, script_pass)} + # Eg:- {'/path/to/dsl/file.py':('escript1', '#python2;success\nprint "hi"', 'py3', 'hi\n', True)} + self.script_dsl_file_map = {} for script in escript_files_list: if not script.endswith(".py"): continue + script_name = script.split(".")[0] file_path = os.path.join(escript_folder, script) with open(file_path) as fd: first_line = fd.readline() @@ -92,13 +128,14 @@ def setup_class(self): else: script_pass = True if "python3" in first_line.lower() or "py3" in first_line.lower(): - script_version = "py3" + script_version = "static_py3" elif "python2" in first_line.lower() or "py2" in first_line.lower(): - script_version = "py2" + script_version = "static" else: # default to python2 for now, pls fix python scripts - script_version = "py2" + script_version = "static" script_content = first_line + "\n" + script_content + script_language = get_escript_language_from_version(script_version) try: 
file_path = "{}.out".format(file_path) with open(file_path) as fd: @@ -106,34 +143,46 @@ def setup_class(self): except IOError: # skip output check if no out file in environment script_output = None - self.script_map[script.split(".")[0]] = ( - script_content, - script_version, - script_output, - script_pass, + provider = script.split(".")[0].split("_") + if len(provider) > 1: + provider = provider[1] + else: + provider = None + provider_config = DSL_CONFIG["provider"].get(provider, None) + if provider_config: + for replace_key, replace_item in provider_config.items(): + script_content = script_content.replace(replace_key, replace_item) + # Let's build a DSL file(s) for python scripts in scripts folder + filename = ( + RUNBOOK_DSL_FILE_NAME_PREFIX + str(uuid.uuid4()).split("-")[0] + ".py" ) - # Let's build a DSL file for python scripts in scripts folder - runbook_dsl_file = folder_path + "/dsl_file/" + RUNBOOK_DSL_FILE_NAME - with open(runbook_dsl_file, "w") as fd: - for line in runbook_dsl_input: - if "#scripts" in line: - for val, script in self.script_map.items(): - fd.write("script_{}_{}='''".format(val, script[1])) + runbook_dsl_file = folder_path + "/dsl_file/" + filename + self.script_dsl_file_map[runbook_dsl_file] = ( + script_name, # python(escript) script content + script_content, # python(escript) script content + script_version, # python(escript) script version + script_output, # python(escript) script output + script_pass, # python(escript) script expected status + script_language, # python(escript) script language + ) + with open(runbook_dsl_file, "w") as fd: + for line in runbook_dsl_input: + if "#scripts" in line: + fd.write("script_{}_{}='''".format(script_name, script_version)) fd.write("\n") - fd.write(script[0]) + fd.write(script_content) fd.write("\n") fd.write("'''") fd.write("\n") - elif "#replace_task" in line: - for val, script in self.script_map.items(): - task_ln = ' Task.Exec.escript(name="{}", script=script_{}_{})'.format( - val, val, 
script[1] + elif "#replace_task" in line: + task_ln = ' Task.Exec.escript{}(name="{}", script=script_{}_{})'.format( + script_language, script_name, script_name, script_version ) fd.write(task_ln) fd.write("\n") - else: - fd.write(line) - fd.write("\n") + else: + fd.write(line) + fd.write("\n") def test_run_escript_via_runbook(self): """ @@ -155,85 +204,97 @@ def test_run_escript_via_runbook(self): Error at line 3> name 'pi' is not defined """ - print(self.script_map) client = get_api_client() - folder_path = os.path.dirname(os.path.abspath(__file__)) - runbook_dsl_file = folder_path + "/dsl_file/" + RUNBOOK_DSL_FILE_NAME - LOG.info("runbook dsl file used {}".format(RUNBOOK_DSL_FILE_NAME)) - runbook_name = "escript_runbook_{}".format(str(uuid.uuid4())[-12:]) - # Create runbook - res = runbooks.create_runbook_command( - runbook_dsl_file, runbook_name, description="", force=True - ) - try: - # Get runbook uuid - runbook_uuid = runbooks.get_runbook(client, runbook_name)["metadata"][ - "uuid" - ] - # Execute runbook - res, err = client.runbook.run(runbook_uuid, {}) - if err: - LOG.info("run: response: {}\n err: {}".format(res, err)) - assert False, "Runbook execution failed" - response = res.json() - LOG.debug(">> Runbook execute response: {}".format(response)) - runlog_uuid = response["status"]["runlog_uuid"] - - # polling till runbook run gets to terminal state - state, reasons = poll_runlog_status( - client, runlog_uuid, RUNLOG.TERMINAL_STATES, maxWait=30 + errors_map = {} + for runbook_dsl_file, script_details in self.script_dsl_file_map.items(): + LOG.info("runbook dsl file used {}".format(runbook_dsl_file)) + runbook_name = "runbook_{}_{}".format( + script_details[0], str(uuid.uuid4())[-12:] ) - LOG.debug(">> Runbook Run state: {}\n{}".format(state, reasons)) - # assert for overall runbook status - if all([val[3] for key, val in self.script_map.items()]): - expected_overall_status = RUNLOG.STATUS.SUCCESS - else: - expected_overall_status = RUNLOG.STATUS.FAILURE - 
assert state == expected_overall_status, reasons - - # assert for overall task status ans output - res, err = client.runbook.list_runlogs(runlog_uuid) - response = res.json() - entities = sorted( - response["entities"], key=lambda x: int(x["metadata"]["creation_time"]) + errors_map[runbook_name] = [] + # Create runbook + res = runbooks.create_runbook_command( + runbook_dsl_file, runbook_name, description="", force=True ) - for entity in entities: - script_name = ( - entity.get("status", {}).get("task_reference", {}).get("name", "") - ) - script_status = entity.get("status", {}).get( - "state", RUNLOG.STATUS.FAILURE - ) - if script_name not in self.script_map.keys(): - continue - LOG.info( - "Checking status and output for escript: {}".format(script_name) - ) - res, err = client.runbook.runlog_output( - runlog_uuid, entity["metadata"]["uuid"] - ) + try: + # Get runbook uuid + runbook_uuid = runbooks.get_runbook(client, runbook_name)["metadata"][ + "uuid" + ] + # Execute runbook + res, err = client.runbook.run(runbook_uuid, {}) + if err: + LOG.info("run: response: {}\n err: {}".format(res, err)) + assert False, "Runbook execution failed" response = res.json() - task_output = response["status"]["output_list"][0]["output"] - assert ( - self.script_map[script_name][2] == task_output - ), "Script: {}\nExpected output: {}\nActual output:{}".format( - script_name, self.script_map[script_name][2], task_output + LOG.debug(">> Runbook execute response: {}".format(response)) + runlog_uuid = response["status"]["runlog_uuid"] + + # polling till runbook run gets to terminal state + state, reasons = poll_runlog_status( + client, runlog_uuid, RUNLOG.TERMINAL_STATES, maxWait=30 ) - expected_status = ( - RUNLOG.STATUS.SUCCESS - if self.script_map[script_name][3] - else RUNLOG.STATUS.FAILURE + LOG.debug(">> Runbook Run state: {}\n{}".format(state, reasons)) + # assert for overall runbook status + if script_details[4]: + expected_overall_status = [RUNLOG.STATUS.SUCCESS] + else: + 
expected_overall_status = [ + RUNLOG.STATUS.FAILURE, + RUNLOG.STATUS.ERROR, + ] + + # check for overall status + if state not in expected_overall_status: + err_msg = """Runbook is in unexpected state + Expected: {} + Actual: {} + """.format( + expected_overall_status, state + ) + if reasons: + err_msg += "due to {}".format(reasons) + errors_map[runbook_name].append(err_msg) + actual_script_status, actual_script_output = get_actual_script_status( + client, runlog_uuid, script_details[0] ) - assert ( - expected_status == script_status - ), "Script: {}\nExpected status:{}\nActual status: {}".format( - script_name, expected_status, script_status + # check for script output + if script_details[3] != actual_script_output: + err_msg = """Mismatch in Output: + Expected output: {} + Actual output: {} + """.format( + script_details[3], actual_script_output + ) + errors_map[runbook_name].append(err_msg) + # check for script status + if actual_script_status not in expected_overall_status: + err_msg = """Mismatch in Status: + Expected status: {} + Actual status: {} + """.format( + expected_overall_status, actual_script_status + ) + errors_map[runbook_name].append(err_msg) + # delete runbook when no failures + if not errors_map[runbook_name]: + runbooks.delete_runbook([runbook_name]) + else: + LOG.error("runbook: {} encountered error".format(runbook_name)) + except Exception as error: + LOG.info( + "Got below exception during test for runbook: {} created using dsl file: {}\n Exception: {}".format( + runbook_name, runbook_dsl_file, error + ) ) - if expected_status != RUNLOG.STATUS.SUCCESS: - # once task failed, next task won't be executed, so skip - break - except Exception as error: - LOG.info("Got exception during test: {}".format(error)) - assert False, error - finally: - runbooks.delete_runbook([runbook_name]) + errors_map[runbook_name].append("Got exception: {}".format(error)) + if any(errors_map.values()): + LOG.info("Got below exception during test") + for runbook, error 
in errors_map.items(): + if error: + LOG.error( + "\nErrors of runbook: {} can be found below".format(runbook) + ) + for err in error: + LOG.error("{}".format(err)) + assert False, errors_map diff --git a/tests/escript/test_parallel_escript.py b/tests/escript/test_parallel_escript.py new file mode 100644 index 00000000..3b6c41c2 --- /dev/null +++ b/tests/escript/test_parallel_escript.py @@ -0,0 +1,211 @@ +# escript test +import uuid +import pytest +import os +import json + + +from calm.dsl.log import get_logging_handle +from calm.dsl.cli import runbooks +from calm.dsl.api import get_api_client +from calm.dsl.cli.constants import RUNLOG +from calm.dsl.runbooks import read_local_file +from tests.utils import poll_runlog_status, get_escript_language_from_version + + +LOG = get_logging_handle(__name__) + +RUNBOOK_DSL_FILE_NAME_PREFIX = "dsl_escript_parallel_runbook" + +ESCRIPT_BASE_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "scripts/") + +DEFAULT_ESCRIPT_TIMEOUT = 60 + +# create DSL file on the fly +runbook_dsl_input = [ + "from calm.dsl.runbooks import runbook, runbook_json, parallel, branch", + "from calm.dsl.runbooks import RunbookTask as Task", + "", + "@runbook", + "def SampleRunbook():", + " with parallel() as p:", + "#replace_task", + "", +] + + +def dsl_file_udpate( + runbook_dsl_file, + escript, + escript_version="static", + parallel_count=10, +): + escript_name = escript.split(".")[0] + escript_language = get_escript_language_from_version(escript_version) + with open(runbook_dsl_file, "w") as fd: + for line in runbook_dsl_input: + if "#replace_task" in line: + for i in range(parallel_count): + task_ln = ' with branch(p):\n Task.Exec.escript{}(name="{}_{}",filename="{}")'.format( + escript_language, + escript_name, + i + 1, + os.path.join(ESCRIPT_BASE_PATH, escript), + ) + fd.write(task_ln) + fd.write("\n") + else: + fd.write(line) + fd.write("\n") + + +def get_escript_version_status(escript): + with open(os.path.join(ESCRIPT_BASE_PATH, 
escript)) as fd: + first_line = fd.readline() + if "success" in first_line.lower(): + script_pass = RUNLOG.STATUS.SUCCESS + elif "failure" in first_line.lower(): + script_pass = RUNLOG.STATUS.FAILURE + else: + script_pass = RUNLOG.STATUS.SUCCESS + if "python3" in first_line.lower() or "py3" in first_line.lower(): + script_version = "static_py3" + elif "python2" in first_line.lower() or "py2" in first_line.lower(): + script_version = "static" + else: + # default to python2 for now, pls fix python scripts + script_version = "static" + try: + script_timeout = int(first_line.split(";")[2]) + except IndexError: + script_timeout = DEFAULT_ESCRIPT_TIMEOUT + return (script_version, script_pass, script_timeout) + + +@pytest.mark.escript +class TestEscript: + @pytest.mark.parametrize( + "escript, parallel_count", + [ + pytest.param("parallel_escript_py3.py", 60), + pytest.param("parallel_escript_py2.py", 60), + ], + ) + def test_run_parallel_escript_via_runbook(self, escript, parallel_count): + """ + Test run escript with parallel task + """ + errors_map = {} + client = get_api_client() + folder_path = os.path.dirname(os.path.abspath(__file__)) + runbook_dsl_file = os.path.join( + folder_path, + "dsl_file/", + "{}_{}".format(RUNBOOK_DSL_FILE_NAME_PREFIX, escript), + ) + # Let's build a DSL file for python scripts in scripts folder + ( + script_version, + expected_script_status, + script_timeout, + ) = get_escript_version_status(escript) + dsl_file_udpate( + runbook_dsl_file, + escript, + escript_version=script_version, + parallel_count=parallel_count, + ) + LOG.info("runbook dsl file used {}".format(runbook_dsl_file)) + runbook_name = "escript_parallel_runbook_{}".format(str(uuid.uuid4())[-12:]) + # get script output + try: + file_path = "{}.out".format(os.path.join(ESCRIPT_BASE_PATH, escript)) + with open(file_path) as fd: + exp_script_output = fd.read() + except IOError: + # skip output check if no out file in environment + exp_script_output = None + # Create runbook + res 
 = runbooks.create_runbook_command( + runbook_dsl_file, runbook_name, description="", force=True + ) + try: + # Get runbook uuid + runbook_uuid = runbooks.get_runbook(client, runbook_name)["metadata"][ + "uuid" + ] + # Execute runbook + res, err = client.runbook.run(runbook_uuid, {}) + if err: + LOG.info("run: response: {}\n err: {}".format(res, err)) + assert False, "Runbook execution failed" + response = res.json() + LOG.debug(">> Runbook execute response: {}".format(response)) + runlog_uuid = response["status"]["runlog_uuid"] + + # polling till runbook run gets to terminal state + state, reasons = poll_runlog_status( + client, + runlog_uuid, + RUNLOG.TERMINAL_STATES, + maxWait=script_timeout + parallel_count, + ) + LOG.debug(">> Runbook Run state: {}\n{}".format(state, reasons)) + assert state == expected_script_status, reasons + + # assert for overall task status and output + res, err = client.runbook.list_runlogs(runlog_uuid) + response = res.json() + entities = sorted( + response["entities"], key=lambda x: int(x["metadata"]["creation_time"]) + ) + for entity in entities: + script_name = ( + entity.get("status", {}).get("task_reference", {}).get("name", "") + ) + script_status = entity.get("status", {}).get( + "state", RUNLOG.STATUS.FAILURE + ) + if escript.split(".")[0] not in script_name: + continue + else: + errors_map[script_name] = [] + LOG.info( + "Checking status and output for escript: {}".format(script_name) + ) + res, err = client.runbook.runlog_output( + runlog_uuid, entity["metadata"]["uuid"] + ) + response = res.json() + actual_output = response["status"]["output_list"][0]["output"] + if expected_script_status != script_status: + err_msg = """Mismatch in status for script: {} + Expected status: {} + Actual status: {} + """.format( + script_name, expected_script_status, script_status + ) + errors_map[script_name].append(err_msg) + if exp_script_output and actual_output != exp_script_output: + err_msg = """Mismatch in output for script: {} + Expected 
output: {} + Actual output: {} + """.format( + script_name, exp_script_output, actual_output + ) + errors_map[script_name].append(err_msg) + if any(errors_map.values()): + LOG.info("Got below exception during test") + for escript, error in errors_map.items(): + if error: + LOG.error( + "\nErrors of escript: {} can be found below".format(escript) + ) + for err in error: + LOG.error("{}".format(err)) + assert False, errors_map + else: + client.runbook.delete(runbook_uuid) + except Exception as error: + LOG.info("Got exception during test: {}".format(error)) + assert False, error diff --git a/tests/multivm_migrate/specs/VM1_create_spec_editables.yaml b/tests/multivm_migrate/specs/VM1_create_spec_editables.yaml new file mode 100644 index 00000000..ebe048f6 --- /dev/null +++ b/tests/multivm_migrate/specs/VM1_create_spec_editables.yaml @@ -0,0 +1,16 @@ +name: false +resources: + disk_list: + '0': + data_source_reference: false + device_properties: + device_type: false + disk_address: + adapter_type: false + guest_customization: false + memory_size_mib: false + nic_list: + '0': + subnet_reference: false + num_sockets: false + num_vcpus_per_socket: false diff --git a/tests/multivm_migrate/specs/test_bp_python2_tasks_at_all_levels.py b/tests/multivm_migrate/specs/test_bp_python2_tasks_at_all_levels.py new file mode 100644 index 00000000..6f5531b6 --- /dev/null +++ b/tests/multivm_migrate/specs/test_bp_python2_tasks_at_all_levels.py @@ -0,0 +1,240 @@ +# THIS FILE IS AUTOMATICALLY GENERATED. +# Disclaimer: Please test this file before using in production. 
+""" +Generated blueprint DSL (.py) +""" + +import json # no_qa +import os # no_qa + +from calm.dsl.builtins import * # no_qa +from calm.dsl.constants import PROVIDER + +DSL_CONFIG = json.loads(read_local_file(".tests/config.json")) +CENTOS_HM = DSL_CONFIG["AHV"]["IMAGES"]["DISK"]["CENTOS_HADOOP_MASTER"] +PROJECT = DSL_CONFIG["PROJECTS"]["PROJECT1"] +PROJECT_NAME = PROJECT["NAME"] +ENV_NAME = PROJECT["ENVIRONMENTS"][0]["NAME"] + +# Credentials +BP_CRED_akhil_cred = basic_cred( + "root", + "nutanix/4u", + name="akhil_cred", + type="PASSWORD", + default=True, +) + + +class Service1(Service): + + service_var = CalmVariable.WithOptions.FromTask( + CalmTask.Exec.escript.py2( + name="", + script='print "akhil"', + ), + label="", + is_mandatory=False, + is_hidden=False, + description="", + ) + + @action + def service_action(): + + profile_var = CalmVariable.Simple( + "", + label="", + is_mandatory=False, + is_hidden=False, + runtime=False, + description="", + ) + CalmTask.Exec.escript.py2( + name="service_action_task", + script='print "@@{service_var}@@"', + target=ref(Service1), + ) + + +class testvmcalm_random_hashResources(AhvVmResources): + + memory = 1 + vCPUs = 1 + cores_per_vCPU = 1 + disks = [AhvVmDisk.Disk.Scsi.cloneFromImageService(CENTOS_HM, bootable=True)] + nics = [AhvVmNic.NormalNic.ingress(PROVIDER.AHV.VLAN_1211)] + + guest_customization = AhvVmGC.CloudInit( + filename="testvmcalm_random_hash_cloud_init_data.yaml" + ) + + +class testvmcalm_random_hash(AhvVm): + + name = "testvm-@@{calm_random_hash}@@" + resources = testvmcalm_random_hashResources + + categories = {"AppFamily": "Backup"} + + +class Service_VM(Substrate): + + account = Ref.Account("NTNX_LOCAL_AZ") + os_type = "Linux" + provider_type = "AHV_VM" + provider_spec = testvmcalm_random_hash + provider_spec_editables = read_spec("VM1_create_spec_editables.yaml") + readiness_probe = readiness_probe( + connection_type="SSH", + disabled=False, + retries="5", + connection_port=22, + 
address="@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@", + delay_secs="60", + credential=ref(BP_CRED_akhil_cred), + ) + + @action + def __pre_create__(): + + CalmTask.Exec.escript.py2( + name="Python 2 precreate", + script='print "precreate"', + target=ref(Service_VM), + ) + + @action + def post_action_create(): + + CalmTask.Exec.escript.py2( + name="python 2 post create", + script='print "post create"', + target=ref(Service_VM), + ) + + @action + def __post_delete__(): + + CalmTask.SetVariable.escript.py2( + name="python 2 set var", + script='print "var=postdelete"', + target=ref(Service_VM), + variables=["var"], + ) + + CalmTask.Exec.escript.py2( + name="python 2 post delete", + script='print "@@{var}@@"', + target=ref(Service_VM), + ) + + +class Package1(Package): + + services = [ref(Service1)] + + @action + def __install__(): + + CalmTask.Exec.escript.py2( + name="python2 package install", + script='print "package_install"', + target=ref(Service1), + ) + + @action + def __uninstall__(): + + CalmTask.SetVariable.escript.py2( + name="python2 package uninstall set var", + script='print "pack_var=package_unainsta"', + target=ref(Service1), + variables=["pack_var"], + ) + + CalmTask.Exec.escript.py2( + name="python 2 package uninstall read", + script='print "@@{pack_var}@@"', + target=ref(Service1), + ) + + +class _8488e0af_deployment(Deployment): + + name = "8488e0af_deployment" + min_replicas = "1" + max_replicas = "1" + default_replicas = "1" + + packages = [ref(Package1)] + substrate = ref(Service_VM) + + +class Default(Profile): + + environments = [Ref.Environment(name=ENV_NAME)] + deployments = [_8488e0af_deployment] + + profile_level_var = CalmVariable.WithOptions.FromTask( + CalmTask.Exec.escript.py2(name="", script='print "profile level var"'), + label="", + is_mandatory=False, + is_hidden=False, + description="", + ) + + @action + def pythonaction(name="python action"): + + profile_level_action_var = CalmVariable.WithOptions.FromTask( 
+ CalmTask.Exec.escript.py2( + name="", script='print "profile_level_action_var"' + ), + label="", + is_mandatory=False, + is_hidden=False, + description="", + ) + with parallel() as p0: + with branch(p0): + CalmTask.Exec.escript.py2( + name="read_profile_level_var", + script='print "@@{profile_level_var}@@"', + target=ref(Service1), + ) + + CalmTask.Exec.escript.py2( + name="read profile level action var", + script='print "@@{profile_level_action_var}@@"', + target=ref(Service1), + ) + + Service1.service_action(name="read service action") + + CalmTask.SetVariable.escript.py2( + name="set var in profile level", + script='print "profile_var=akhil"', + target=ref(Service1), + variables=["profile_var"], + ) + + CalmTask.Exec.escript.py2( + name="read set var in profile level", + script='print "@@{profile_var}@@"', + target=ref(Service1), + ) + + +class multi_vm_migrate_blueprint(Blueprint): + + services = [Service1] + packages = [Package1] + substrates = [Service_VM] + profiles = [Default] + credentials = [BP_CRED_akhil_cred] + + +class BpMetadata(Metadata): + + project = Ref.Project(PROJECT_NAME) diff --git a/tests/multivm_migrate/specs/testvmcalm_random_hash_cloud_init_data.yaml b/tests/multivm_migrate/specs/testvmcalm_random_hash_cloud_init_data.yaml new file mode 100644 index 00000000..5a365b41 --- /dev/null +++ b/tests/multivm_migrate/specs/testvmcalm_random_hash_cloud_init_data.yaml @@ -0,0 +1,5 @@ +|- +#cloud-config +package_upgrade: true + - vim + - git diff --git a/tests/multivm_migrate/specs/unicode_name_tasks_blueprint.py b/tests/multivm_migrate/specs/unicode_name_tasks_blueprint.py new file mode 100644 index 00000000..e5881b65 --- /dev/null +++ b/tests/multivm_migrate/specs/unicode_name_tasks_blueprint.py @@ -0,0 +1,228 @@ +# THIS FILE IS AUTOMATICALLY GENERATED. +# Disclaimer: Please test this file before using in production. 
+""" +Generated blueprint DSL (.py) +""" + +import json # no_qa +import os # no_qa + +from calm.dsl.builtins import * # no_qa +from calm.dsl.constants import PROVIDER + +DSL_CONFIG = json.loads(read_local_file(".tests/config.json")) +CENTOS_HM = DSL_CONFIG["AHV"]["IMAGES"]["DISK"]["CENTOS_HADOOP_MASTER"] +PROJECT = DSL_CONFIG["PROJECTS"]["PROJECT1"] +PROJECT_NAME = PROJECT["NAME"] +ENV_NAME = PROJECT["ENVIRONMENTS"][0]["NAME"] + +# Credentials +BP_CRED_akhil_cred = basic_cred( + "root", + "nutanix/4u", + name="akhil_cred", + type="PASSWORD", + default=True, +) + + +class Service1(Service): + + service_var = CalmVariable.WithOptions.FromTask( + CalmTask.Exec.escript.py2(name="", script='print "平仮名"'), + label="", + is_mandatory=False, + is_hidden=False, + description="", + ) + + @action + def service_action(name="片仮名 service_action"): + + profile_var = CalmVariable.Simple( + "", + label="", + is_mandatory=False, + is_hidden=False, + runtime=False, + description="", + ) + CalmTask.Exec.escript.py2( + name="片仮名 service_action_task", + script='print "@@{service_var}@@"', + target=ref(Service1), + ) + + +class testvmcalm_random_hashResources(AhvVmResources): + + memory = 1 + vCPUs = 1 + cores_per_vCPU = 1 + disks = [AhvVmDisk.Disk.Scsi.cloneFromImageService(CENTOS_HM, bootable=True)] + nics = [AhvVmNic.NormalNic.ingress(PROVIDER.AHV.VLAN_1211)] + + guest_customization = AhvVmGC.CloudInit( + filename="testvmcalm_random_hash_cloud_init_data.yaml" + ) + + +class testvmcalm_random_hash(AhvVm): + + name = "testvm-@@{calm_random_hash}@@" + resources = testvmcalm_random_hashResources + + categories = {"AppFamily": "Backup"} + + +class Service_VM(Substrate): + + account = Ref.Account("NTNX_LOCAL_AZ") + os_type = "Linux" + provider_type = "AHV_VM" + provider_spec = testvmcalm_random_hash + provider_spec_editables = read_spec("VM1_create_spec_editables.yaml") + readiness_probe = readiness_probe( + connection_type="SSH", + disabled=False, + retries="5", + connection_port=22, + 
address="@@{platform.status.resources.nic_list[0].ip_endpoint_list[0].ip}@@", + delay_secs="60", + credential=ref(BP_CRED_akhil_cred), + ) + + @action + def __pre_create__(): + + CalmTask.Exec.escript.py2( + name="Python 2 precreate 平仮名", + script='print "precreate"', + target=ref(Service_VM), + ) + + @action + def __post_delete__(): + + CalmTask.SetVariable.escript.py2( + name="片仮名 python 2 set var", + script='print "var=postdelete片仮名"', + target=ref(Service_VM), + variables=["var"], + ) + + CalmTask.Exec.escript.py2( + name="片仮名 python 2 post delete", + script='print "@@{var}@@"', + target=ref(Service_VM), + ) + + +class Package1(Package): + + services = [ref(Service1)] + + @action + def __install__(): + + CalmTask.Exec.escript.py2( + name="python2 package install", + script='print "package_install"', + target=ref(Service1), + ) + + @action + def __uninstall__(): + + CalmTask.SetVariable.escript.py2( + name="片仮名 python2 package uninstall set var", + script='print "pack_var=package_unainsta片仮名"', + target=ref(Service1), + variables=["pack_var"], + ) + + CalmTask.Exec.escript.py2( + name="片仮名 python 2 package uninstall read", + script='print "@@{pack_var}@@"', + target=ref(Service1), + ) + + +class _8488e0af_deployment(Deployment): + + name = "8488e0af_deployment" + min_replicas = "1" + max_replicas = "1" + default_replicas = "1" + + packages = [ref(Package1)] + substrate = ref(Service_VM) + + +class Default(Profile): + + environments = [Ref.Environment(name=ENV_NAME)] + deployments = [_8488e0af_deployment] + + profile_level_var = CalmVariable.WithOptions.FromTask( + CalmTask.Exec.escript.py2(name="", script='print "profile level var"'), + label="", + is_mandatory=False, + is_hidden=False, + description="", + ) + + @action + def pythonaction(name="片仮名 片仮名 python action"): + + profile_level_action_var = CalmVariable.WithOptions.FromTask( + CalmTask.Exec.escript.py2( + name="", + script='print "profile_level_action_var"', + ), + label="", + is_mandatory=False, + 
is_hidden=False, + description="", + ) + CalmTask.Exec.escript.py2( + name="片仮名read_profile_level_var", + script='print "@@{profile_level_var}@@"', + target=ref(Service1), + ) + + CalmTask.Exec.escript.py2( + name="片仮名 read profile level action var", + script='print "@@{profile_level_action_var}@@"', + target=ref(Service1), + ) + + Service1.service_action(name="片仮名 read service action") + + CalmTask.SetVariable.escript.py2( + name="片仮名 set var in profile level", + script='print "profile_var=片仮名"', + target=ref(Service1), + variables=["profile_var"], + ) + + CalmTask.Exec.escript.py2( + name="片仮名 read set var in profile level", + script='print "@@{profile_var}@@"', + target=ref(Service1), + ) + + +class Test_Multi_VM_MIGRATE1697172982(Blueprint): + """'Test Multi VM Migrate'""" + + services = [Service1] + packages = [Package1] + substrates = [Service_VM] + profiles = [Default] + credentials = [BP_CRED_akhil_cred] + + +class BpMetadata(Metadata): + + project = Ref.Project(PROJECT_NAME) diff --git a/tests/multivm_migrate/specs/variable_list_params.py b/tests/multivm_migrate/specs/variable_list_params.py new file mode 100644 index 00000000..9f7fb01b --- /dev/null +++ b/tests/multivm_migrate/specs/variable_list_params.py @@ -0,0 +1,13 @@ +variable_list = [ + { + "value": {"value": "profile_level_var"}, + "context": "Default", + "name": "profile_level_var", + }, + {"value": {"value": "akhil"}, "context": "Service1", "name": "service_var"}, + { + "value": {"value": "profile_level_action_var"}, + "context": "python action", + "name": "profile_level_action_var", + }, +] diff --git a/tests/multivm_migrate/test_multivm_migrate.py b/tests/multivm_migrate/test_multivm_migrate.py new file mode 100644 index 00000000..25f1afa6 --- /dev/null +++ b/tests/multivm_migrate/test_multivm_migrate.py @@ -0,0 +1,589 @@ +import os +import pytest +import re +import time +import json +import sys +import uuid +import traceback +import subprocess +import filecmp +from click.testing import 
 CliRunner + +from calm.dsl.cli import main as cli +from calm.dsl.cli.main import get_api_client +from calm.dsl.log import get_logging_handle +from calm.dsl.builtins import read_local_file +from tests.utils import Application as ApplicationHelper +from calm.dsl.cli.constants import APPLICATION + +LOG = get_logging_handle(__name__) +FOLDER_PATH = os.path.dirname(__file__) + +# Things to test +# Marketplace multi VM migrate. +# Unicode. +# Audit logs +# Invalid scenarios. +# API validations. +# Add new task shell/powershell/escript +# # Profile level var secret. + +DSL_CONFIG = json.loads(read_local_file(".tests/config.json")) +# projects +PROJECT = DSL_CONFIG["PROJECTS"]["PROJECT1"] +PROJECT_NAME = PROJECT["NAME"] + + +class TestMultiVmMigrate: + app_helper = ApplicationHelper() + + @pytest.mark.parametrize( + "test_file", + [ + { + "DSL_BP_FILEPATH": "specs/test_bp_python2_tasks_at_all_levels.py", + "DSL_LAUNCH_PARAMS": "specs/variable_list_params.py", + "action_to_run": ["python action"], + }, + { + "DSL_BP_FILEPATH": "specs/unicode_name_tasks_blueprint.py", + "DSL_LAUNCH_PARAMS": "specs/variable_list_params.py", + "action_to_run": ["片仮名 片仮名 python action"], + }, + ], + ) + def test_multivm_app_migrate(self, test_file): + err_msg = [] + # Creating the blueprint with Python 2 tasks. 
+ client = get_api_client() + runner = CliRunner() + self.created_dsl_bp_name = "Test_Multi_VM_MIGRATE {}".format(int(time.time())) + LOG.info( + "Creating Bp {} using file {}".format( + self.created_dsl_bp_name, test_file["DSL_BP_FILEPATH"] + ) + ) + result = runner.invoke( + cli, + [ + "create", + "bp", + "--file={}".format( + os.path.join(FOLDER_PATH, test_file["DSL_BP_FILEPATH"]) + ), + "--name={}".format(self.created_dsl_bp_name), + "--description='Test Multi VM Migrate'", + ], + ) + if result.exit_code: + cli_res_dict = {"Output": result.output, "Exception": str(result.exception)} + LOG.debug( + "Cli Response: {}".format( + json.dumps(cli_res_dict, indent=4, separators=(",", ": ")) + ) + ) + LOG.debug( + "Traceback: \n{}".format( + "".join(traceback.format_tb(result.exc_info[2])) + ) + ) + pytest.fail("Stage 1: BP creation from python file failed") + LOG.info("Stage 1: BP creation Success") + + if ( + test_file["DSL_BP_FILEPATH"] + == "specs/test_bp_python2_tasks_at_all_levels.py" + ): + + self.marketplace_bp_name = "Test_Multi_VM_MIGRATE MPI {}".format( + int(time.time()) + ) + self.mpi1_version = "1.0.0" + + # Publish Bp to marketplace manager as new marketplace blueprint + LOG.info( + "Publishing Bp {} as new marketplace blueprint {}".format( + self.created_dsl_bp_name, self.marketplace_bp_name + ) + ) + command = [ + "publish", + "bp", + self.created_dsl_bp_name, + "--version", + self.mpi1_version, + "--name", + self.marketplace_bp_name, + "--with_secrets", + ] + runner = CliRunner() + + result = runner.invoke(cli, command) + if result.exit_code: + cli_res_dict = { + "Output": result.output, + "Exception": str(result.exception), + } + LOG.debug( + "Cli Response: {}".format( + json.dumps(cli_res_dict, indent=4, separators=(",", ": ")) + ) + ) + LOG.debug( + "Traceback: \n{}".format( + "".join(traceback.format_tb(result.exc_info[2])) + ) + ) + pytest.fail( + "Stage2: Publishing of marketplace blueprint as new marketplace item failed" + ) + 
LOG.info("Success") + + # Approve the blueprint + LOG.info( + "Approving marketplace blueprint {} with version {}".format( + self.marketplace_bp_name, self.mpi1_version + ) + ) + command = [ + "approve", + "marketplace", + "bp", + self.marketplace_bp_name, + "--version", + self.mpi1_version, + ] + + result = runner.invoke(cli, command) + if result.exit_code: + cli_res_dict = { + "Output": result.output, + "Exception": str(result.exception), + } + LOG.debug( + "Cli Response: {}".format( + json.dumps(cli_res_dict, indent=4, separators=(",", ": ")) + ) + ) + LOG.debug( + "Traceback: \n{}".format( + "".join(traceback.format_tb(result.exc_info[2])) + ) + ) + pytest.fail("Stage2: Approving of marketplace blueprint failed") + LOG.info("Success") + + # Publish blueprint to marketplace + LOG.info( + "Publishing marketplace blueprint {} with version {} to marketplace".format( + self.marketplace_bp_name, self.mpi1_version + ) + ) + command = [ + "publish", + "marketplace", + "bp", + self.marketplace_bp_name, + "--version", + self.mpi1_version, + "--project", + PROJECT_NAME, + ] + + result = runner.invoke(cli, command) + if result.exit_code: + cli_res_dict = { + "Output": result.output, + "Exception": str(result.exception), + } + LOG.debug( + "Cli Response: {}".format( + json.dumps(cli_res_dict, indent=4, separators=(",", ": ")) + ) + ) + LOG.debug( + "Traceback: \n{}".format( + "".join(traceback.format_tb(result.exc_info[2])) + ) + ) + pytest.fail( + "Stage2: Publishing of marketplace blueprint to marketplace failed" + ) + LOG.info("Success") + + # Launching the bp in PUBLISHED state(Marketplace Item) with launch_params + self.created_app_name = "Test_MPI_APP_LP_{}".format(str(uuid.uuid4())[-10:]) + LOG.info( + "Launching Marketplace Item {} with version {} with launch_params".format( + self.marketplace_bp_name, self.mpi1_version + ) + ) + command = [ + "launch", + "marketplace", + "item", + self.marketplace_bp_name, + "--version", + self.mpi1_version, + "--project", + 
PROJECT_NAME, + "--app_name", + self.created_app_name, + "--profile_name", + "Default", + "--launch_params={}".format( + os.path.join(FOLDER_PATH, test_file["DSL_LAUNCH_PARAMS"]) + ), + ] + runner = CliRunner() + + result = runner.invoke(cli, command) + if result.exit_code: + cli_res_dict = { + "Output": result.output, + "Exception": str(result.exception), + } + LOG.debug( + "Cli Response: {}".format( + json.dumps(cli_res_dict, indent=4, separators=(",", ": ")) + ) + ) + LOG.debug( + "Traceback: \n{}".format( + "".join(traceback.format_tb(result.exc_info[2])) + ) + ) + pytest.fail( + "Stage2: Launching of marketplace blueprint in PUBLISHED state failed" + ) + LOG.info("Success") + + else: + # Application create + self.created_app_name = "MultiVM__Migrate {}".format( + self.created_dsl_bp_name + ) + LOG.info("Launching Bp {}".format(self.created_dsl_bp_name)) + result = runner.invoke( + cli, + [ + "launch", + "bp", + self.created_dsl_bp_name, + "--app_name={}".format(self.created_app_name), + "--launch_params={}".format( + os.path.join(FOLDER_PATH, test_file["DSL_LAUNCH_PARAMS"]) + ), + ], + ) + if result.exit_code: + cli_res_dict = { + "Output": result.output, + "Exception": str(result.exception), + } + LOG.debug( + "Cli Response: {}".format( + json.dumps(cli_res_dict, indent=4, separators=(",", ": ")) + ) + ) + LOG.debug( + "Traceback: \n{}".format( + "".join(traceback.format_tb(result.exc_info[2])) + ) + ) + pytest.fail("Stage 2: Blueprint launch failed") + self.app_helper._wait_for_non_busy_state(self.created_app_name) + result = runner.invoke( + cli, ["describe", "app", self.created_app_name, "--out=json"] + ) + app_data = json.loads(result.output) + state = app_data["status"]["state"] + if state not in APPLICATION.STATES.RUNNING: + pytest.fail( + f"Stage 2: Application is not in Running state, current state {state}" + ) + LOG.info("Stage 2: App creation/launch Success") + + # MultiVM Application describe + LOG.info("Describing the application 
{}".format(self.created_app_name)) + result = runner.invoke( + cli, ["describe", "app-migratable-entities", self.created_app_name] + ) + if result.exit_code: + cli_res_dict = {"Output": result.output, "Exception": str(result.exception)} + LOG.debug( + "Cli Response: {}".format( + json.dumps(cli_res_dict, indent=4, separators=(",", ": ")) + ) + ) + LOG.debug( + "Traceback: \n{}".format( + "".join(traceback.format_tb(result.exc_info[2])) + ) + ) + err_msg.append( + "Stage 3: App migratable entities describe command failed for app {}".format( + self.created_app_name + ) + ) + + # Application decompile + LOG.info("Decompiling the migratable entities {}".format(self.created_app_name)) + result = runner.invoke( + cli, ["decompile", "app-migratable-bp", "{}".format(self.created_app_name)] + ) + if result.exit_code: + cli_res_dict = {"Output": result.output, "Exception": str(result.exception)} + LOG.debug( + "Cli Response: {}".format( + json.dumps(cli_res_dict, indent=4, separators=(",", ": ")) + ) + ) + LOG.debug( + "Traceback: \n{}".format( + "".join(traceback.format_tb(result.exc_info[2])) + ) + ) + pytest.fail("Stage 3: App decompile failed") + LOG.info("Stage 3: App decompile Success") + + # Validating the folder creation and files creation + folder_name = "{}".format( + str( + app_data["status"]["resources"]["app_blueprint_config_reference"][ + "name" + ] + ) + .replace(" ", "") + .replace("-", "") + ) + isExist = os.path.exists(folder_name) + if not isExist: + pytest.fail( + "Stage 4: {} folder is not created after decompile app-migratable-bp ".format( + folder_name + ) + ) + LOG.info("Stage 4: Folder with application name is present post decompile app") + + # Check all the required files created.. 
+ required_files = ["blueprint.py", "scripts"] + for file in required_files: + file_path = os.path.join(folder_name, file) + isExist = os.path.exists(file_path) + + if not isExist: + pytest.fail( + "Required file {} is not created in folder {} after decompile app-migratable-bp".format( + file, folder_name + ) + ) + + if file == "scripts": + if os.listdir(file_path) == []: + pytest.fail( + "Scripts folder is empty after decompiling the app {}".format( + self.created_app_name + ) + ) + + try: + LOG.info("Futurizing the python 2 scripts to Python 3") + + # Futurize works for simple python scripts + subprocess.run( + "futurize --stage1 -w --nobackups {}/*.py".format(file_path), + shell=True, + ) + except subprocess.CalledProcessError as e: + pytest.fail(f"Command {e.cmd} failed with error {e.returncode}") + + # After futurize, removing the additional import added + for script_file in os.listdir(file_path): + script_file_path = os.path.join(file_path, script_file) + with open(script_file_path, "r") as script_file: + data = script_file.read() + data = data.replace( + "from __future__ import print_function", "#script" + ) + + with open(script_file_path, "w+") as script_file: + script_file.write(data) + + elif file == "blueprint.py": + with open(file_path, "r") as blueprint_file: + data = blueprint_file.read() + data = data.replace("py2", "py3") + with open(file_path, "w+") as blueprint_file: + blueprint_file.write(data) + LOG.info("Change of function calls from py2 to py3 is done") + + LOG.info( + "Stage 5: Decompile app-migratable-bp: Success. 
Required Files {} created".format( + ", ".join(required_files) + ) + ) + + # Renaming the folder + folder_original = "{}_original".format(folder_name) + os.rename(folder_name, folder_original) + + # Application Update + LOG.info("Updating the migratable entity {}".format(self.created_app_name)) + result = runner.invoke( + cli, + [ + "update", + "app-migratable-bp", + self.created_app_name, + "--file={}".format(folder_original + "/blueprint.py"), + ], + ) + if result.exit_code: + cli_res_dict = {"Output": result.output, "Exception": str(result.exception)} + LOG.debug( + "Cli Response: {}".format( + json.dumps(cli_res_dict, indent=4, separators=(",", ": ")) + ) + ) + LOG.debug( + "Traceback: \n{}".format( + "".join(traceback.format_tb(result.exc_info[2])) + ) + ) + pytest.fail("Stage 6: App update failed") + self.app_helper._wait_for_non_busy_state(self.created_app_name) + LOG.info("Stage 6: App update Success") + + # Validate the audit log update + + # Application re decompile + LOG.info( + "Re decompiling the migratable entities {}".format(self.created_app_name) + ) + result = runner.invoke( + cli, ["decompile", "app-migratable-bp", self.created_app_name] + ) + if result.exit_code: + cli_res_dict = {"Output": result.output, "Exception": str(result.exception)} + LOG.debug( + "Cli Response: {}".format( + json.dumps(cli_res_dict, indent=4, separators=(",", ": ")) + ) + ) + LOG.debug( + "Traceback: \n{}".format( + "".join(traceback.format_tb(result.exc_info[2])) + ) + ) + pytest.fail("Stage 7: App Re decompiling after update failed") + LOG.info("Stage 7: App Re decompiling after update Success") + + # Final validation in very concrete way. 
+ files_to_be_compared = ["scripts", "blueprint.py"] + for file in files_to_be_compared: + LOG.info( + f"Stage 8: Comparing the content of {file} in folders {folder_original}, {folder_name}" + ) + if file == "scripts": + for script_file in os.listdir(os.path.join(folder_original, file)): + LOG.info( + f"Comparing the content of {script_file} in folders {file}" + ) + new_file = os.path.join(folder_name, file, script_file) + original_file = os.path.join(folder_original, file, script_file) + val = filecmp.cmp(new_file, original_file) + if not val: + err_msg.append( + f"Stage 8: Contents in {original_file} and {new_file} are not identical, please check diff {val}" + ) + continue + + original_file_path = os.path.join(folder_original, file) + new_file_path = os.path.join(folder_name, file) + bp_val = filecmp.cmp(new_file_path, original_file_path) + if not bp_val: + err_msg.append( + f"Stage 8: Contents in blueprint.py is not identical, please check diff in {original_file_path} and {new_file_path}" + ) + + # Second Check: Run Actions which are updated. 
+ if test_file["action_to_run"]: + LOG.info("Stage 9: checking app from app list api") + params = {"filter": "name=={}".format(self.created_app_name)} + res, err = client.application.list(params=params) + if err: + err_msg.append( + "Stage 9: app does not contain in app list, Error: [{}] - {}".format( + err["code"], err["error"] + ) + ) + + response = res.json() + entities = response.get("entities", None) + app = None + if not entities: + pytest.fail( + "Stage 10: No entities found with the given app name {}".format( + self.created_app_name + ) + ) + app = entities[0] + app_uuid = app["metadata"]["uuid"] + res, err = client.application.read(app_uuid) + if err: + pytest.fail("Stage 11: application get call failed {}".format(err)) + + app = res.json() + + # Run Custom action + actions = test_file["action_to_run"] + self.app_helper.execute_actions(actions, app) + + # Delete the app to cover post delete, package uninstall + # let's wait for few seconds before delete + result = runner.invoke(cli, ["delete", "app", self.created_app_name]) + if result.exit_code: + cli_res_dict = {"Output": result.output, "Exception": str(result.exception)} + LOG.debug( + "Cli Response: {}".format( + json.dumps(cli_res_dict, indent=4, separators=(",", ": ")) + ) + ) + LOG.debug( + "Traceback: \n{}".format( + "".join(traceback.format_tb(result.exc_info[2])) + ) + ) + pytest.fail("Stage 13: App delete failed") + LOG.info("Stage 13: App delete Success") + + # poll for app delete action to be happened correctly + LOG.info("Polling for delete operation on app {}".format(self.created_app_name)) + maxWait = 5 * 60 + count = 0 + poll_interval = 10 + while count < maxWait: + res, err = client.application.read(app_uuid) + if err: + pytest.fail(err) + + res = res.json() + state = res["status"]["state"] + if state == APPLICATION.STATES.DELETED: + LOG.info("App {} is deleted".format(self.created_app_name)) + break + else: + LOG.info("Application state: {}".format(state)) + + count += poll_interval + 
time.sleep(poll_interval) + + # Print all the failures results + if err_msg: + pytest.fail( + "Error details for test {} {}".format( + test_file["DSL_BP_FILEPATH"], "\n".join(err_msg) + ) + ) diff --git a/tests/project/test_project_update_in_pc.py b/tests/project/test_project_update_in_pc.py index 64779284..62a8a0a3 100644 --- a/tests/project/test_project_update_in_pc.py +++ b/tests/project/test_project_update_in_pc.py @@ -2,6 +2,7 @@ from calm.dsl.builtins import Project from calm.dsl.builtins import Provider, Ref, read_local_file +from tests.utils import get_local_az_overlay_details_from_dsl_config DSL_CONFIG = json.loads(read_local_file(".tests/config.json")) ACCOUNTS = DSL_CONFIG["ACCOUNTS"] @@ -29,6 +30,10 @@ USER = DSL_CONFIG["USERS"][0] USER_NAME = USER["NAME"] +VLAN_NETWORK = DSL_CONFIG["AHV"]["NETWORK"]["VLAN1211"] + +NETWORK1, VPC1, CLUSTER1 = get_local_az_overlay_details_from_dsl_config(DSL_CONFIG) + class TestDslProject(Project): """Sample DSL Project""" @@ -38,6 +43,15 @@ class TestDslProject(Project): account=Ref.Account(NTNX_ACCOUNT_NAME), subnets=[Ref.Subnet(name=NTNX_SUBNET, cluster=NTNX_SUBNET_CLUSTER)], ), + Provider.Ntnx( + account=Ref.Account("NTNX_LOCAL_AZ"), + subnets=[ + Ref.Subnet(name=VLAN_NETWORK, cluster=CLUSTER1), + Ref.Subnet(name=NETWORK1, vpc=VPC1), + ], + clusters=[Ref.Cluster(name=CLUSTER1, account_name="NTNX_LOCAL_AZ")], + vpcs=[Ref.Vpc(name=VPC1, account_name="NTNX_LOCAL_AZ")], + ), Provider.Aws(account=Ref.Account(AWS_ACCOUNT_NAME)), Provider.Azure(account=Ref.Account(AZURE_ACCOUNT_NAME)), Provider.Gcp(account=Ref.Account(GCP_ACCOUNT_NAME)), diff --git a/tests/project/test_project_with_overlay_subnets.py b/tests/project/test_project_with_overlay_subnets.py index fb674b38..33bd397b 100644 --- a/tests/project/test_project_with_overlay_subnets.py +++ b/tests/project/test_project_with_overlay_subnets.py @@ -3,36 +3,10 @@ from calm.dsl.builtins import Project from calm.dsl.builtins import Provider, Ref from 
calm.dsl.builtins.models.utils import read_local_file +from tests.utils import get_local_az_overlay_details_from_dsl_config DSL_CONFIG = json.loads(read_local_file(".tests/config.json")) - -def get_local_az_overlay_details_from_dsl_config(config): - networks = config["ACCOUNTS"]["NUTANIX_PC"] - local_az_account = None - for account in networks: - if account.get("NAME") == "NTNX_LOCAL_AZ": - local_az_account = account - break - overlay_subnets_list = local_az_account.get("OVERLAY_SUBNETS", []) - vlan_subnets_list = local_az_account.get("SUBNETS", []) - - cluster = "" - vpc = "" - overlay_subnet = "" - - for subnet in overlay_subnets_list: - if subnet["NAME"] == "vpc_subnet_1" and subnet["VPC"] == "vpc_name_1": - overlay_subnet = subnet["NAME"] - vpc = subnet["VPC"] - - for subnet in vlan_subnets_list: - if subnet["NAME"] == config["AHV"]["NETWORK"]["VLAN1211"]: - cluster = subnet["CLUSTER"] - break - return overlay_subnet, vpc, cluster - - VLAN_NETWORK = DSL_CONFIG["AHV"]["NETWORK"]["VLAN1211"] NETWORK1, VPC1, CLUSTER1 = get_local_az_overlay_details_from_dsl_config(DSL_CONFIG) @@ -51,3 +25,8 @@ class TestDslWithOverlaySubnetProject(Project): vpcs=[Ref.Vpc(name=VPC1, account_name="NTNX_LOCAL_AZ")], ), ] + quotas = { + "vcpus": 1, + "storage": 2, + "memory": 1, + } diff --git a/tests/project/test_project_with_overlay_subnets_and_env.py b/tests/project/test_project_with_overlay_subnets_and_env.py index 320af578..caad364e 100644 --- a/tests/project/test_project_with_overlay_subnets_and_env.py +++ b/tests/project/test_project_with_overlay_subnets_and_env.py @@ -9,6 +9,7 @@ from calm.dsl.builtins import Substrate, Environment from calm.dsl.builtins import AhvVmDisk, AhvVmNic, AhvVmGC from calm.dsl.builtins import basic_cred, AhvVmResources, AhvVm +from tests.utils import get_local_az_overlay_details_from_dsl_config CENTOS_KEY = read_local_file(".tests/keys/centos") @@ -45,34 +46,6 @@ DSL_CONFIG = json.loads(read_local_file(".tests/config.json")) - -def 
get_local_az_overlay_details_from_dsl_config(config): - networks = config["ACCOUNTS"]["NUTANIX_PC"] - local_az_account = None - for account in networks: - if account.get("NAME") == NTNX_ACCOUNT_NAME: - local_az_account = account - break - overlay_subnets_list = local_az_account.get("OVERLAY_SUBNETS", []) - vlan_subnets_list = local_az_account.get("SUBNETS", []) - - cluster = "" - vpc = "" - overlay_subnet = "" - - for subnet in overlay_subnets_list: - if subnet["NAME"] == "vpc_subnet_1" and subnet["VPC"] == "vpc_name_1": - overlay_subnet = subnet["NAME"] - vpc = subnet["VPC"] - break - - for subnet in vlan_subnets_list: - if subnet["NAME"] == config["AHV"]["NETWORK"]["VLAN1211"]: - cluster = subnet["CLUSTER"] - break - return overlay_subnet, vpc, cluster - - VLAN_NETWORK = DSL_CONFIG["AHV"]["NETWORK"]["VLAN1211"] NETWORK1, VPC1, CLUSTER1 = get_local_az_overlay_details_from_dsl_config(DSL_CONFIG) diff --git a/tests/testprep.py b/tests/testprep.py index d69be37e..db49c2d9 100644 --- a/tests/testprep.py +++ b/tests/testprep.py @@ -506,6 +506,29 @@ def add_approval_details(config): add_project_details(config, "POLICY_PROJECTS", "test_approval_policy") +def add_provider_constants(config): + provider_config = { + "provider": { + "azure": { + "client_id_a": "VAULT_AZURE_CLIENT_ID", + "client_key": "VAULT_AZURE_CLIENT_KEY", + "subscription_id": "VAULT_AZURE_SUBSCRIPTION_ID", + "tenant_id_a": "VAULT_AZURE_TENANT_ID", + }, + "aws": { + "accessKey": "VAULT_AWS_ACCESS_KEY", + "secretKey": "VAULT_AWS_SECRET_KEY", + }, + "kubernetes": { + "server": "VAULT_K8_SERVER_IP", + "kube_port": "VAULT_K8_SERVER_PORT", + "token_a": "VAULT_K8_SERVER_TOKEN", + }, + } + } + config.update(provider_config) + + config = {} if os.path.exists(dsl_config_file_location): f = open(dsl_config_file_location, "r") @@ -522,6 +545,7 @@ def add_approval_details(config): add_rerun_report_portal(config) add_vpc_endpoints(config) add_approval_details(config) +add_provider_constants(config) f = 
open(dsl_config_file_location, "w") f.write(json.dumps(config, indent=4)) f.close() diff --git a/tests/unit/jsons/escript_all_tasks.json b/tests/unit/jsons/escript_all_tasks.json new file mode 100644 index 00000000..49045107 --- /dev/null +++ b/tests/unit/jsons/escript_all_tasks.json @@ -0,0 +1,353 @@ +{ + "endpoint_definition_list": [], + "credential_definition_list": [], + "client_attrs": {}, + "runbook": { + "name": "DslAllEscriptTasks_runbook", + "description": "", + "main_task_local_reference": { + "kind": "app_task", + "name": "DslAllEscriptTasks_dag" + }, + "task_definition_list": [ + { + "name": "DslAllEscriptTasks_dag", + "description": "", + "type": "DAG", + "attrs": { + "edges": [ + { + "from_task_reference": { + "kind": "app_task", + "name": "escript_decision" + }, + "to_task_reference": { + "kind": "app_task", + "name": "escript2_decision" + } + }, + { + "from_task_reference": { + "kind": "app_task", + "name": "escript2_decision" + }, + "to_task_reference": { + "kind": "app_task", + "name": "escript3_decision" + } + } + ] + }, + "child_tasks_local_reference_list": [ + { + "kind": "app_task", + "name": "escript_decision" + }, + { + "kind": "app_task", + "name": "escript2_decision" + }, + { + "kind": "app_task", + "name": "escript3_decision" + } + ], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript_decision_success_meta_task", + "description": "", + "type": "META", + "attrs": {}, + "child_tasks_local_reference_list": [ + { + "kind": "app_task", + "name": "escript_exec" + }, + { + "kind": "app_task", + "name": "escript_setvar" + } + ], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript_exec", + "description": "", + "type": "EXEC", + "attrs": { + "script_type": "static", + "script": "print(\"just printing...\")\n" + }, + "child_tasks_local_reference_list": [], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript_setvar", + "description": "", + 
"type": "SET_VARIABLE", + "attrs": { + "script_type": "static", + "script": "print(\"var1=abc\")\n", + "eval_variables": [ + "var1" + ] + }, + "child_tasks_local_reference_list": [], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript_decision_failure_meta_task", + "description": "", + "type": "META", + "attrs": {}, + "child_tasks_local_reference_list": [ + { + "kind": "app_task", + "name": "escript_exec_print" + } + ], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript_exec_print", + "description": "", + "type": "EXEC", + "attrs": { + "script_type": "static", + "script": "print \"Decision else part\"" + }, + "child_tasks_local_reference_list": [], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript_decision", + "description": "", + "type": "DECISION", + "attrs": { + "script_type": "static", + "script": "exit(0)\n", + "success_child_reference": { + "kind": "app_task", + "name": "escript_decision_success_meta_task" + }, + "failure_child_reference": { + "kind": "app_task", + "name": "escript_decision_failure_meta_task" + } + }, + "child_tasks_local_reference_list": [], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript2_decision_success_meta_task", + "description": "", + "type": "META", + "attrs": {}, + "child_tasks_local_reference_list": [ + { + "kind": "app_task", + "name": "escript3_exec_print" + } + ], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript3_exec_print", + "description": "", + "type": "EXEC", + "attrs": { + "script_type": "static", + "script": "print \"Decision if part\"" + }, + "child_tasks_local_reference_list": [], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript2_decision_failure_meta_task", + "description": "", + "type": "META", + "attrs": {}, + "child_tasks_local_reference_list": [ + { + "kind": "app_task", + "name": 
"escript2_exec" + }, + { + "kind": "app_task", + "name": "escript2_setvar" + } + ], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript2_exec", + "description": "", + "type": "EXEC", + "attrs": { + "script_type": "static", + "script": "print(\"just printing...\")\n" + }, + "child_tasks_local_reference_list": [], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript2_setvar", + "description": "", + "type": "SET_VARIABLE", + "attrs": { + "script_type": "static", + "script": "print(\"var1=abc\")\n", + "eval_variables": [ + "var1" + ] + }, + "child_tasks_local_reference_list": [], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript2_decision", + "description": "", + "type": "DECISION", + "attrs": { + "script_type": "static", + "script": "exit(1)\n", + "success_child_reference": { + "kind": "app_task", + "name": "escript2_decision_success_meta_task" + }, + "failure_child_reference": { + "kind": "app_task", + "name": "escript2_decision_failure_meta_task" + } + }, + "child_tasks_local_reference_list": [], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript3_decision_success_meta_task", + "description": "", + "type": "META", + "attrs": {}, + "child_tasks_local_reference_list": [ + { + "kind": "app_task", + "name": "escript3_exec" + }, + { + "kind": "app_task", + "name": "escript3_setvar" + } + ], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript3_exec", + "description": "", + "type": "EXEC", + "attrs": { + "script_type": "static_py3", + "script": "print(\"just printing...\")\n" + }, + "child_tasks_local_reference_list": [], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript3_setvar", + "description": "", + "type": "SET_VARIABLE", + "attrs": { + "script_type": "static_py3", + "script": "print(\"var1=abc\")\n", + "eval_variables": [ + "var1" + ] + }, + 
"child_tasks_local_reference_list": [], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript3_decision_failure_meta_task", + "description": "", + "type": "META", + "attrs": {}, + "child_tasks_local_reference_list": [ + { + "kind": "app_task", + "name": "escript3_exec_print" + } + ], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript3_exec_print", + "description": "", + "type": "EXEC", + "attrs": { + "script_type": "static_py3", + "script": "print(\"Decision else part\")" + }, + "child_tasks_local_reference_list": [], + "variable_list": [], + "retries": "", + "timeout_secs": "" + }, + { + "name": "escript3_decision", + "description": "", + "type": "DECISION", + "attrs": { + "script_type": "static_py3", + "script": "exit(0)\n", + "success_child_reference": { + "kind": "app_task", + "name": "escript3_decision_success_meta_task" + }, + "failure_child_reference": { + "kind": "app_task", + "name": "escript3_decision_failure_meta_task" + } + }, + "child_tasks_local_reference_list": [], + "variable_list": [], + "retries": "", + "timeout_secs": "" + } + ], + "variable_list": [] + } +} diff --git a/tests/unit/scripts/escript_decision_false.py b/tests/unit/scripts/escript_decision_false.py new file mode 100644 index 00000000..63a3a698 --- /dev/null +++ b/tests/unit/scripts/escript_decision_false.py @@ -0,0 +1 @@ +exit(1) diff --git a/tests/unit/scripts/escript_decision_true.py b/tests/unit/scripts/escript_decision_true.py new file mode 100644 index 00000000..ba71a0cc --- /dev/null +++ b/tests/unit/scripts/escript_decision_true.py @@ -0,0 +1 @@ +exit(0) diff --git a/tests/unit/scripts/escript_exec.py b/tests/unit/scripts/escript_exec.py new file mode 100644 index 00000000..20591c16 --- /dev/null +++ b/tests/unit/scripts/escript_exec.py @@ -0,0 +1 @@ +print("just printing...") diff --git a/tests/unit/scripts/escript_setvariable.py b/tests/unit/scripts/escript_setvariable.py new file mode 100644 index 
00000000..0a0f9dd9 --- /dev/null +++ b/tests/unit/scripts/escript_setvariable.py @@ -0,0 +1 @@ +print("var1=abc") diff --git a/tests/unit/test_escript_all_tasks.py b/tests/unit/test_escript_all_tasks.py new file mode 100644 index 00000000..b6967b79 --- /dev/null +++ b/tests/unit/test_escript_all_tasks.py @@ -0,0 +1,89 @@ +import uuid +import os +import pytest + +from calm.dsl.runbooks import * +from calm.dsl.runbooks import ( + RunbookTask as CalmTask, +) +from calm.dsl.builtins import CalmTask as CalmVarTask, Metadata + + +@runbook +def DslAllEscriptTasks(endpoints=[], default=False): + "Runbook example with All Escript Type Tasks" + + with CalmTask.Decision.escript( + name="escript_decision", + filename=os.path.join("scripts", "escript_decision_true.py"), + ) as d: + if d.ok: + CalmTask.Exec.escript( + name="escript_exec", filename=os.path.join("scripts", "escript_exec.py") + ) + CalmTask.SetVariable.escript( + name="escript_setvar", + filename=os.path.join("scripts", "escript_setvariable.py"), + variables=["var1"], + ) + else: + CalmTask.Exec.escript( + name="escript_exec_print", script='''print "Decision else part"''' + ) + + with CalmTask.Decision.escript.py2( + name="escript2_decision", + filename=os.path.join("scripts", "escript_decision_false.py"), + ) as d: + if d.ok: + CalmTask.Exec.escript.py2( + name="escript3_exec_print", script='''print "Decision if part"''' + ) + else: + CalmTask.Exec.escript.py2( + name="escript2_exec", + filename=os.path.join("scripts", "escript_exec.py"), + ) + CalmTask.SetVariable.escript.py2( + name="escript2_setvar", + filename=os.path.join("scripts", "escript_setvariable.py"), + variables=["var1"], + ) + + with CalmTask.Decision.escript.py3( + name="escript3_decision", + filename=os.path.join("scripts", "escript_decision_true.py"), + ) as d: + if d.ok: + CalmTask.Exec.escript.py3( + name="escript3_exec", + filename=os.path.join("scripts", "escript_exec.py"), + ) + CalmTask.SetVariable.escript.py3( + name="escript3_setvar", + 
filename=os.path.join("scripts", "escript_setvariable.py"), + variables=["var1"], + ) + else: + CalmTask.Exec.escript.py3( + name="escript3_exec_print", script="""print("Decision else part")""" + ) + + +def _test_compare_compile_result(Runbook, json_file): + """compares the runbook compilation and known output""" + + print("JSON compilation test for {}".format(Runbook.action_name)) + dir_path = os.path.dirname(os.path.realpath(__file__)) + file_path = os.path.join(dir_path, json_file) + + generated_json = runbook_json(Runbook) + known_json = open(file_path).read() + assert generated_json == known_json + print("JSON compilation successful for {}".format(Runbook.action_name)) + + +@pytest.mark.runbook +@pytest.mark.escript +def test_all_escript_type_tasks(): + _test_compare_compile_result(DslAllEscriptTasks, "./jsons/escript_all_tasks.json") diff --git a/tests/utils.py b/tests/utils.py index be830440..8c21390e 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -7,10 +7,11 @@ from click.testing import CliRunner from calm.dsl.cli import main as cli -from calm.dsl.cli.constants import APPLICATION, ERGON_TASK +from calm.dsl.cli.constants import APPLICATION, ERGON_TASK, RUNLOG from calm.dsl.log import get_logging_handle from calm.dsl.api import get_client_handle_obj from calm.dsl.api.connection import REQUEST +from calm.dsl.cli.main import get_api_client VPC_TUNNEL_NAME = "vpc_name_1" @@ -101,6 +102,104 @@ def get_substrates_platform_data( return None + def execute_actions(self, actions, app): + "This routine execute actions" + client = get_api_client() + app_uuid = app["metadata"]["uuid"] + app_spec = app["spec"] + LOG.info( + "Action Run Stage: Performing actions on application {}".format(app_uuid) + ) + for action_name in actions: + calm_action_name = "action_" + action_name.lower() + LOG.info( + "Action Run Stage. 
Running action {} on application {}".format( + action_name, app_uuid + ) + ) + action = next( + action + for action in app_spec["resources"]["action_list"] + if action["name"] == calm_action_name or action["name"] == action_name + ) + if not action: + pytest.fail( + "Action Run Stage: No action found matching name {}".format( + action_name + ) + ) + + action_id = action["uuid"] + + app.pop("status", None) + app["spec"] = { + "args": [], + "target_kind": "Application", + "target_uuid": app_uuid, + } + res, err = client.application.run_action(app_uuid, action_id, app) + if err: + pytest.fail( + "Action Run Stage: running action failed [{}] - {}".format( + err["code"], err["error"] + ) + ) + + response = res.json() + runlog_uuid = response["status"]["runlog_uuid"] + LOG.info(f"Runlog uuid of custom action triggered {runlog_uuid}") + + url = client.application.ITEM.format(app_uuid) + "/app_runlogs/list" + payload = {"filter": "root_reference=={}".format(runlog_uuid)} + + maxWait = 5 * 60 + count = 0 + poll_interval = 10 + while count < maxWait: + # call status api + res, err = client.application.poll_action_run(url, payload) + if err: + raise Exception("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + entities = response["entities"] + wait_over = False + if len(entities): + sorted_entities = sorted( + entities, key=lambda x: int(x["metadata"]["creation_time"]) + ) + for runlog in sorted_entities: + state = runlog["status"]["state"] + if state in RUNLOG.FAILURE_STATES: + pytest.fail( + "Action Run Stage: action {} failed".format(action_name) + ) + break + elif state not in RUNLOG.TERMINAL_STATES: + LOG.info( + "Action Run Stage: Action {} is in process".format( + action_name + ) + ) + break + else: + wait_over = True + + if wait_over: + LOG.info( + "Action Run Stage: Action {} completed".format(action_name) + ) + break + + count += poll_interval + time.sleep(poll_interval) + + if count >= maxWait: + pytest.fail( + "Action Run Stage: action {} is 
not completed in 5 minutes".format( + action_name + ) + ) + class Task: def poll_task_to_state( @@ -253,3 +352,106 @@ def get_approval_project(config): raise exception("No Approval Policy Project Found") project_name = config.get("POLICY_PROJECTS", {}).get("PROJECT1", {}).get("NAME", "") return project_name + + +def poll_runlog_status( + client, runlog_uuid, expected_states, poll_interval=10, maxWait=300 +): + """ + This routine polls for 5mins till the runlog gets into the expected state + Args: + client (obj): client object + runlog_uuid (str): runlog id + expected_states (list): list of expected states + Returns: + (str, list): returns final state of the runlog and reasons list + """ + count = 0 + while count < maxWait: + res, err = client.runbook.poll_action_run(runlog_uuid) + if err: + pytest.fail("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + LOG.debug(response) + state = response["status"]["state"] + reasons = response["status"]["reason_list"] + if state in expected_states: + break + count += poll_interval + time.sleep(poll_interval) + + return state, reasons or [] + + +def poll_runlog_status_policy( + client, expected_states, url, payload, poll_interval=10, maxWait=300 +): + """ + This routine polls policy for 5mins till the runlog gets into the expected state + Args: + client (obj): client object + expected_states (list): list of expected states + url (str): url to poll + payload (dict): payload used for polling + Returns: + (str, list): returns final state of the runlog and reasons list + """ + count = 0 + while count < maxWait: + res, err = client.application.poll_action_run(url, payload) + if err: + pytest.fail("[{}] - {}".format(err["code"], err["error"])) + response = res.json() + entity = response.get("entities") + LOG.info(json.dumps(response)) + if entity: + state = entity[0]["status"]["state"] + reasons = entity[0]["status"]["reason_list"] + if state in expected_states: + break + count += poll_interval + 
time.sleep(poll_interval) + + return state, reasons or [] + + +def get_escript_language_from_version(script_version="static"): + """Gets escript language for dsl based on escript_version + Args: + script_version(str): Escript version/type: static or static_Py3 + Returns: + script_language(str): Escript DSL specific language: + python2- '', '.py2'; + python3- '.py3'; + """ + if script_version == "static_py3": + script_language = ".py3" + else: + script_language = "" # we can use .py2 as well for static versions + return script_language + + +def get_local_az_overlay_details_from_dsl_config(config): + networks = config["ACCOUNTS"]["NUTANIX_PC"] + local_az_account = None + for account in networks: + if account.get("NAME") == "NTNX_LOCAL_AZ": + local_az_account = account + break + overlay_subnets_list = local_az_account.get("OVERLAY_SUBNETS", []) + vlan_subnets_list = local_az_account.get("SUBNETS", []) + + cluster = "" + vpc = "" + overlay_subnet = "" + + for subnet in overlay_subnets_list: + if subnet["NAME"] == "vpc_subnet_1" and subnet["VPC"] == "vpc_name_1": + overlay_subnet = subnet["NAME"] + vpc = subnet["VPC"] + + for subnet in vlan_subnets_list: + if subnet["NAME"] == config["AHV"]["NETWORK"]["VLAN1211"]: + cluster = subnet["CLUSTER"] + break + return overlay_subnet, vpc, cluster