From 0e2bc496443de83b6995062d6fcefb09a3b6cf30 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Thu, 11 Apr 2024 16:27:49 +0300 Subject: [PATCH 01/24] Fix in literal identity check which happened to work. Signed-off-by: Nashwan Azhari --- coriolis/conductor/rpc/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coriolis/conductor/rpc/server.py b/coriolis/conductor/rpc/server.py index ecdb6d64..b6d45fd8 100644 --- a/coriolis/conductor/rpc/server.py +++ b/coriolis/conductor/rpc/server.py @@ -395,7 +395,7 @@ def get_endpoint(self, ctxt, endpoint_id): def delete_endpoint(self, ctxt, endpoint_id): q_replicas_count = db_api.get_endpoint_replicas_count( ctxt, endpoint_id) - if q_replicas_count is not 0: + if q_replicas_count != 0: raise exception.NotAuthorized("%s replicas would be orphaned!" % q_replicas_count) db_api.delete_endpoint(ctxt, endpoint_id) From 6edecbb6ccf615f2785363354beabf2824f2197d Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Tue, 9 Apr 2024 14:42:14 +0300 Subject: [PATCH 02/24] Add 'scenario' field in DB models layer. Adds the 'scenario' field to the `Replica` DB model and afferent DB API functions for creating/filtering Replicas based on it. 
The `scenario` can be one of: * `replica` (the auto-default for all newly-created replicas) * `live_migration` (special case for replicas-as-migrations) Signed-off-by: Nashwan Azhari --- coriolis/constants.py | 3 ++ coriolis/db/api.py | 32 ++++++++++++++++--- .../019_add_replica_scenario_field.py | 18 +++++++++++ coriolis/db/sqlalchemy/models.py | 11 ++++++- 4 files changed, 59 insertions(+), 5 deletions(-) create mode 100644 coriolis/db/sqlalchemy/migrate_repo/versions/019_add_replica_scenario_field.py diff --git a/coriolis/constants.py b/coriolis/constants.py index 8d1ec2b5..0e4c35b2 100644 --- a/coriolis/constants.py +++ b/coriolis/constants.py @@ -3,6 +3,9 @@ DEFAULT_CORIOLIS_REGION_NAME = "Default Region" +REPLICA_SCENARIO_REPLICA = "replica" +REPLICA_SCENARIO_LIVE_MIGRATION = "live_migration" + EXECUTION_STATUS_UNEXECUTED = "UNEXECUTED" EXECUTION_STATUS_RUNNING = "RUNNING" EXECUTION_STATUS_COMPLETED = "COMPLETED" diff --git a/coriolis/db/api.py b/coriolis/db/api.py index 76c6d72e..92234ac5 100644 --- a/coriolis/db/api.py +++ b/coriolis/db/api.py @@ -15,6 +15,7 @@ from sqlalchemy.sql import null from coriolis.db.sqlalchemy import models +from coriolis import constants from coriolis import exception from coriolis import utils @@ -424,6 +425,7 @@ def _get_replica_with_tasks_executions_options(q): @enginefacade.reader def get_replicas(context, + replica_scenario=None, include_tasks_executions=False, include_task_info=False, to_dict=False): @@ -433,6 +435,8 @@ def get_replicas(context, if include_task_info: q = q.options(orm.undefer('info')) q = q.filter() + if replica_scenario: + q.filter(models.Replica.scenario == replica_scenario) if is_user_context(context): q = q.filter( models.Replica.project_id == context.project_id) @@ -447,11 +451,17 @@ def get_replicas(context, @enginefacade.reader -def get_replica(context, replica_id, include_task_info=False, to_dict=False): +def get_replica(context, replica_id, + replica_scenario=None, + include_task_info=False, + 
to_dict=False): q = _soft_delete_aware_query(context, models.Replica) q = _get_replica_with_tasks_executions_options(q) if include_task_info: q = q.options(orm.undefer('info')) + if replica_scenario: + q = q.filter( + models.Replica.scenario == replica_scenario) if is_user_context(context): q = q.filter( models.Replica.project_id == context.project_id) @@ -465,12 +475,20 @@ def get_replica(context, replica_id, include_task_info=False, to_dict=False): @enginefacade.reader -def get_endpoint_replicas_count(context, endpoint_id): +def get_endpoint_replicas_count( + context, endpoint_id, replica_scenario=None): + + scenario_filter_kwargs = {} + if replica_scenario: + scenario_filter_kwargs = {"scenario": replica_scenario} + origin_args = {'origin_endpoint_id': endpoint_id} + origin_args.update(scenario_filter_kwargs) q_origin_count = _soft_delete_aware_query( context, models.Replica).filter_by(**origin_args).count() destination_args = {'destination_endpoint_id': endpoint_id} + destination_args.update(scenario_filter_kwargs) q_destination_count = _soft_delete_aware_query( context, models.Replica).filter_by(**destination_args).count() @@ -516,8 +534,11 @@ def get_replica_migrations(context, replica_id): @enginefacade.reader -def get_migrations(context, include_tasks=False, - include_task_info=False, to_dict=False): +def get_migrations(context, + include_tasks=False, + include_task_info=False, + to_dict=False, + replica_migrations_only=False): q = _soft_delete_aware_query(context, models.Migration) if include_tasks: q = _get_migration_task_query_options(q) @@ -526,6 +547,9 @@ def get_migrations(context, include_tasks=False, if include_task_info: q = q.options(orm.undefer('info')) + if replica_migrations_only: + q.filter(models.Migration.replica_id != None) + args = {} if is_user_context(context): args["project_id"] = context.project_id diff --git a/coriolis/db/sqlalchemy/migrate_repo/versions/019_add_replica_scenario_field.py 
b/coriolis/db/sqlalchemy/migrate_repo/versions/019_add_replica_scenario_field.py new file mode 100644 index 00000000..e49d361c --- /dev/null +++ b/coriolis/db/sqlalchemy/migrate_repo/versions/019_add_replica_scenario_field.py @@ -0,0 +1,18 @@ +# Copyright 2024 Cloudbase Solutions Srl +# All Rights Reserved. + +import sqlalchemy + + +def upgrade(migrate_engine): + meta = sqlalchemy.MetaData() + meta.bind = migrate_engine + + replica = sqlalchemy.Table( + 'replica', meta, autoload=True) + + replica_scenario = sqlalchemy.Column( + "scenario", sqlalchemy.String(255), nullable=False, + default="replica") + + replica.create_column(replica_scenario) diff --git a/coriolis/db/sqlalchemy/models.py b/coriolis/db/sqlalchemy/models.py index f326837e..beac9c3b 100644 --- a/coriolis/db/sqlalchemy/models.py +++ b/coriolis/db/sqlalchemy/models.py @@ -330,6 +330,9 @@ class Replica(BaseTransferAction): sqlalchemy.String(36), sqlalchemy.ForeignKey( 'base_transfer_action.base_id'), primary_key=True) + scenario = sqlalchemy.Column( + sqlalchemy.String(255), + default=constants.REPLICA_SCENARIO_REPLICA) __mapper_args__ = { 'polymorphic_identity': 'replica', @@ -339,7 +342,9 @@ def to_dict(self, include_task_info=True, include_executions=True): base = super(Replica, self).to_dict( include_task_info=include_task_info, include_executions=include_executions) - base.update({"id": self.id}) + base.update({ + "id": self.id, + "scenario": self.scenario}) return base @@ -368,9 +373,13 @@ def to_dict(self, include_task_info=True, include_tasks=True): base = super(Migration, self).to_dict( include_task_info=include_task_info, include_executions=include_tasks) + replica_scenario_type = None + if self.replica: + replica_scenario_type = self.replica.scenario base.update({ "id": self.id, "replica_id": self.replica_id, + "replica_scenario_type": replica_scenario_type, "shutdown_instances": self.shutdown_instances, "replication_count": self.replication_count, }) From 
edb6770f6bcd7cb789f69340bd172a6db98c79e2 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Tue, 9 Apr 2024 17:51:16 +0300 Subject: [PATCH 03/24] Update conductor layer for Live Migrations and Deployments. Update the conductor layer for separate handling of Live Migrations and deployments. Signed-off-by: Nashwan Azhari --- coriolis/conductor/rpc/client.py | 26 ++++++++++++- coriolis/conductor/rpc/server.py | 66 ++++++++++++++++++++++++++++++-- coriolis/constants.py | 2 + 3 files changed, 88 insertions(+), 6 deletions(-) diff --git a/coriolis/conductor/rpc/client.py b/coriolis/conductor/rpc/client.py index 3bfd216e..45b5f00d 100644 --- a/coriolis/conductor/rpc/client.py +++ b/coriolis/conductor/rpc/client.py @@ -159,7 +159,9 @@ def cancel_replica_tasks_execution(self, ctxt, replica_id, execution_id, ctxt, 'cancel_replica_tasks_execution', replica_id=replica_id, execution_id=execution_id, force=force) - def create_instances_replica(self, ctxt, origin_endpoint_id, + def create_instances_replica(self, ctxt, + replica_scenario, + origin_endpoint_id, destination_endpoint_id, origin_minion_pool_id, destination_minion_pool_id, @@ -169,6 +171,7 @@ def create_instances_replica(self, ctxt, origin_endpoint_id, notes=None, user_scripts=None): return self._call( ctxt, 'create_instances_replica', + replica_scenario=replica_scenario, origin_endpoint_id=origin_endpoint_id, destination_endpoint_id=destination_endpoint_id, origin_minion_pool_id=origin_minion_pool_id, @@ -214,6 +217,17 @@ def get_migration(self, ctxt, migration_id, include_task_info=False): ctxt, 'get_migration', migration_id=migration_id, include_task_info=include_task_info) + def get_deployments(self, ctxt, include_tasks=False, + include_task_info=False): + return self._call( + ctxt, 'get_deployments', include_tasks=include_tasks, + include_task_info=include_task_info) + + def get_deployment(self, ctxt, deployment_id, include_task_info=False): + return self._call( + ctxt, 'get_deployment', 
deployment_id=deployment_id, + include_task_info=include_task_info) + def migrate_instances(self, ctxt, origin_endpoint_id, destination_endpoint_id, origin_minion_pool_id, destination_minion_pool_id, @@ -262,6 +276,14 @@ def cancel_migration(self, ctxt, migration_id, force): self._call( ctxt, 'cancel_migration', migration_id=migration_id, force=force) + def delete_deployment(self, ctxt, deployment_id): + self._call( + ctxt, 'delete_deployment', deployment_id=deployment_id) + + def cancel_deployment(self, ctxt, deployment_id, force): + self._call( + ctxt, 'cancel_deployment', deployment_id=deployment_id, force=force) + def set_task_host(self, ctxt, task_id, host): self._call( ctxt, 'set_task_host', task_id=task_id, host=host) @@ -460,7 +482,7 @@ def _rpc_conductor_client(self): return self._rpc_conductor_client_instance @classmethod - def get_progress_update_identifier(self, progress_update): + def get_progress_update_identifier(cls, progress_update): return progress_update['index'] def add_progress_update( diff --git a/coriolis/conductor/rpc/server.py b/coriolis/conductor/rpc/server.py index b6d45fd8..42aa816d 100644 --- a/coriolis/conductor/rpc/server.py +++ b/coriolis/conductor/rpc/server.py @@ -123,6 +123,18 @@ def inner(): return wrapper +def deployment_synchronized(func): + @functools.wraps(func) + def wrapper(self, ctxt, deployment_id, *args, **kwargs): + @lockutils.synchronized( + constants.DEPLOYMENT_LOCK_NAME_FORMAT % deployment_id, + external=True) + def inner(): + return func(self, ctxt, deployment_id, *args, **kwargs) + return inner() + return wrapper + + def tasks_execution_synchronized(func): @functools.wraps(func) def wrapper(self, ctxt, replica_id, execution_id, *args, **kwargs): @@ -1097,6 +1109,12 @@ def get_replica(self, ctxt, replica_id, include_task_info=False): def delete_replica(self, ctxt, replica_id): replica = self._get_replica(ctxt, replica_id) self._check_replica_running_executions(ctxt, replica) + # TODO(aznashwan): update reservation 
deletion logic if + # the Replica was never successfully deployed and its + # disks were deleted. + # This might not be possible if its executions were deleted, + # but might be possible to set the new 'fulfilled' field within + # the reservation on the licensing server after a successful execution. self._check_delete_reservation_for_transfer(replica) db_api.delete_replica(ctxt, replica_id) @@ -1165,7 +1183,8 @@ def _check_endpoints(ctxt, origin_endpoint, destination_endpoint): destination_endpoint.connection_info)): raise exception.SameDestination() - def create_instances_replica(self, ctxt, origin_endpoint_id, + def create_instances_replica(self, ctxt, replica_scenario, + origin_endpoint_id, destination_endpoint_id, origin_minion_pool_id, destination_minion_pool_id, @@ -1174,6 +1193,14 @@ def create_instances_replica(self, ctxt, origin_endpoint_id, destination_environment, instances, network_map, storage_mappings, notes=None, user_scripts=None): + supported_scenarios = [ + constants.REPLICA_SCENARIO_REPLICA, + constants.REPLICA_SCENARIO_LIVE_MIGRATION] + if replica_scenario not in supported_scenarios: + raise exception.InvalidInput( + message=f"Unsupported Replica scenario '{replica_scenario}'. 
" + f"Must be one of: {supported_scenarios}") + origin_endpoint = self.get_endpoint(ctxt, origin_endpoint_id) destination_endpoint = self.get_endpoint( ctxt, destination_endpoint_id) @@ -1182,6 +1209,7 @@ def create_instances_replica(self, ctxt, origin_endpoint_id, replica = models.Replica() replica.id = str(uuid.uuid4()) replica.base_id = replica.id + replica.scenario = replica_scenario replica.origin_endpoint_id = origin_endpoint_id replica.origin_minion_pool_id = origin_minion_pool_id replica.destination_endpoint_id = destination_endpoint_id @@ -1202,6 +1230,8 @@ def create_instances_replica(self, ctxt, origin_endpoint_id, self._check_minion_pools_for_action(ctxt, replica) + # TODO(aznashwan): add scenario-appropriate steps for + # defining the Replica reservation: self._check_create_reservation_for_transfer( replica, licensing_client.RESERVATION_TYPE_REPLICA) @@ -1232,6 +1262,20 @@ def get_migration(self, ctxt, migration_id, include_task_info=False): ctxt, migration_id, include_task_info=include_task_info, to_dict=True) + def get_deployments(self, ctxt, include_tasks, + include_task_info=False): + return db_api.get_migrations( + ctxt, include_tasks, + include_task_info=include_task_info, + replica_migrations_only=True, + to_dict=True) + + @deployment_synchronized + def get_deployment(self, ctxt, deployment_id, include_task_info=False): + return self._get_migration( + ctxt, deployment_id, include_task_info=include_task_info, + to_dict=True) + @staticmethod def _check_running_replica_migrations(ctxt, replica_id): migrations = db_api.get_replica_migrations(ctxt, replica_id) @@ -2125,8 +2169,7 @@ def _get_migration(self, ctxt, migration_id, include_task_info=False, "Migration with ID '%s' not found." 
% migration_id) return migration - @migration_synchronized - def delete_migration(self, ctxt, migration_id): + def _delete_migration(self, ctxt, migration_id): migration = self._get_migration(ctxt, migration_id) execution = migration.executions[0] if execution.status in constants.ACTIVE_EXECUTION_STATUSES: @@ -2136,7 +2179,14 @@ def delete_migration(self, ctxt, migration_id): db_api.delete_migration(ctxt, migration_id) @migration_synchronized - def cancel_migration(self, ctxt, migration_id, force): + def delete_migration(self, ctxt, migration_id): + self._delete_migration(ctxt, migration_id) + + @deployment_synchronized + def delete_deployment(self, ctxt, deployment_id): + self._delete_migration(ctxt, deployment_id) + + def _cancel_migration(self, ctxt, migration_id, force): migration = self._get_migration(ctxt, migration_id) if len(migration.executions) != 1: raise exception.InvalidMigrationState( @@ -2157,6 +2207,14 @@ def cancel_migration(self, ctxt, migration_id, force): external=True): self._cancel_tasks_execution(ctxt, execution, force=force) + @migration_synchronized + def cancel_migration(self, ctxt, migration_id, force): + self._cancel_migration(ctxt, migration_id, force) + + @deployment_synchronized + def cancel_deployment(self, ctxt, deployment_id, force): + self._cancel_migration(ctxt, deployment_id, force) + def _cancel_tasks_execution( self, ctxt, execution, requery=True, force=False): """ Cancels a running Execution by: diff --git a/coriolis/constants.py b/coriolis/constants.py index 0e4c35b2..0d8068b9 100644 --- a/coriolis/constants.py +++ b/coriolis/constants.py @@ -307,6 +307,8 @@ EXECUTION_LOCK_NAME_FORMAT = "execution-%s" ENDPOINT_LOCK_NAME_FORMAT = "endpoint-%s" MIGRATION_LOCK_NAME_FORMAT = "migration-%s" +# NOTE(aznashwan): intentionally left identical to Migration locks. 
+DEPLOYMENT_LOCK_NAME_FORMAT = "migration-%s" REPLICA_LOCK_NAME_FORMAT = "replica-%s" SCHEDULE_LOCK_NAME_FORMAT = "schedule-%s" REGION_LOCK_NAME_FORMAT = "region-%s" From af04469fd96b2cb94dd4ab6f6b5ce830a6ff11f0 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Wed, 10 Apr 2024 15:38:51 +0300 Subject: [PATCH 04/24] Add 'deployments' and update 'replicas' middle APIs. Signed-off-by: Nashwan Azhari --- coriolis/deployments/__init__.py | 0 coriolis/deployments/api.py | 35 +++++++++++ coriolis/deployments/manager.py | 101 +++++++++++++++++++++++++++++++ coriolis/replicas/api.py | 6 +- 4 files changed, 140 insertions(+), 2 deletions(-) create mode 100644 coriolis/deployments/__init__.py create mode 100644 coriolis/deployments/api.py create mode 100644 coriolis/deployments/manager.py diff --git a/coriolis/deployments/__init__.py b/coriolis/deployments/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/coriolis/deployments/api.py b/coriolis/deployments/api.py new file mode 100644 index 00000000..fa25b6e2 --- /dev/null +++ b/coriolis/deployments/api.py @@ -0,0 +1,35 @@ +# Copyright 2024 Cloudbase Solutions Srl +# All Rights Reserved. 
+ +from coriolis.conductor.rpc import client as rpc_client + + +class API(object): + def __init__(self): + self._rpc_client = rpc_client.ConductorClient() + + def deploy_replica_instances(self, ctxt, replica_id, + instance_osmorphing_minion_pool_mappings, + clone_disks=False, force=False, + skip_os_morphing=False, user_scripts=None): + return self._rpc_client.deploy_replica_instances( + ctxt, replica_id, instance_osmorphing_minion_pool_mappings=( + instance_osmorphing_minion_pool_mappings), + clone_disks=clone_disks, force=force, + skip_os_morphing=skip_os_morphing, + user_scripts=user_scripts) + + def delete(self, ctxt, deployment_id): + self._rpc_client.delete_deployment(ctxt, deployment_id) + + def cancel(self, ctxt, deployment_id, force): + self._rpc_client.cancel_deployment(ctxt, deployment_id, force) + + def get_deployments(self, ctxt, include_tasks=False, + include_task_info=False): + return self._rpc_client.get_deployments( + ctxt, include_tasks, include_task_info=include_task_info) + + def get_deployment(self, ctxt, deployment_id, include_task_info=False): + return self._rpc_client.get_deployment( + ctxt, deployment_id, include_task_info=include_task_info) diff --git a/coriolis/deployments/manager.py b/coriolis/deployments/manager.py new file mode 100644 index 00000000..98943e07 --- /dev/null +++ b/coriolis/deployments/manager.py @@ -0,0 +1,101 @@ +# Copyright 2017 Cloudbase Solutions Srl +# All Rights Reserved. 
+ +import gc +import sys + +import eventlet +from oslo_log import log as logging +from oslo_utils import units + +from coriolis import events +from coriolis.providers import backup_writers +from coriolis import qemu_reader +from coriolis import utils + +LOG = logging.getLogger(__name__) + + +def _copy_volume(volume, disk_image_reader, backup_writer, event_manager): + disk_id = volume["disk_id"] + # for now we assume it is a local file + path = volume["disk_image_uri"] + skip_zeroes = volume.get("zeroed", False) + + with backup_writer.open("", disk_id) as writer: + with disk_image_reader.open(path) as reader: + disk_size = reader.disk_size + + perc_step = event_manager.add_percentage_step( + "Copying data of disk %s" % disk_id, disk_size) + + offset = 0 + max_block_size = 10 * units.Mi # 10 MB + + while offset < disk_size: + allocated, zero_block, block_size = reader.get_block_status( + offset, max_block_size) + if not allocated or zero_block and skip_zeroes: + if not allocated: + LOG.debug( + "Unallocated block detected: %s", block_size) + else: + LOG.debug("Skipping zero block: %s", block_size) + offset += block_size + writer.seek(offset) + else: + buf = reader.read(offset, block_size) + writer.write(buf) + offset += len(buf) + buf = None + gc.collect() + + event_manager.set_percentage_step( + perc_step, offset) + + +def _copy_wrapper(job_args): + disk_id = job_args[0].get("disk_id") + try: + return _copy_volume(*job_args), disk_id, False + except BaseException: + return sys.exc_info(), disk_id, True + + +def copy_disk_data(target_conn_info, volumes_info, event_handler): + # TODO(gsamfira): the disk image should be an URI that can either be local + # (file://) or remote (https://, ftp://, smb://, nfs:// etc). + # This must happen if we are to implement multi-worker scenarios. + # In such cases, it is not guaranteed that the disk sync task + # will be started on the same node onto which the import + # happened. 
It may also be conceivable, that wherever the disk + # image ends up, we might be able to directly expose it using + # NFS, iSCSI or any other network protocol. In which case, + # we can skip downloading it locally just to sync it. + + event_manager = events.EventManager(event_handler) + + ip = target_conn_info["ip"] + port = target_conn_info.get("port", 22) + username = target_conn_info["username"] + pkey = target_conn_info.get("pkey") + password = target_conn_info.get("password") + event_manager.progress_update("Waiting for connectivity on %s:%s" % ( + ip, port)) + utils.wait_for_port_connectivity(ip, port) + backup_writer = backup_writers.SSHBackupWriter( + ip, port, username, pkey, password, volumes_info) + disk_image_reader = qemu_reader.QEMUDiskImageReader() + + pool = eventlet.greenpool.GreenPool() + job_data = [(vol, disk_image_reader, backup_writer, event_manager) + for vol in volumes_info] + for result, disk_id, error in pool.imap(_copy_wrapper, job_data): + # TODO(gsamfira): There is no use in letting the other disks finish + # sync-ing as we don't save the state of the disk sync anywhere (yet). 
+ # When/If we ever do add this info to the database, keep track of + # failures, and allow any other parallel sync to finish + if error: + event_manager.progress_update( + "Volume \"%s\" failed to sync" % disk_id) + raise result[0](result[1]).with_traceback(result[2]) diff --git a/coriolis/replicas/api.py b/coriolis/replicas/api.py index 66890463..73353ea0 100644 --- a/coriolis/replicas/api.py +++ b/coriolis/replicas/api.py @@ -8,13 +8,15 @@ class API(object): def __init__(self): self._rpc_client = rpc_client.ConductorClient() - def create(self, ctxt, origin_endpoint_id, destination_endpoint_id, + def create(self, ctxt, replica_scenario, + origin_endpoint_id, destination_endpoint_id, origin_minion_pool_id, destination_minion_pool_id, instance_osmorphing_minion_pool_mappings, source_environment, destination_environment, instances, network_map, storage_mappings, notes=None, user_scripts=None): return self._rpc_client.create_instances_replica( - ctxt, origin_endpoint_id, destination_endpoint_id, + ctxt, replica_scenario, + origin_endpoint_id, destination_endpoint_id, origin_minion_pool_id, destination_minion_pool_id, instance_osmorphing_minion_pool_mappings, source_environment, destination_environment, instances, From fcc84c0f2051560f8d489f481c656c644b12e800 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Wed, 10 Apr 2024 17:18:46 +0300 Subject: [PATCH 05/24] Add 'scenario' fields to /replicas API paths. 
Signed-off-by: Nashwan Azhari --- coriolis/api/v1/replicas.py | 35 ++++++++++++++++++++++++++++++----- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/coriolis/api/v1/replicas.py b/coriolis/api/v1/replicas.py index bf5c6570..6e55b2ef 100644 --- a/coriolis/api/v1/replicas.py +++ b/coriolis/api/v1/replicas.py @@ -5,8 +5,9 @@ from coriolis.api.v1.views import replica_tasks_execution_view from coriolis.api.v1.views import replica_view from coriolis.api import wsgi as api_wsgi -from coriolis.endpoints import api as endpoints_api +from coriolis import constants from coriolis import exception +from coriolis.endpoints import api as endpoints_api from coriolis.policies import replicas as replica_policies from coriolis.replicas import api @@ -25,6 +26,10 @@ LOG = logging.getLogger(__name__) +SUPPORTED_REPLICA_SCENARIOS = [ + constants.REPLICA_SCENARIO_REPLICA, + constants.REPLICA_SCENARIO_LIVE_MIGRATION] + class ReplicaController(api_wsgi.Controller): def __init__(self): @@ -66,6 +71,19 @@ def detail(self, req): def _validate_create_body(self, context, body): replica = body["replica"] + scenario = replica.get("scenario", "") + if scenario: + if scenario not in SUPPORTED_REPLICA_SCENARIOS: + raise exc.HTTPBadRequest( + explanation=f"Unsupported Replica creation scenario " + f"'{scenario}', must be one of: " + f"{SUPPORTED_REPLICA_SCENARIOS}") + else: + scenario = constants.REPLICA_SCENARIO_REPLICA + LOG.warn( + "No Replica 'scenario' field set in Replica body, " + f"defaulting to: '{scenario}'") + origin_endpoint_id = replica["origin_endpoint_id"] destination_endpoint_id = replica["destination_endpoint_id"] destination_environment = replica.get( @@ -118,7 +136,7 @@ def _validate_create_body(self, context, body): destination_environment['storage_mappings'] = storage_mappings - return (origin_endpoint_id, destination_endpoint_id, + return (scenario, origin_endpoint_id, destination_endpoint_id, source_environment, destination_environment, instances, network_map, 
storage_mappings, notes, origin_minion_pool_id, destination_minion_pool_id, @@ -128,7 +146,7 @@ def create(self, req, body): context = req.environ["coriolis.context"] context.can(replica_policies.get_replicas_policy_label("create")) - (origin_endpoint_id, destination_endpoint_id, + (scenario, origin_endpoint_id, destination_endpoint_id, source_environment, destination_environment, instances, network_map, storage_mappings, notes, origin_minion_pool_id, destination_minion_pool_id, @@ -136,7 +154,7 @@ def create(self, req, body): self._validate_create_body(context, body)) return replica_view.single(self._replica_api.create( - context, origin_endpoint_id, destination_endpoint_id, + context, scenario, origin_endpoint_id, destination_endpoint_id, origin_minion_pool_id, destination_minion_pool_id, instance_osmorphing_minion_pool_mappings, source_environment, destination_environment, instances, network_map, @@ -288,8 +306,15 @@ def _get_merged_replica_values(self, replica, updated_values): @api_utils.format_keyerror_message(resource='replica', method='update') def _validate_update_body(self, id, context, body): - replica = self._replica_api.get_replica(context, id) + + scenario = body.get("scenario", "") + if scenario and scenario != replica["scenario"]: + raise exc.HTTPBadRequest( + explanation=f"Changing Replica creation scenario is not " + f"supported (original scenario is " + f"{replica['scenario']}, received '{scenario}')") + replica_body = body['replica'] origin_endpoint_id = replica_body.get('origin_endpoint_id', None) destination_endpoint_id = replica_body.get( From 5da265dc1ff37fdbb940c423ac8923db3a5ef5de Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Wed, 10 Apr 2024 17:54:44 +0300 Subject: [PATCH 06/24] Add deployments API controller and views. 
Signed-off-by: Nashwan Azhari --- coriolis/api/v1/deployment_actions.py | 34 ++++++ coriolis/api/v1/deployments.py | 127 +++++++++++++++++++++++ coriolis/api/v1/views/deployment_view.py | 32 ++++++ 3 files changed, 193 insertions(+) create mode 100644 coriolis/api/v1/deployment_actions.py create mode 100644 coriolis/api/v1/deployments.py create mode 100644 coriolis/api/v1/views/deployment_view.py diff --git a/coriolis/api/v1/deployment_actions.py b/coriolis/api/v1/deployment_actions.py new file mode 100644 index 00000000..4b718637 --- /dev/null +++ b/coriolis/api/v1/deployment_actions.py @@ -0,0 +1,34 @@ +# Copyright 2024 Cloudbase Solutions Srl +# All Rights Reserved. + +from coriolis.api import wsgi as api_wsgi +from coriolis import exception +from coriolis.deployments import api +from coriolis.policies import migrations as migration_policies + +from webob import exc + + +class DeploymentActionsController(api_wsgi.Controller): + def __init__(self): + self._deployment_api = api.API() + super(DeploymentActionsController, self).__init__() + + @api_wsgi.action('cancel') + def _cancel(self, req, id, body): + context = req.environ['coriolis.context'] + # TODO(aznashwan): add policy definitions and checks for deployments: + context.can(migration_policies.get_migrations_policy_label("cancel")) + try: + force = (body["cancel"] or {}).get("force", False) + + self._deployment_api.cancel(context, id, force) + raise exc.HTTPNoContent() + except exception.NotFound as ex: + raise exc.HTTPNotFound(explanation=ex.msg) + except exception.InvalidParameterValue as ex: + raise exc.HTTPNotFound(explanation=ex.msg) + + +def create_resource(): + return api_wsgi.Resource(DeploymentActionsController()) diff --git a/coriolis/api/v1/deployments.py b/coriolis/api/v1/deployments.py new file mode 100644 index 00000000..8d6ff1c0 --- /dev/null +++ b/coriolis/api/v1/deployments.py @@ -0,0 +1,127 @@ +# Copyright 2024 Cloudbase Solutions Srl +# All Rights Reserved. 
+ +from coriolis.api.v1 import utils as api_utils +from coriolis.api.v1.views import deployment_view +from coriolis.api import wsgi as api_wsgi +from coriolis.endpoints import api as endpoints_api +from coriolis import exception +from coriolis.deployments import api +from coriolis.policies import migrations as migration_policies + +from oslo_config import cfg as conf +from oslo_log import log as logging +from webob import exc + + +DEPLOYMENTS_API_OPTS = [ + conf.BoolOpt("include_task_info_in_deployments_api", + default=False, + help="Whether or not to expose the internal 'info' field of " + "a Deployment as part of a `GET` request.")] + +CONF = conf.CONF +CONF.register_opts(DEPLOYMENTS_API_OPTS, 'api') + +LOG = logging.getLogger(__name__) + + +class DeploymentsController(api_wsgi.Controller): + def __init__(self): + self._deployment_api = api.API() + self._endpoints_api = endpoints_api.API() + super(DeploymentsController, self).__init__() + + def show(self, req, id): + context = req.environ["coriolis.context"] + # TODO(aznashwan): add policy definitions and checks for deployments: + context.can(migration_policies.get_migrations_policy_label("show")) + deployment = self._deployment_api.get_deployment( + context, id, + include_task_info=CONF.api.include_task_info_in_deployments_api) + if not deployment: + raise exc.HTTPNotFound() + + return deployment_view.single(deployment) + + def _list(self, req): + show_deleted = api_utils._get_show_deleted( + req.GET.get("show_deleted", None)) + context = req.environ["coriolis.context"] + context.show_deleted = show_deleted + # TODO(aznashwan): add policy definitions and checks for deployments: + context.can(migration_policies.get_migrations_policy_label("list")) + return deployment_view.collection( + self._deployment_api.get_deployments( + context, + include_tasks=CONF.api.include_task_info_in_deployments_api, + include_task_info=CONF.api.include_task_info_in_deployments_api + )) + + def index(self, req): + return 
self._list(req) + + def detail(self, req): + return self._list(req) + + @api_utils.format_keyerror_message(resource='deployment', method='create') + def _validate_deployment_input(self, context, body): + deployment = body["deployment"] + + replica_id = deployment.get("replica_id", "") + + if not replica_id: + raise exc.HTTPBadRequest( + explanation=f"Missing 'replica_id' field from deployment " + f"body. A deployment can be created strictly " + f"based on an existing Replica.") + + clone_disks = deployment.get("clone_disks", True) + force = deployment.get("force", False) + skip_os_morphing = deployment.get("skip_os_morphing", False) + instance_osmorphing_minion_pool_mappings = deployment.get( + 'instance_osmorphing_minion_pool_mappings', {}) + user_scripts = deployment.get('user_scripts', {}) + api_utils.validate_user_scripts(user_scripts) + user_scripts = api_utils.normalize_user_scripts( + user_scripts, deployment.get("instances", [])) + + return ( + replica_id, force, clone_disks, skip_os_morphing, + instance_osmorphing_minion_pool_mappings, + user_scripts) + + + def create(self, req, body): + deployment_body = body.get("deployment", {}) + context = req.environ['coriolis.context'] + # TODO(aznashwan): add policy definitions and checks for deployments: + context.can(migration_policies.get_migrations_policy_label("create")) + + (replica_id, force, clone_disks, skip_os_morphing, + instance_osmorphing_minion_pool_mappings, + user_scripts) = self._validate_deployment_input( + context, deployment_body) + + # NOTE: destination environment for replica should have been + # validated upon its creation. 
+ deployment = self._deployment_api.deploy_replica_instances( + context, replica_id, instance_osmorphing_minion_pool_mappings, + clone_disks, force, skip_os_morphing, + user_scripts=user_scripts) + + return deployment_view.single(deployment) + + def delete(self, req, id): + context = req.environ['coriolis.context'] + # TODO(aznashwan): add policy definitions and checks for deployments: + context.can(migration_policies.get_migrations_policy_label("delete")) + try: + self._deployment_api.delete(context, id) + raise exc.HTTPNoContent() + except exception.NotFound as ex: + raise exc.HTTPNotFound(explanation=ex.msg) + + +def create_resource(): + return api_wsgi.Resource(DeploymentsController()) diff --git a/coriolis/api/v1/views/deployment_view.py b/coriolis/api/v1/views/deployment_view.py new file mode 100644 index 00000000..fb4186f3 --- /dev/null +++ b/coriolis/api/v1/views/deployment_view.py @@ -0,0 +1,32 @@ +# Copyright 2024 Cloudbase Solutions Srl +# All Rights Reserved. + +from coriolis.api.v1.views import replica_tasks_execution_view as view +from coriolis.api.v1.views import utils as view_utils + + +def _format_deployment(deployment, keys=None): + deployment_dict = view_utils.format_opt(deployment, keys) + + if len(deployment_dict.get("executions", [])): + execution = view.format_replica_tasks_execution( + deployment_dict["executions"][0], keys) + del deployment_dict["executions"] + else: + execution = {} + + tasks = execution.get("tasks") + if tasks: + deployment_dict["tasks"] = tasks + + return deployment_dict + + +def single(deployment, keys=None): + return {"deployment": _format_deployment(deployment, keys)} + + +def collection(deployments, keys=None): + formatted_deployments = [_format_deployment(m, keys) + for m in deployments] + return {'deployments': formatted_deployments} From 2683310f810d2fd790611df33c70b7be756e2e2c Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Wed, 10 Apr 2024 18:01:28 +0300 Subject: [PATCH 07/24] Hook in '/deployments' path to 
API router. Signed-off-by: Nashwan Azhari --- coriolis/api/v1/router.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/coriolis/api/v1/router.py b/coriolis/api/v1/router.py index 94286f23..d8c49b3c 100644 --- a/coriolis/api/v1/router.py +++ b/coriolis/api/v1/router.py @@ -5,6 +5,8 @@ from coriolis import api from coriolis.api.v1 import diagnostics +from coriolis.api.v1 import deployments +from coriolis.api.v1 import deployment_actions from coriolis.api.v1 import endpoint_actions from coriolis.api.v1 import endpoint_destination_minion_pool_options from coriolis.api.v1 import endpoint_destination_options @@ -154,6 +156,21 @@ def _setup_routes(self, mapper, ext_mgr): action='action', conditions={'method': 'POST'}) + self.resources['deployments'] = deployments.create_resource() + mapper.resource('deployment', 'deployments', + controller=self.resources['deployments'], + collection={'detail': 'GET'}, + member={'action': 'POST'}) + + deployments_actions_resource = deployment_actions.create_resource() + self.resources['deployment_actions'] = deployments_actions_resource + deployment_path = '/{project_id}/deployment/{id}' + mapper.connect('deployment_actions', + deployment_path + '/actions', + controller=self.resources['deployment_actions'], + action='action', + conditions={'method': 'POST'}) + self.resources['replicas'] = replicas.create_resource() mapper.resource('replica', 'replicas', controller=self.resources['replicas'], From b26bd9f8bde6df29d9f53b52471df832d502c6ca Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Wed, 10 Apr 2024 18:06:25 +0300 Subject: [PATCH 08/24] Add dedicated deployments oslo_policy definitions. 
Signed-off-by: Nashwan Azhari --- coriolis/api/v1/deployment_actions.py | 5 +- coriolis/api/v1/deployments.py | 14 ++--- coriolis/policies/deployments.py | 80 +++++++++++++++++++++++++++ coriolis/policy.py | 6 +- etc/coriolis/policy.yaml | 7 +++ 5 files changed, 98 insertions(+), 14 deletions(-) create mode 100644 coriolis/policies/deployments.py diff --git a/coriolis/api/v1/deployment_actions.py b/coriolis/api/v1/deployment_actions.py index 4b718637..200b0f01 100644 --- a/coriolis/api/v1/deployment_actions.py +++ b/coriolis/api/v1/deployment_actions.py @@ -4,7 +4,7 @@ from coriolis.api import wsgi as api_wsgi from coriolis import exception from coriolis.deployments import api -from coriolis.policies import migrations as migration_policies +from coriolis.policies import deployments as deployment_policies from webob import exc @@ -17,8 +17,7 @@ def __init__(self): @api_wsgi.action('cancel') def _cancel(self, req, id, body): context = req.environ['coriolis.context'] - # TODO(aznashwan): add policy definitions and checks for deployments: - context.can(migration_policies.get_migrations_policy_label("cancel")) + context.can(deployment_policies.get_deployments_policy_label("cancel")) try: force = (body["cancel"] or {}).get("force", False) diff --git a/coriolis/api/v1/deployments.py b/coriolis/api/v1/deployments.py index 8d6ff1c0..f69c7784 100644 --- a/coriolis/api/v1/deployments.py +++ b/coriolis/api/v1/deployments.py @@ -7,7 +7,7 @@ from coriolis.endpoints import api as endpoints_api from coriolis import exception from coriolis.deployments import api -from coriolis.policies import migrations as migration_policies +from coriolis.policies import deployments as deployment_policies from oslo_config import cfg as conf from oslo_log import log as logging @@ -34,8 +34,7 @@ def __init__(self): def show(self, req, id): context = req.environ["coriolis.context"] - # TODO(aznashwan): add policy definitions and checks for deployments: - 
context.can(migration_policies.get_migrations_policy_label("show")) + context.can(deployment_policies.get_deployments_policy_label("show")) deployment = self._deployment_api.get_deployment( context, id, include_task_info=CONF.api.include_task_info_in_deployments_api) @@ -49,8 +48,7 @@ def _list(self, req): req.GET.get("show_deleted", None)) context = req.environ["coriolis.context"] context.show_deleted = show_deleted - # TODO(aznashwan): add policy definitions and checks for deployments: - context.can(migration_policies.get_migrations_policy_label("list")) + context.can(deployment_policies.get_deployments_policy_label("list")) return deployment_view.collection( self._deployment_api.get_deployments( context, @@ -95,8 +93,7 @@ def _validate_deployment_input(self, context, body): def create(self, req, body): deployment_body = body.get("deployment", {}) context = req.environ['coriolis.context'] - # TODO(aznashwan): add policy definitions and checks for deployments: - context.can(migration_policies.get_migrations_policy_label("create")) + context.can(deployment_policies.get_deployments_policy_label("create")) (replica_id, force, clone_disks, skip_os_morphing, instance_osmorphing_minion_pool_mappings, @@ -114,8 +111,7 @@ def create(self, req, body): def delete(self, req, id): context = req.environ['coriolis.context'] - # TODO(aznashwan): add policy definitions and checks for deployments: - context.can(migration_policies.get_migrations_policy_label("delete")) + context.can(deployment_policies.get_deployments_policy_label("delete")) try: self._deployment_api.delete(context, id) raise exc.HTTPNoContent() diff --git a/coriolis/policies/deployments.py b/coriolis/policies/deployments.py new file mode 100644 index 00000000..77e20bb0 --- /dev/null +++ b/coriolis/policies/deployments.py @@ -0,0 +1,80 @@ +# Copyright 2018 Cloudbase Solutions Srl +# All Rights Reserved. 
+ +from oslo_policy import policy + +from coriolis.policies import base + + +DEPLOYMENTS_POLICY_PREFIX = "%s:deployments" % base.CORIOLIS_POLICIES_PREFIX +DEPLOYMENTS_POLICY_DEFAULT_RULE = "rule:admin_or_owner" + + +def get_deployments_policy_label(rule_label): + return "%s:%s" % ( + DEPLOYMENTS_POLICY_PREFIX, rule_label) + + +DEPLOYMENTS_POLICY_DEFAULT_RULES = [ + policy.DocumentedRuleDefault( + get_deployments_policy_label('create'), + DEPLOYMENTS_POLICY_DEFAULT_RULE, + "Create a deployment", + [ + { + "path": "/deployments", + "method": "POST" + } + ] + ), + policy.DocumentedRuleDefault( + get_deployments_policy_label('list'), + DEPLOYMENTS_POLICY_DEFAULT_RULE, + "List deployments", + [ + { + "path": "/deployments", + "method": "GET" + } + ] + ), + policy.DocumentedRuleDefault( + get_deployments_policy_label('show'), + DEPLOYMENTS_POLICY_DEFAULT_RULE, + "Show details for a deployment", + [ + { + "path": "/deployment/{deployment_id}", + "method": "GET" + } + ] + ), + # TODO(aznashwan): deployment actions should ideally be + # declared in a separate module + policy.DocumentedRuleDefault( + get_deployments_policy_label('cancel'), + DEPLOYMENTS_POLICY_DEFAULT_RULE, + "Cancel a running Migration", + [ + { + "path": "/deployments/{deployment_id}/actions/", + "method": "POST" + } + ] + ), + policy.DocumentedRuleDefault( + get_deployments_policy_label('delete'), + DEPLOYMENTS_POLICY_DEFAULT_RULE, + "Delete Migration", + [ + { + "path": "/deployment/{deployment_id}", + "method": "DELETE" + } + ] + ) +] + + +def list_rules(): + return DEPLOYMENTS_POLICY_DEFAULT_RULES diff --git a/coriolis/policy.py b/coriolis/policy.py index fb5694f3..37ad18f3 100644 --- a/coriolis/policy.py +++ b/coriolis/policy.py @@ -9,6 +9,7 @@ from coriolis import exception from coriolis.policies import base +from coriolis.policies import deployments from coriolis.policies import diagnostics from coriolis.policies import endpoints from coriolis.policies import general @@ -28,8 +29,9 @@ _ENFORCER = 
None DEFAULT_POLICIES_MODULES = [ - base, endpoints, general, migrations, replicas, replica_schedules, - replica_tasks_executions, diagnostics, regions, services, minion_pools] + base, deployments, endpoints, general, migrations, replicas, + replica_schedules, replica_tasks_executions, diagnostics, regions, + services, minion_pools] def reset(): diff --git a/etc/coriolis/policy.yaml b/etc/coriolis/policy.yaml index 0c6654c3..7622de59 100644 --- a/etc/coriolis/policy.yaml +++ b/etc/coriolis/policy.yaml @@ -22,6 +22,13 @@ "migration:migrations:cancel": "rule:admin_or_owner" "migration:migrations:delete": "rule:admin_or_owner" +"migration:deployments:create": "rule:admin_or_owner" +"migration:deployments:list": "rule:admin_or_owner" +"migration:deployments:show": "rule:admin_or_owner" +"migration:deployments:show_execution": "rule:admin_or_owner" +"migration:deployments:cancel": "rule:admin_or_owner" +"migration:deployments:delete": "rule:admin_or_owner" + "migration:replicas:create": "rule:admin_or_owner" "migration:replicas:list": "rule:admin_or_owner" "migration:replicas:show": "rule:admin_or_owner" From 253fbefb466fdec71f4d488da97fa9444965fad2 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Thu, 11 Apr 2024 16:45:26 +0300 Subject: [PATCH 09/24] Fix POST /deployment API body key error. 
Signed-off-by: Nashwan Azhari --- coriolis/api/v1/deployments.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/coriolis/api/v1/deployments.py b/coriolis/api/v1/deployments.py index f69c7784..202ab018 100644 --- a/coriolis/api/v1/deployments.py +++ b/coriolis/api/v1/deployments.py @@ -91,14 +91,13 @@ def _validate_deployment_input(self, context, body): def create(self, req, body): - deployment_body = body.get("deployment", {}) context = req.environ['coriolis.context'] context.can(deployment_policies.get_deployments_policy_label("create")) (replica_id, force, clone_disks, skip_os_morphing, instance_osmorphing_minion_pool_mappings, user_scripts) = self._validate_deployment_input( - context, deployment_body) + context, body) # NOTE: destination environment for replica should have been # validated upon its creation. From 3ebecdd74e5dd11cab2a1fde5de73dccd7caaed6 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Mon, 27 May 2024 18:13:01 +0300 Subject: [PATCH 10/24] Fix licensing client env flag loading. Signed-off-by: Nashwan Azhari --- coriolis/licensing/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coriolis/licensing/client.py b/coriolis/licensing/client.py index da291d63..82cedab4 100644 --- a/coriolis/licensing/client.py +++ b/coriolis/licensing/client.py @@ -44,7 +44,7 @@ def from_env(cls): "instantiate licensing client.") return None allow_untrusted = os.environ.get( - "LICENSING_SERVER_ALLOW_UNTRUSTED", False) + "LICENSING_SERVER_ALLOW_UNTRUSTED", None) != None client = cls( base_url, appliance_id=None, allow_untrusted=allow_untrusted) appliance_ids = client.get_appliances() From c119df3e428e3b9c37ed446ad0ee45ccb47690bb Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Thu, 30 May 2024 14:44:01 +0300 Subject: [PATCH 11/24] Postpone task execution status setting. 
Signed-off-by: Nashwan Azhari --- coriolis/conductor/rpc/server.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/coriolis/conductor/rpc/server.py b/coriolis/conductor/rpc/server.py index 42aa816d..815e91e4 100644 --- a/coriolis/conductor/rpc/server.py +++ b/coriolis/conductor/rpc/server.py @@ -2389,8 +2389,6 @@ def _cancel_tasks_execution( def _set_tasks_execution_status( self, ctxt, execution, new_execution_status): previous_execution_status = execution.status - execution = db_api.set_execution_status( - ctxt, execution.id, new_execution_status) LOG.info( "Tasks execution %(id)s (action %(action)s) status updated " "from %(old_status)s to %(new_status)s", @@ -2438,6 +2436,9 @@ def _set_tasks_execution_status( execution.id, execution.type, execution.action_id, new_execution_status) + execution = db_api.set_execution_status( + ctxt, execution.id, new_execution_status) + @parent_tasks_execution_synchronized def set_task_host(self, ctxt, task_id, host): """ Saves the ID of the worker host which has accepted From 1093a00c6f56c30c5ae46962d94b1a753666fa41 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Thu, 30 May 2024 14:25:51 +0300 Subject: [PATCH 12/24] Fix deployment cancellation action API route. 
Signed-off-by: Nashwan Azhari --- coriolis/api/v1/router.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coriolis/api/v1/router.py b/coriolis/api/v1/router.py index d8c49b3c..a47eca43 100644 --- a/coriolis/api/v1/router.py +++ b/coriolis/api/v1/router.py @@ -164,7 +164,7 @@ def _setup_routes(self, mapper, ext_mgr): deployments_actions_resource = deployment_actions.create_resource() self.resources['deployment_actions'] = deployments_actions_resource - deployment_path = '/{project_id}/deployment/{id}' + deployment_path = '/{project_id}/deployments/{id}' mapper.connect('deployment_actions', deployment_path + '/actions', controller=self.resources['deployment_actions'], From 7286ce32ecd964e4b58aae396a932aee89ee21e3 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Thu, 30 May 2024 12:15:32 +0300 Subject: [PATCH 13/24] Hook in new reservation fulfillment logic in conductor. Signed-off-by: Nashwan Azhari --- coriolis/conductor/rpc/server.py | 196 +++++++++++++++++++++++++------ coriolis/exception.py | 6 + coriolis/licensing/client.py | 6 + 3 files changed, 174 insertions(+), 34 deletions(-) diff --git a/coriolis/conductor/rpc/server.py b/coriolis/conductor/rpc/server.py index 815e91e4..74bce421 100644 --- a/coriolis/conductor/rpc/server.py +++ b/coriolis/conductor/rpc/server.py @@ -45,6 +45,13 @@ "A fatal deadlock has occurred. Further debugging is required. " "Please review the Conductor logs and contact support for assistance.") +SCENARIO_TYPE_TO_LICENSING_RESERVATION_MAP = { + constants.REPLICA_SCENARIO_REPLICA: + licensing_client.RESERVATION_TYPE_REPLICA, + constants.REPLICA_SCENARIO_LIVE_MIGRATION: + licensing_client.RESERVATION_TYPE_MIGRATION +} + def endpoint_synchronized(func): @functools.wraps(func) @@ -290,40 +297,119 @@ def _check_delete_reservation_for_transfer(self, transfer_action): "action with ID '%s'. Skipping. 
Exception\n%s", reservation_id, action_id, utils.get_exception_details()) - def _check_create_reservation_for_transfer( - self, transfer_action, transfer_type): - action_id = transfer_action.base_id + def _create_reservation_for_replica(self, replica): + action_id = replica.base_id + scenario = replica.scenario + reservation_type = SCENARIO_TYPE_TO_LICENSING_RESERVATION_MAP.get( + scenario, None) + if not reservation_type: + raise exception.LicensingException( + message="Could not determine reservation type for replica " + f"'{action_id}' with scenario '{replica.scenario}'.") if not self._licensing_client: LOG.warn( "Licensing client not instantiated. Skipping creation of " "reservation for transfer action '%s'", action_id) return - ninstances = len(transfer_action.instances) + ninstances = len(replica.instances) LOG.debug( "Attempting to create '%s' reservation for %d instances for " "transfer action with ID '%s'.", - transfer_type, ninstances, action_id) + reservation_type, ninstances, action_id) reservation = self._licensing_client.add_reservation( - transfer_type, ninstances) - transfer_action.reservation_id = reservation['id'] + reservation_type, ninstances) + + LOG.info( + f"Sucessfully created licensing reservation for transfer " + f"with ID '{action_id}' with properties: {reservation}") + replica.reservation_id = reservation['id'] + + return reservation - def _check_reservation_for_transfer( - self, transfer_action, reservation_type): + def _get_licensing_reservation_for_action(self, transfer_action): action_id = transfer_action.base_id + if not self._licensing_client: + LOG.warn( + f"Licensing client not instantiated. 
Skipping getting " + f"reservation for transfer action '{action_id}'") + return None + + reservation_id = transfer_action.reservation_id + if not reservation_id: + LOG.warn( + f"No reservation_id set on transfer action '{action_id}'") + return None + + return self._licensing_client.get_reservation(reservation_id) + + def _check_mark_reservation_fulfilled( + self, transfer_action, must_unfulfilled=False): + action_id = transfer_action.id + reservation = self._get_licensing_reservation_for_action( + transfer_action) + if not reservation: + LOG.info( + f"No licensing reservation found for transfer action " + f"'{action_id}'. Skipping marking fulfilled.") + return + + reservation_id = reservation['id'] + fulfilled = reservation.get("fulfilled_at", None) + if fulfilled: + if must_unfulfilled: + raise exception.Conflict( + f"A licensing reservation with ID {reservation_id} " + "already exists and has been marked as fulfilled " + "within the licensing server. Please create a new " + "transfer operation in order to obtain a new " + "reservation.") + LOG.debug( + f"Reservation with ID '{reservation_id}' for transfer " + f"transfer action '{action_id}' was already marked as fulfilled") + else: + self._licensing_client.mark_reservation_fulfilled(reservation_id) + LOG.debug( + f"Successfully marked reservation with ID '{reservation_id}' for " + f"transfer action '{action_id}' as fulfilled") + + def _check_reservation_for_replica(self, replica): + scenario = replica.scenario + reservation_type = SCENARIO_TYPE_TO_LICENSING_RESERVATION_MAP.get( + scenario, None) + if not reservation_type: + raise exception.LicensingException( + message="Could not determine reservation type for replica " + f"'{replica.id}' with scenario '{replica.scenario}'.") + + action_id = replica.base_id if not self._licensing_client: LOG.warn( "Licensing client not instantiated. 
Skipping checking of " "reservation for transfer action '%s'", action_id) return - reservation_id = transfer_action.reservation_id + reservation_id = replica.reservation_id if reservation_id: LOG.debug( "Attempting to check reservation with ID '%s' for transfer " "action '%s'", reservation_id, action_id) try: - transfer_action.reservation_id = ( + reservation = self._licensing_client.get_reservation( + reservation_id) + + fulfilled_at = reservation.get("fulfilled_at", None) + if scenario == constants.REPLICA_SCENARIO_LIVE_MIGRATION and ( + fulfilled_at): + raise exception.LicensingException( + message=f"The Live Migration operation with ID " + f"'{replica.id}' (licensing reservation " + f"'{reservation_id}' has already been " + f"fulfilled on {fulfilled_at}. Please " + f"create a new Live Migration operation " + f"to create a new licensing reservation.") + + replica.reservation_id = ( self._licensing_client.check_refresh_reservation( reservation_id)['id']) except Exception as ex: @@ -331,7 +417,7 @@ def _check_reservation_for_transfer( if exc_code in [404, 409]: if exc_code == 409: LOG.debug( - "Server-side exception occurred while trying to " + "Licensing-side conflict occurred while trying to " "check the existing reservation '%s' for action " "'%s'. Attempting to create a new reservation. " "Trace was: %s", @@ -344,15 +430,14 @@ def _check_reservation_for_transfer( "reservation. 
Trace was: %s", reservation_id, action_id, utils.get_exception_details()) - self._check_create_reservation_for_transfer( - transfer_action, reservation_type) + self._create_reservation_for_replica(replica) else: raise ex else: - LOG.debug( - "Transfer action '%s' has no reservation ID set.", action_id) - self._check_create_reservation_for_transfer( - transfer_action, reservation_type) + LOG.info( + f"Transfer action '{action_id}' has no reservation ID set, " + f"attempting to create a new one for it") + self._create_reservation_for_replica(replica) def create_endpoint(self, ctxt, name, endpoint_type, description, connection_info, mapped_regions=None): @@ -825,8 +910,7 @@ def _check_task_cls_param_requirements(task, instance_task_info_keys): @replica_synchronized def execute_replica_tasks(self, ctxt, replica_id, shutdown_instances): replica = self._get_replica(ctxt, replica_id, include_task_info=True) - self._check_reservation_for_transfer( - replica, licensing_client.RESERVATION_TYPE_REPLICA) + self._check_reservation_for_replica(replica) self._check_replica_running_executions(ctxt, replica) self._check_minion_pools_for_action(ctxt, replica) @@ -1109,12 +1193,6 @@ def get_replica(self, ctxt, replica_id, include_task_info=False): def delete_replica(self, ctxt, replica_id): replica = self._get_replica(ctxt, replica_id) self._check_replica_running_executions(ctxt, replica) - # TODO(aznashwan): update reservation deletion logic if - # the Replica was never successfully deployed and its - # disks were deleted. - # This might not be possible if its executions were deleted, - # but might be possible to set the new 'fulfilled' field within - # the reservation on the licensing server after a successful execution. 
self._check_delete_reservation_for_transfer(replica) db_api.delete_replica(ctxt, replica_id) @@ -1230,10 +1308,7 @@ def create_instances_replica(self, ctxt, replica_scenario, self._check_minion_pools_for_action(ctxt, replica) - # TODO(aznashwan): add scenario-appropriate steps for - # defining the Replica reservation: - self._check_create_reservation_for_transfer( - replica, licensing_client.RESERVATION_TYPE_REPLICA) + self._create_reservation_for_replica(replica) db_api.add_replica(ctxt, replica) LOG.info("Replica created: %s", replica.id) @@ -1327,8 +1402,7 @@ def deploy_replica_instances( instance_osmorphing_minion_pool_mappings=None, skip_os_morphing=False, user_scripts=None): replica = self._get_replica(ctxt, replica_id, include_task_info=True) - self._check_reservation_for_transfer( - replica, licensing_client.RESERVATION_TYPE_REPLICA) + self._check_reservation_for_replica(replica) self._check_replica_running_executions(ctxt, replica) self._check_valid_replica_tasks_execution(replica, force) user_scripts = user_scripts or replica.user_scripts @@ -1809,8 +1883,7 @@ def migrate_instances( migration.instance_osmorphing_minion_pool_mappings = ( instance_osmorphing_minion_pool_mappings) - self._check_create_reservation_for_transfer( - migration, licensing_client.RESERVATION_TYPE_MIGRATION) + self._create_reservation_for_replica(migration) self._check_minion_pools_for_action(ctxt, migration) @@ -2386,6 +2459,57 @@ def _cancel_tasks_execution( "No new tasks were started for execution '%s' following " "state advancement after cancellation.", execution.id) + def _update_reservation_fulfillment_for_execution(self, ctxt, execution): + """ Updates the reservation fulfillment status for the parent + transfer action of the given execution based on its type. + + Replica transfers are marked as fulfilled as soon as a Replica + Execution is successfully completed. + Live migration transfers are marked as fulfilled as soon as they + are deployed for the first (and only) time. 
+ """ + if execution.type not in ( + constants.EXECUTION_TYPE_REPLICA_EXECUTION, + constants.EXECUTION_TYPE_REPLICA_DEPLOY): + LOG.debug( + f"Skipping setting reservation fulfillment for execution " + f"'{execution.id}' of type '{execution.type}'.") + return + + if execution.type not in ( + constants.EXECUTION_TYPE_REPLICA_EXECUTION, + constants.EXECUTION_TYPE_REPLICA_DEPLOY): + LOG.debug( + f"Skipping setting replica fulfillment for execution " + f"'{execution.id}' of type '{execution.type}'.") + return + + transfer_action = execution.action + transfer_id = transfer_action.base_id + if transfer_action.type == constants.TRANSFER_ACTION_TYPE_MIGRATION: + deployment = self._get_migration(ctxt, transfer_id) + transfer_id = deployment.replica_id + transfer_action = self._get_replica( + ctxt, transfer_id, include_task_info=False) + else: + transfer_action = self._get_replica( + ctxt, execution.action_id, include_task_info=False) + + if transfer_action.scenario == constants.REPLICA_SCENARIO_REPLICA and ( + execution.type == constants.EXECUTION_TYPE_REPLICA_EXECUTION): + self._check_mark_reservation_fulfilled( + transfer_action, must_unfulfilled=False) + elif transfer_action.scenario == constants.REPLICA_SCENARIO_LIVE_MIGRATION and ( + execution.type == constants.EXECUTION_TYPE_REPLICA_DEPLOY): + self._check_mark_reservation_fulfilled( + transfer_action, must_unfulfilled=False) + else: + LOG.debug( + f"Skipping setting replica fulfillment for execution " + f"'{execution.id}' of type '{execution.type}' on parent" + f"action {transfer_id} of scenario type " + f"{transfer_action.scenatio}.") + def _set_tasks_execution_status( self, ctxt, execution, new_execution_status): previous_execution_status = execution.status @@ -2396,6 +2520,10 @@ def _set_tasks_execution_status( "action": execution.action_id, "old_status": previous_execution_status}) + if new_execution_status == constants.EXECUTION_STATUS_COMPLETED: + self._update_reservation_fulfillment_for_execution( + ctxt, 
execution) + if new_execution_status in constants.FINALIZED_EXECUTION_STATUSES: # NOTE(aznashwan): because the taskflow flows within the minion # manager cannot [currently] be cancelled and are destined to diff --git a/coriolis/exception.py b/coriolis/exception.py index a1c779f0..db311782 100644 --- a/coriolis/exception.py +++ b/coriolis/exception.py @@ -137,6 +137,12 @@ class Conflict(CoriolisException): safe = True +class LicensingException(Conflict): + message = _("Licensing exception occurred") + code = 409 + safe = True + + class AdminRequired(NotAuthorized): message = _("User does not have admin privileges") diff --git a/coriolis/licensing/client.py b/coriolis/licensing/client.py index 82cedab4..ee5f71d8 100644 --- a/coriolis/licensing/client.py +++ b/coriolis/licensing/client.py @@ -214,6 +214,12 @@ def check_refresh_reservation(self, reservation_id): "/reservations/%s/refresh" % reservation_id, None, response_key="reservation") + def mark_reservation_fulfilled(self, reservation_id): + """ Marks the given reservation as fulfilled. """ + return self._post( + "/reservations/%s/fulfill" % reservation_id, None, + response_key="reservation") + def delete_reservation(self, reservation_id, raise_on_404=False): """ Deletes a reservation by its ID. Unless `raise_on_404` is set, ignores not found reservations. From 5dbbd77e3a58a4070302b84ee8b5c968804a1013 Mon Sep 17 00:00:00 2001 From: Nashwan Azhari Date: Mon, 17 Jun 2024 15:28:20 +0300 Subject: [PATCH 14/24] Minor bug fixes. 
Signed-off-by: Nashwan Azhari --- coriolis/conductor/rpc/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coriolis/conductor/rpc/server.py b/coriolis/conductor/rpc/server.py index 74bce421..6c917dde 100644 --- a/coriolis/conductor/rpc/server.py +++ b/coriolis/conductor/rpc/server.py @@ -2508,7 +2508,7 @@ def _update_reservation_fulfillment_for_execution(self, ctxt, execution): f"Skipping setting replica fulfillment for execution " f"'{execution.id}' of type '{execution.type}' on parent" f"action {transfer_id} of scenario type " - f"{transfer_action.scenatio}.") + f"{transfer_action.scenario}.") def _set_tasks_execution_status( self, ctxt, execution, new_execution_status): From fd7e1d899929d1031470998f72a03a0a13189793 Mon Sep 17 00:00:00 2001 From: Daniel Vincze Date: Thu, 8 Aug 2024 16:54:48 +0300 Subject: [PATCH 15/24] Remove `/migrations` API path and update unit tests accordingly --- coriolis/api/v1/deployment_actions.py | 6 +- coriolis/api/v1/deployments.py | 20 +- coriolis/api/v1/migration_actions.py | 33 - coriolis/api/v1/migrations.py | 189 ------ coriolis/api/v1/replicas.py | 2 +- coriolis/api/v1/router.py | 23 +- coriolis/conductor/rpc/client.py | 51 +- coriolis/conductor/rpc/server.py | 419 +------------ coriolis/db/api.py | 3 +- coriolis/licensing/client.py | 2 +- coriolis/migrations/__init__.py | 0 coriolis/migrations/api.py | 54 -- coriolis/migrations/manager.py | 101 ---- coriolis/tests/api/v1/__init__py | 0 .../v1/data/replicas_validate_create_body.yml | 3 +- .../tests/api/v1/test_migration_actions.py | 114 ---- coriolis/tests/api/v1/test_migrations.py | 260 -------- coriolis/tests/api/v1/test_replicas.py | 2 +- coriolis/tests/api/v1/test_router.py | 43 +- coriolis/tests/conductor/rpc/test_client.py | 43 +- coriolis/tests/conductor/rpc/test_server.py | 562 +----------------- coriolis/tests/migrations/__init__.py | 0 coriolis/tests/migrations/test_api.py | 93 --- coriolis/tests/replicas/test_api.py | 6 +- 24 files changed, 
68 insertions(+), 1961 deletions(-) delete mode 100644 coriolis/api/v1/migration_actions.py delete mode 100644 coriolis/api/v1/migrations.py delete mode 100644 coriolis/migrations/__init__.py delete mode 100644 coriolis/migrations/api.py delete mode 100644 coriolis/migrations/manager.py delete mode 100644 coriolis/tests/api/v1/__init__py delete mode 100644 coriolis/tests/api/v1/test_migration_actions.py delete mode 100644 coriolis/tests/api/v1/test_migrations.py delete mode 100644 coriolis/tests/migrations/__init__.py delete mode 100644 coriolis/tests/migrations/test_api.py diff --git a/coriolis/api/v1/deployment_actions.py b/coriolis/api/v1/deployment_actions.py index 200b0f01..c7f5034f 100644 --- a/coriolis/api/v1/deployment_actions.py +++ b/coriolis/api/v1/deployment_actions.py @@ -1,13 +1,13 @@ # Copyright 2024 Cloudbase Solutions Srl # All Rights Reserved. +from webob import exc + from coriolis.api import wsgi as api_wsgi -from coriolis import exception from coriolis.deployments import api +from coriolis import exception from coriolis.policies import deployments as deployment_policies -from webob import exc - class DeploymentActionsController(api_wsgi.Controller): def __init__(self): diff --git a/coriolis/api/v1/deployments.py b/coriolis/api/v1/deployments.py index 202ab018..8b92743a 100644 --- a/coriolis/api/v1/deployments.py +++ b/coriolis/api/v1/deployments.py @@ -1,19 +1,18 @@ # Copyright 2024 Cloudbase Solutions Srl # All Rights Reserved. 
+from oslo_config import cfg as conf +from oslo_log import log as logging +from webob import exc + from coriolis.api.v1 import utils as api_utils from coriolis.api.v1.views import deployment_view from coriolis.api import wsgi as api_wsgi +from coriolis.deployments import api from coriolis.endpoints import api as endpoints_api from coriolis import exception -from coriolis.deployments import api from coriolis.policies import deployments as deployment_policies -from oslo_config import cfg as conf -from oslo_log import log as logging -from webob import exc - - DEPLOYMENTS_API_OPTS = [ conf.BoolOpt("include_task_info_in_deployments_api", default=False, @@ -70,9 +69,9 @@ def _validate_deployment_input(self, context, body): if not replica_id: raise exc.HTTPBadRequest( - explanation=f"Missing 'replica_id' field from deployment " - f"body. A deployment can be created strictly " - f"based on an existing Replica.") + explanation="Missing 'replica_id' field from deployment " + "body. A deployment can be created strictly " + "based on an existing Replica.") clone_disks = deployment.get("clone_disks", True) force = deployment.get("force", False) @@ -89,14 +88,13 @@ def _validate_deployment_input(self, context, body): instance_osmorphing_minion_pool_mappings, user_scripts) - def create(self, req, body): context = req.environ['coriolis.context'] context.can(deployment_policies.get_deployments_policy_label("create")) (replica_id, force, clone_disks, skip_os_morphing, instance_osmorphing_minion_pool_mappings, - user_scripts) = self._validate_deployment_input( + user_scripts) = self._validate_deployment_input( context, body) # NOTE: destination environment for replica should have been diff --git a/coriolis/api/v1/migration_actions.py b/coriolis/api/v1/migration_actions.py deleted file mode 100644 index 64bd5b7d..00000000 --- a/coriolis/api/v1/migration_actions.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. 
- -from coriolis.api import wsgi as api_wsgi -from coriolis import exception -from coriolis.migrations import api -from coriolis.policies import migrations as migration_policies - -from webob import exc - - -class MigrationActionsController(api_wsgi.Controller): - def __init__(self): - self._migration_api = api.API() - super(MigrationActionsController, self).__init__() - - @api_wsgi.action('cancel') - def _cancel(self, req, id, body): - context = req.environ['coriolis.context'] - context.can(migration_policies.get_migrations_policy_label("cancel")) - try: - force = (body["cancel"] or {}).get("force", False) - - self._migration_api.cancel(context, id, force) - raise exc.HTTPNoContent() - except exception.NotFound as ex: - raise exc.HTTPNotFound(explanation=ex.msg) - except exception.InvalidParameterValue as ex: - raise exc.HTTPNotFound(explanation=ex.msg) - - -def create_resource(): - return api_wsgi.Resource(MigrationActionsController()) diff --git a/coriolis/api/v1/migrations.py b/coriolis/api/v1/migrations.py deleted file mode 100644 index 403b843e..00000000 --- a/coriolis/api/v1/migrations.py +++ /dev/null @@ -1,189 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. 
- -from coriolis.api.v1 import utils as api_utils -from coriolis.api.v1.views import migration_view -from coriolis.api import wsgi as api_wsgi -from coriolis.endpoints import api as endpoints_api -from coriolis import exception -from coriolis.migrations import api -from coriolis.policies import migrations as migration_policies - -from oslo_config import cfg as conf -from oslo_log import log as logging -from webob import exc - - -MIGRATIONS_API_OPTS = [ - conf.BoolOpt("include_task_info_in_migrations_api", - default=False, - help="Whether or not to expose the internal 'info' field of " - "a Migration as part of a `GET` request.")] - -CONF = conf.CONF -CONF.register_opts(MIGRATIONS_API_OPTS, 'api') - -LOG = logging.getLogger(__name__) - - -class MigrationController(api_wsgi.Controller): - def __init__(self): - self._migration_api = api.API() - self._endpoints_api = endpoints_api.API() - super(MigrationController, self).__init__() - - def show(self, req, id): - context = req.environ["coriolis.context"] - context.can(migration_policies.get_migrations_policy_label("show")) - migration = self._migration_api.get_migration( - context, id, - include_task_info=CONF.api.include_task_info_in_migrations_api) - if not migration: - raise exc.HTTPNotFound() - - return migration_view.single(migration) - - def _list(self, req): - show_deleted = api_utils._get_show_deleted( - req.GET.get("show_deleted", None)) - context = req.environ["coriolis.context"] - context.show_deleted = show_deleted - context.can(migration_policies.get_migrations_policy_label("list")) - return migration_view.collection( - self._migration_api.get_migrations( - context, - include_tasks=CONF.api.include_task_info_in_migrations_api, - include_task_info=CONF.api.include_task_info_in_migrations_api - )) - - def index(self, req): - return self._list(req) - - def detail(self, req): - return self._list(req) - - @api_utils.format_keyerror_message(resource='migration', method='create') - def 
_validate_migration_input(self, context, body): - migration = body["migration"] - origin_endpoint_id = migration["origin_endpoint_id"] - destination_endpoint_id = migration["destination_endpoint_id"] - origin_minion_pool_id = migration.get('origin_minion_pool_id') - destination_minion_pool_id = migration.get( - 'destination_minion_pool_id') - instance_osmorphing_minion_pool_mappings = migration.get( - 'instance_osmorphing_minion_pool_mappings', {}) - instances = api_utils.validate_instances_list_for_transfer( - migration.get('instances')) - extras = [ - instance - for instance in instance_osmorphing_minion_pool_mappings - if instance not in instances] - if extras: - raise ValueError( - "One or more instance OSMorphing pool mappings were " - "provided for instances (%s) which are not part of the " - "migration's declared instances (%s)" % (extras, instances)) - - notes = migration.get("notes") - skip_os_morphing = migration.get("skip_os_morphing", False) - shutdown_instances = migration.get( - "shutdown_instances", False) - replication_count = int(migration.get("replication_count", 2)) - if replication_count not in range(1, 11): - raise ValueError( - "'replication_count' must be an integer between 1 and 10." 
- " Got: %s" % replication_count) - - source_environment = migration.get("source_environment", {}) - self._endpoints_api.validate_source_environment( - context, origin_endpoint_id, source_environment) - - network_map = migration.get("network_map", {}) - api_utils.validate_network_map(network_map) - - # TODO(aznashwan): until the provider plugin interface is updated - # to have separate 'network_map' and 'storage_mappings' fields, - # we add them as part of the destination environment: - destination_environment = migration.get( - "destination_environment", {}) - destination_environment['network_map'] = network_map - self._endpoints_api.validate_target_environment( - context, destination_endpoint_id, destination_environment) - - storage_mappings = migration.get("storage_mappings", {}) - api_utils.validate_storage_mappings(storage_mappings) - # NOTE(aznashwan): we validate the destination environment for the - # import provider before appending the 'storage_mappings' parameter - # for plugins with strict property name checks which do not yet - # support storage mapping features: - destination_environment['storage_mappings'] = storage_mappings - - return (origin_endpoint_id, destination_endpoint_id, - origin_minion_pool_id, destination_minion_pool_id, - instance_osmorphing_minion_pool_mappings, source_environment, - destination_environment, instances, notes, - skip_os_morphing, replication_count, - shutdown_instances, network_map, storage_mappings) - - def create(self, req, body): - migration_body = body.get("migration", {}) - context = req.environ['coriolis.context'] - context.can(migration_policies.get_migrations_policy_label("create")) - user_scripts = migration_body.get('user_scripts', {}) - api_utils.validate_user_scripts(user_scripts) - user_scripts = api_utils.normalize_user_scripts( - user_scripts, migration_body.get("instances", [])) - replica_id = migration_body.get("replica_id") - if replica_id: - clone_disks = migration_body.get("clone_disks", True) - force 
= migration_body.get("force", False) - skip_os_morphing = migration_body.get("skip_os_morphing", False) - instance_osmorphing_minion_pool_mappings = migration_body.get( - 'instance_osmorphing_minion_pool_mappings', {}) - - # NOTE: destination environment for replica should have been - # validated upon its creation. - migration = self._migration_api.deploy_replica_instances( - context, replica_id, instance_osmorphing_minion_pool_mappings, - clone_disks, force, skip_os_morphing, - user_scripts=user_scripts) - else: - (origin_endpoint_id, - destination_endpoint_id, - origin_minion_pool_id, - destination_minion_pool_id, - instance_osmorphing_minion_pool_mappings, - source_environment, - destination_environment, - instances, - notes, - skip_os_morphing, - replication_count, - shutdown_instances, - network_map, - storage_mappings) = self._validate_migration_input( - context, body) - migration = self._migration_api.migrate_instances( - context, origin_endpoint_id, destination_endpoint_id, - origin_minion_pool_id, destination_minion_pool_id, - instance_osmorphing_minion_pool_mappings, - source_environment, destination_environment, instances, - network_map, storage_mappings, replication_count, - shutdown_instances, notes=notes, - skip_os_morphing=skip_os_morphing, - user_scripts=user_scripts) - - return migration_view.single(migration) - - def delete(self, req, id): - context = req.environ['coriolis.context'] - context.can(migration_policies.get_migrations_policy_label("delete")) - try: - self._migration_api.delete(context, id) - raise exc.HTTPNoContent() - except exception.NotFound as ex: - raise exc.HTTPNotFound(explanation=ex.msg) - - -def create_resource(): - return api_wsgi.Resource(MigrationController()) diff --git a/coriolis/api/v1/replicas.py b/coriolis/api/v1/replicas.py index 6e55b2ef..1d1aac9c 100644 --- a/coriolis/api/v1/replicas.py +++ b/coriolis/api/v1/replicas.py @@ -6,8 +6,8 @@ from coriolis.api.v1.views import replica_view from coriolis.api import wsgi as 
api_wsgi from coriolis import constants -from coriolis import exception from coriolis.endpoints import api as endpoints_api +from coriolis import exception from coriolis.policies import replicas as replica_policies from coriolis.replicas import api diff --git a/coriolis/api/v1/router.py b/coriolis/api/v1/router.py index a47eca43..28a44f7d 100644 --- a/coriolis/api/v1/router.py +++ b/coriolis/api/v1/router.py @@ -4,9 +4,9 @@ from oslo_log import log as logging from coriolis import api -from coriolis.api.v1 import diagnostics -from coriolis.api.v1 import deployments from coriolis.api.v1 import deployment_actions +from coriolis.api.v1 import deployments +from coriolis.api.v1 import diagnostics from coriolis.api.v1 import endpoint_actions from coriolis.api.v1 import endpoint_destination_minion_pool_options from coriolis.api.v1 import endpoint_destination_options @@ -16,8 +16,6 @@ from coriolis.api.v1 import endpoint_source_options from coriolis.api.v1 import endpoint_storage from coriolis.api.v1 import endpoints -from coriolis.api.v1 import migration_actions -from coriolis.api.v1 import migrations from coriolis.api.v1 import minion_pool_actions from coriolis.api.v1 import minion_pools from coriolis.api.v1 import provider_schemas @@ -141,21 +139,6 @@ def _setup_routes(self, mapper, ext_mgr): 'providers/{platform_name}/schemas/{provider_type}', controller=self.resources['provider_schemas']) - self.resources['migrations'] = migrations.create_resource() - mapper.resource('migration', 'migrations', - controller=self.resources['migrations'], - collection={'detail': 'GET'}, - member={'action': 'POST'}) - - migration_actions_resource = migration_actions.create_resource() - self.resources['migration_actions'] = migration_actions_resource - migration_path = '/{project_id}/migrations/{id}' - mapper.connect('migration_actions', - migration_path + '/actions', - controller=self.resources['migration_actions'], - action='action', - conditions={'method': 'POST'}) - 
self.resources['deployments'] = deployments.create_resource() mapper.resource('deployment', 'deployments', controller=self.resources['deployments'], @@ -164,7 +147,7 @@ def _setup_routes(self, mapper, ext_mgr): deployments_actions_resource = deployment_actions.create_resource() self.resources['deployment_actions'] = deployments_actions_resource - deployment_path = '/{project_id}/deployments/{id}' + deployment_path = '/{project_id}/deployments/{id}' mapper.connect('deployment_actions', deployment_path + '/actions', controller=self.resources['deployment_actions'], diff --git a/coriolis/conductor/rpc/client.py b/coriolis/conductor/rpc/client.py index 45b5f00d..c11b5bfa 100644 --- a/coriolis/conductor/rpc/client.py +++ b/coriolis/conductor/rpc/client.py @@ -9,7 +9,6 @@ from coriolis import events from coriolis import rpc - VERSION = "1.0" LOG = logging.getLogger(__name__) @@ -206,17 +205,6 @@ def delete_replica_disks(self, ctxt, replica_id): return self._call( ctxt, 'delete_replica_disks', replica_id=replica_id) - def get_migrations(self, ctxt, include_tasks=False, - include_task_info=False): - return self._call( - ctxt, 'get_migrations', include_tasks=include_tasks, - include_task_info=include_task_info) - - def get_migration(self, ctxt, migration_id, include_task_info=False): - return self._call( - ctxt, 'get_migration', migration_id=migration_id, - include_task_info=include_task_info) - def get_deployments(self, ctxt, include_tasks=False, include_task_info=False): return self._call( @@ -228,34 +216,6 @@ def get_deployment(self, ctxt, deployment_id, include_task_info=False): ctxt, 'get_deployment', deployment_id=deployment_id, include_task_info=include_task_info) - def migrate_instances(self, ctxt, origin_endpoint_id, - destination_endpoint_id, origin_minion_pool_id, - destination_minion_pool_id, - instance_osmorphing_minion_pool_mappings, - source_environment, destination_environment, - instances, network_map, storage_mappings, - replication_count, 
shutdown_instances=False, - notes=None, skip_os_morphing=False, - user_scripts=None): - return self._call( - ctxt, 'migrate_instances', - origin_endpoint_id=origin_endpoint_id, - destination_endpoint_id=destination_endpoint_id, - origin_minion_pool_id=origin_minion_pool_id, - destination_minion_pool_id=destination_minion_pool_id, - instance_osmorphing_minion_pool_mappings=( - instance_osmorphing_minion_pool_mappings), - destination_environment=destination_environment, - instances=instances, - notes=notes, - replication_count=replication_count, - shutdown_instances=shutdown_instances, - skip_os_morphing=skip_os_morphing, - network_map=network_map, - storage_mappings=storage_mappings, - source_environment=source_environment, - user_scripts=user_scripts) - def deploy_replica_instances( self, ctxt, replica_id, instance_osmorphing_minion_pool_mappings=None, clone_disks=False, @@ -268,21 +228,14 @@ def deploy_replica_instances( skip_os_morphing=skip_os_morphing, user_scripts=user_scripts) - def delete_migration(self, ctxt, migration_id): - self._call( - ctxt, 'delete_migration', migration_id=migration_id) - - def cancel_migration(self, ctxt, migration_id, force): - self._call( - ctxt, 'cancel_migration', migration_id=migration_id, force=force) - def delete_deployment(self, ctxt, deployment_id): self._call( ctxt, 'delete_deployment', deployment_id=deployment_id) def cancel_deployment(self, ctxt, deployment_id, force): self._call( - ctxt, 'cancel_deployment', deployment_id=deployment_id, force=force) + ctxt, 'cancel_deployment', deployment_id=deployment_id, + force=force) def set_task_host(self, ctxt, task_id, host): self._call( diff --git a/coriolis/conductor/rpc/server.py b/coriolis/conductor/rpc/server.py index 6c917dde..d2272491 100644 --- a/coriolis/conductor/rpc/server.py +++ b/coriolis/conductor/rpc/server.py @@ -366,12 +366,13 @@ def _check_mark_reservation_fulfilled( "reservation.") LOG.debug( f"Reservation with ID '{reservation_id}' for transfer " - f"transfer 
action '{action_id}' was already marked as fulfilled") + f"transfer action '{action_id}' was already marked as " + f"fulfilled") else: self._licensing_client.mark_reservation_fulfilled(reservation_id) LOG.debug( - f"Successfully marked reservation with ID '{reservation_id}' for " - f"transfer action '{action_id}' as fulfilled") + f"Successfully marked reservation with ID '{reservation_id}' " + f"for transfer action '{action_id}' as fulfilled") def _check_reservation_for_replica(self, replica): scenario = replica.scenario @@ -1324,13 +1325,6 @@ def _get_replica(self, ctxt, replica_id, include_task_info=False, "Replica with ID '%s' not found." % replica_id) return replica - def get_migrations(self, ctxt, include_tasks, - include_task_info=False): - return db_api.get_migrations( - ctxt, include_tasks, - include_task_info=include_task_info, - to_dict=True) - @migration_synchronized def get_migration(self, ctxt, migration_id, include_task_info=False): return self._get_migration( @@ -1840,398 +1834,6 @@ def report_migration_minions_allocation_error( ctxt, execution, constants.EXECUTION_STATUS_ERROR_ALLOCATING_MINIONS) - def migrate_instances( - self, ctxt, origin_endpoint_id, destination_endpoint_id, - origin_minion_pool_id, destination_minion_pool_id, - instance_osmorphing_minion_pool_mappings, source_environment, - destination_environment, instances, network_map, storage_mappings, - replication_count, shutdown_instances=False, notes=None, - skip_os_morphing=False, user_scripts=None): - origin_endpoint = self.get_endpoint(ctxt, origin_endpoint_id) - destination_endpoint = self.get_endpoint( - ctxt, destination_endpoint_id) - self._check_endpoints(ctxt, origin_endpoint, destination_endpoint) - - destination_provider_types = self._get_provider_types( - ctxt, destination_endpoint) - - migration = models.Migration() - migration.id = str(uuid.uuid4()) - migration.base_id = migration.id - migration.origin_endpoint_id = origin_endpoint_id - migration.destination_endpoint_id = 
destination_endpoint_id - migration.destination_environment = destination_environment - migration.source_environment = source_environment - migration.network_map = network_map - migration.storage_mappings = storage_mappings - migration.last_execution_status = constants.EXECUTION_STATUS_UNEXECUTED - execution = models.TasksExecution() - execution.status = constants.EXECUTION_STATUS_UNEXECUTED - execution.number = 1 - execution.type = constants.EXECUTION_TYPE_MIGRATION - migration.executions = [execution] - migration.instances = instances - migration.info = {} - migration.user_scripts = user_scripts or {} - migration.notes = notes - migration.shutdown_instances = shutdown_instances - migration.replication_count = replication_count - migration.origin_minion_pool_id = origin_minion_pool_id - migration.destination_minion_pool_id = destination_minion_pool_id - if instance_osmorphing_minion_pool_mappings is None: - instance_osmorphing_minion_pool_mappings = {} - migration.instance_osmorphing_minion_pool_mappings = ( - instance_osmorphing_minion_pool_mappings) - - self._create_reservation_for_replica(migration) - - self._check_minion_pools_for_action(ctxt, migration) - - for instance in instances: - migration.info[instance] = { - "volumes_info": [], - "source_environment": source_environment, - "target_environment": destination_environment, - "user_scripts": self._get_instance_scripts( - user_scripts, instance), - # NOTE: we must explicitly set this in each VM's info - # to prevent the Replica disks from being cloned: - "clone_disks": False} - # TODO(aznashwan): have these passed separately to the relevant - # provider methods (they're currently passed directly inside - # dest-env by the API service when accepting the call) - # "network_map": network_map, - # "storage_mappings": storage_mappings, - - get_instance_info_task = self._create_task( - instance, - constants.TASK_TYPE_GET_INSTANCE_INFO, - execution) - - validate_migration_source_inputs_task = self._create_task( - 
instance, - constants.TASK_TYPE_VALIDATE_MIGRATION_SOURCE_INPUTS, - execution) - - validate_migration_destination_inputs_task = self._create_task( - instance, - constants.TASK_TYPE_VALIDATE_MIGRATION_DESTINATION_INPUTS, - execution, - depends_on=[get_instance_info_task.id]) - - migration_resources_task_ids = [] - validate_origin_minion_task = None - deploy_migration_source_resources_task = None - migration_resources_task_deps = [ - get_instance_info_task.id, - validate_migration_source_inputs_task.id] - if migration.origin_minion_pool_id: - # NOTE: these values are required for the - # _check_execution_tasks_sanity call but - # will be populated later when the pool - # allocations actually happen: - migration.info[instance].update({ - "origin_minion_machine_id": None, - "origin_minion_provider_properties": None, - "origin_minion_connection_info": None}) - validate_origin_minion_task = self._create_task( - instance, - constants.TASK_TYPE_VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY, # noqa: E501 - execution, - depends_on=migration_resources_task_deps) - migration_resources_task_ids.append( - validate_origin_minion_task.id) - else: - deploy_migration_source_resources_task = self._create_task( - instance, - constants.TASK_TYPE_DEPLOY_MIGRATION_SOURCE_RESOURCES, - execution, depends_on=migration_resources_task_deps) - migration_resources_task_ids.append( - deploy_migration_source_resources_task.id) - - create_instance_disks_task = self._create_task( - instance, constants.TASK_TYPE_CREATE_INSTANCE_DISKS, - execution, depends_on=[ - validate_migration_source_inputs_task.id, - validate_migration_destination_inputs_task.id]) - - validate_destination_minion_task = None - attach_destination_minion_disks_task = None - deploy_migration_target_resources_task = None - if migration.destination_minion_pool_id: - # NOTE: these values are required for the - # _check_execution_tasks_sanity call but - # will be populated later when the pool - # allocations actually happen: - 
migration.info[instance].update({ - "destination_minion_machine_id": None, - "destination_minion_provider_properties": None, - "destination_minion_connection_info": None, - "destination_minion_backup_writer_connection_info": None}) - ttyp = ( - constants.TASK_TYPE_VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY) # noqa: E501 - validate_destination_minion_task = self._create_task( - instance, ttyp, execution, depends_on=[ - validate_migration_destination_inputs_task.id]) - - attach_destination_minion_disks_task = self._create_task( - instance, - constants.TASK_TYPE_ATTACH_VOLUMES_TO_DESTINATION_MINION, - execution, depends_on=[ - validate_destination_minion_task.id, - create_instance_disks_task.id]) - migration_resources_task_ids.append( - attach_destination_minion_disks_task.id) - else: - deploy_migration_target_resources_task = self._create_task( - instance, - constants.TASK_TYPE_DEPLOY_MIGRATION_TARGET_RESOURCES, - execution, depends_on=[create_instance_disks_task.id]) - migration_resources_task_ids.append( - deploy_migration_target_resources_task.id) - - validate_osmorphing_minion_task = None - if not skip_os_morphing and ( - instance in instance_osmorphing_minion_pool_mappings): - # NOTE: these values are required for the - # _check_execution_tasks_sanity call but - # will be populated later when the pool - # allocations actually happen: - migration.info[instance].update({ - "osmorphing_minion_machine_id": None, - "osmorphing_minion_provider_properties": None, - "osmorphing_minion_connection_info": None}) - validate_osmorphing_minion_task = self._create_task( - instance, - constants.TASK_TYPE_VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY, # noqa: E501 - execution, depends_on=[ - validate_migration_destination_inputs_task.id]) - migration_resources_task_ids.append( - validate_osmorphing_minion_task.id) - - last_sync_task = None - first_sync_task = None - for i in range(migration.replication_count): - # insert SHUTDOWN_INSTANCES task before the last sync: - if i 
== (migration.replication_count - 1) and ( - migration.shutdown_instances): - shutdown_deps = migration_resources_task_ids - if last_sync_task: - shutdown_deps = [last_sync_task.id] - last_sync_task = self._create_task( - instance, constants.TASK_TYPE_SHUTDOWN_INSTANCE, - execution, depends_on=shutdown_deps) - - replication_deps = migration_resources_task_ids - if last_sync_task: - replication_deps = [last_sync_task.id] - - last_sync_task = self._create_task( - instance, constants.TASK_TYPE_REPLICATE_DISKS, - execution, depends_on=replication_deps) - if not first_sync_task: - first_sync_task = last_sync_task - - release_origin_minion_task = None - delete_source_resources_task = None - source_resource_cleanup_task = None - if migration.origin_minion_pool_id: - release_origin_minion_task = self._create_task( - instance, - constants.TASK_TYPE_RELEASE_SOURCE_MINION, # noqa: E501 - execution, - depends_on=[ - validate_origin_minion_task.id, - last_sync_task.id], - on_error=True) - source_resource_cleanup_task = release_origin_minion_task - else: - delete_source_resources_task = self._create_task( - instance, - constants.TASK_TYPE_DELETE_MIGRATION_SOURCE_RESOURCES, - execution, depends_on=[ - deploy_migration_source_resources_task.id, - last_sync_task.id], - on_error=True) - source_resource_cleanup_task = delete_source_resources_task - - cleanup_source_storage_task = self._create_task( - instance, constants.TASK_TYPE_CLEANUP_INSTANCE_SOURCE_STORAGE, - execution, depends_on=[ - first_sync_task.id, - source_resource_cleanup_task.id], - on_error=True) - - target_resources_cleanup_task = None - if migration.destination_minion_pool_id: - detach_volumes_from_destination_minion_task = ( - self._create_task( - instance, - constants.TASK_TYPE_DETACH_VOLUMES_FROM_DESTINATION_MINION, # noqa: E501 - execution, - depends_on=[ - attach_destination_minion_disks_task.id, - last_sync_task.id], - on_error=True)) - - release_destination_minion_task = self._create_task( - instance, - 
constants.TASK_TYPE_RELEASE_DESTINATION_MINION, - execution, depends_on=[ - validate_destination_minion_task.id, - detach_volumes_from_destination_minion_task.id], - on_error=True) - target_resources_cleanup_task = release_destination_minion_task - else: - delete_destination_resources_task = self._create_task( - instance, - constants.TASK_TYPE_DELETE_MIGRATION_TARGET_RESOURCES, - execution, depends_on=[ - deploy_migration_target_resources_task.id, - last_sync_task.id], - on_error=True) - target_resources_cleanup_task = ( - delete_destination_resources_task) - - deploy_instance_task = self._create_task( - instance, constants.TASK_TYPE_DEPLOY_INSTANCE_RESOURCES, - execution, depends_on=[ - last_sync_task.id, - target_resources_cleanup_task.id]) - - depends_on = [deploy_instance_task.id] - osmorphing_resources_cleanup_task = None - if not skip_os_morphing: - task_deploy_os_morphing_resources = None - task_delete_os_morphing_resources = None - attach_osmorphing_minion_volumes_task = None - last_osmorphing_resources_deployment_task = None - if instance in ( - migration.instance_osmorphing_minion_pool_mappings): - osmorphing_vol_attachment_deps = [ - validate_osmorphing_minion_task.id] - osmorphing_vol_attachment_deps.extend(depends_on) - attach_osmorphing_minion_volumes_task = self._create_task( - instance, - constants.TASK_TYPE_ATTACH_VOLUMES_TO_OSMORPHING_MINION, # noqa: E501 - execution, depends_on=osmorphing_vol_attachment_deps) - last_osmorphing_resources_deployment_task = ( - attach_osmorphing_minion_volumes_task) - - collect_osmorphing_info_task = self._create_task( - instance, - constants.TASK_TYPE_COLLECT_OSMORPHING_INFO, - execution, - depends_on=[attach_osmorphing_minion_volumes_task.id]) - last_osmorphing_resources_deployment_task = ( - collect_osmorphing_info_task) - else: - task_deploy_os_morphing_resources = self._create_task( - instance, - constants.TASK_TYPE_DEPLOY_OS_MORPHING_RESOURCES, - execution, depends_on=depends_on) - 
last_osmorphing_resources_deployment_task = ( - task_deploy_os_morphing_resources) - - task_osmorphing = self._create_task( - instance, constants.TASK_TYPE_OS_MORPHING, - execution, depends_on=[ - last_osmorphing_resources_deployment_task.id]) - - depends_on = [task_osmorphing.id] - - if instance in ( - migration.instance_osmorphing_minion_pool_mappings): - detach_osmorphing_minion_volumes_task = self._create_task( - instance, - constants.TASK_TYPE_DETACH_VOLUMES_FROM_OSMORPHING_MINION, # noqa: E501 - execution, depends_on=[ - attach_osmorphing_minion_volumes_task.id, - task_osmorphing.id], - on_error=True) - - release_osmorphing_minion_task = self._create_task( - instance, - constants.TASK_TYPE_RELEASE_OSMORPHING_MINION, - execution, depends_on=[ - validate_osmorphing_minion_task.id, - detach_osmorphing_minion_volumes_task.id], - on_error=True) - depends_on.append(release_osmorphing_minion_task.id) - osmorphing_resources_cleanup_task = ( - release_osmorphing_minion_task) - else: - task_delete_os_morphing_resources = ( - self._create_task( - instance, constants.TASK_TYPE_DELETE_OS_MORPHING_RESOURCES, # noqa: E501 - execution, depends_on=[ - task_deploy_os_morphing_resources.id, - task_osmorphing.id], - on_error=True)) - - depends_on.append(task_delete_os_morphing_resources.id) - osmorphing_resources_cleanup_task = ( - task_delete_os_morphing_resources) - - if (constants.PROVIDER_TYPE_INSTANCE_FLAVOR in - destination_provider_types): - get_optimal_flavor_task = self._create_task( - instance, constants.TASK_TYPE_GET_OPTIMAL_FLAVOR, - execution, depends_on=depends_on) - depends_on = [get_optimal_flavor_task.id] - - finalize_deployment_task = self._create_task( - instance, - constants.TASK_TYPE_FINALIZE_INSTANCE_DEPLOYMENT, - execution, depends_on=depends_on) - - cleanup_failed_deployment_task = self._create_task( - instance, - constants.TASK_TYPE_CLEANUP_FAILED_INSTANCE_DEPLOYMENT, - execution, depends_on=[ - deploy_instance_task.id, - finalize_deployment_task.id], - 
on_error_only=True) - - cleanup_deps = [ - create_instance_disks_task.id, - cleanup_source_storage_task.id, - target_resources_cleanup_task.id, - cleanup_failed_deployment_task.id] - if osmorphing_resources_cleanup_task: - cleanup_deps.append(osmorphing_resources_cleanup_task.id) - self._create_task( - instance, constants.TASK_TYPE_CLEANUP_INSTANCE_TARGET_STORAGE, - execution, depends_on=cleanup_deps, - on_error_only=True) - - self._check_execution_tasks_sanity(execution, migration.info) - db_api.add_migration(ctxt, migration) - LOG.info("Migration added to DB: %s", migration.id) - - uses_minion_pools = any([ - migration.origin_minion_pool_id, - migration.destination_minion_pool_id, - migration.instance_osmorphing_minion_pool_mappings]) - if uses_minion_pools: - # NOTE: we lock on the migration ID to ensure the minion - # allocation confirmations don't come in too early: - with lockutils.lock( - constants.MIGRATION_LOCK_NAME_FORMAT % migration.id, - external=True): - (self._minion_manager_client - .allocate_minion_machines_for_migration( - ctxt, migration, include_transfer_minions=True, - include_osmorphing_minions=not skip_os_morphing) - ) - self._set_tasks_execution_status( - ctxt, execution, - constants.EXECUTION_STATUS_AWAITING_MINION_ALLOCATIONS) - else: - self._begin_tasks(ctxt, migration, execution) - - return self.get_migration(ctxt, migration.id) - def _get_migration(self, ctxt, migration_id, include_task_info=False, to_dict=False): migration = db_api.get_migration( @@ -2251,10 +1853,6 @@ def _delete_migration(self, ctxt, migration_id): "'%s' state." 
% (migration_id, execution.status)) db_api.delete_migration(ctxt, migration_id) - @migration_synchronized - def delete_migration(self, ctxt, migration_id): - self._delete_migration(ctxt, migration_id) - @deployment_synchronized def delete_deployment(self, ctxt, deployment_id): self._delete_migration(ctxt, deployment_id) @@ -2280,10 +1878,6 @@ def _cancel_migration(self, ctxt, migration_id, force): external=True): self._cancel_tasks_execution(ctxt, execution, force=force) - @migration_synchronized - def cancel_migration(self, ctxt, migration_id, force): - self._cancel_migration(ctxt, migration_id, force) - @deployment_synchronized def cancel_deployment(self, ctxt, deployment_id, force): self._cancel_migration(ctxt, deployment_id, force) @@ -2495,11 +2089,12 @@ def _update_reservation_fulfillment_for_execution(self, ctxt, execution): transfer_action = self._get_replica( ctxt, execution.action_id, include_task_info=False) - if transfer_action.scenario == constants.REPLICA_SCENARIO_REPLICA and ( + scenario = transfer_action.scenario + if scenario == constants.REPLICA_SCENARIO_REPLICA and ( execution.type == constants.EXECUTION_TYPE_REPLICA_EXECUTION): self._check_mark_reservation_fulfilled( transfer_action, must_unfulfilled=False) - elif transfer_action.scenario == constants.REPLICA_SCENARIO_LIVE_MIGRATION and ( + elif scenario == constants.REPLICA_SCENARIO_LIVE_MIGRATION and ( execution.type == constants.EXECUTION_TYPE_REPLICA_DEPLOY): self._check_mark_reservation_fulfilled( transfer_action, must_unfulfilled=False) diff --git a/coriolis/db/api.py b/coriolis/db/api.py index 92234ac5..65be5d45 100644 --- a/coriolis/db/api.py +++ b/coriolis/db/api.py @@ -15,7 +15,6 @@ from sqlalchemy.sql import null from coriolis.db.sqlalchemy import models -from coriolis import constants from coriolis import exception from coriolis import utils @@ -548,7 +547,7 @@ def get_migrations(context, q = q.options(orm.undefer('info')) if replica_migrations_only: - 
q.filter(models.Migration.replica_id != None)
+            q.filter(models.Migration.replica_id.isnot(None))
 
     args = {}
     if is_user_context(context):
diff --git a/coriolis/licensing/client.py b/coriolis/licensing/client.py
index ee5f71d8..967ce8eb 100644
--- a/coriolis/licensing/client.py
+++ b/coriolis/licensing/client.py
@@ -44,7 +44,7 @@ def from_env(cls):
                 "instantiate licensing client.")
             return None
         allow_untrusted = os.environ.get(
-            "LICENSING_SERVER_ALLOW_UNTRUSTED", None) != None
+            "LICENSING_SERVER_ALLOW_UNTRUSTED", None) is not None
         client = cls(
             base_url, appliance_id=None, allow_untrusted=allow_untrusted)
         appliance_ids = client.get_appliances()
diff --git a/coriolis/migrations/__init__.py b/coriolis/migrations/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/coriolis/migrations/api.py b/coriolis/migrations/api.py
deleted file mode 100644
index 7918825f..00000000
--- a/coriolis/migrations/api.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2016 Cloudbase Solutions Srl
-# All Rights Reserved.
- -from coriolis.conductor.rpc import client as rpc_client - - -class API(object): - def __init__(self): - self._rpc_client = rpc_client.ConductorClient() - - def migrate_instances(self, ctxt, origin_endpoint_id, - destination_endpoint_id, origin_minion_pool_id, - destination_minion_pool_id, - instance_osmorphing_minion_pool_mappings, - source_environment, destination_environment, - instances, network_map, storage_mappings, - replication_count, - shutdown_instances, notes=None, - skip_os_morphing=False, user_scripts=None): - return self._rpc_client.migrate_instances( - ctxt, origin_endpoint_id, destination_endpoint_id, - origin_minion_pool_id, destination_minion_pool_id, - instance_osmorphing_minion_pool_mappings, source_environment, - destination_environment, instances, network_map, - storage_mappings, replication_count, - shutdown_instances=shutdown_instances, - notes=notes, skip_os_morphing=skip_os_morphing, - user_scripts=user_scripts) - - def deploy_replica_instances(self, ctxt, replica_id, - instance_osmorphing_minion_pool_mappings, - clone_disks=False, force=False, - skip_os_morphing=False, user_scripts=None): - return self._rpc_client.deploy_replica_instances( - ctxt, replica_id, instance_osmorphing_minion_pool_mappings=( - instance_osmorphing_minion_pool_mappings), - clone_disks=clone_disks, force=force, - skip_os_morphing=skip_os_morphing, - user_scripts=user_scripts) - - def delete(self, ctxt, migration_id): - self._rpc_client.delete_migration(ctxt, migration_id) - - def cancel(self, ctxt, migration_id, force): - self._rpc_client.cancel_migration(ctxt, migration_id, force) - - def get_migrations(self, ctxt, include_tasks=False, - include_task_info=False): - return self._rpc_client.get_migrations( - ctxt, include_tasks, include_task_info=include_task_info) - - def get_migration(self, ctxt, migration_id, include_task_info=False): - return self._rpc_client.get_migration( - ctxt, migration_id, include_task_info=include_task_info) diff --git 
a/coriolis/migrations/manager.py b/coriolis/migrations/manager.py deleted file mode 100644 index 98943e07..00000000 --- a/coriolis/migrations/manager.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright 2017 Cloudbase Solutions Srl -# All Rights Reserved. - -import gc -import sys - -import eventlet -from oslo_log import log as logging -from oslo_utils import units - -from coriolis import events -from coriolis.providers import backup_writers -from coriolis import qemu_reader -from coriolis import utils - -LOG = logging.getLogger(__name__) - - -def _copy_volume(volume, disk_image_reader, backup_writer, event_manager): - disk_id = volume["disk_id"] - # for now we assume it is a local file - path = volume["disk_image_uri"] - skip_zeroes = volume.get("zeroed", False) - - with backup_writer.open("", disk_id) as writer: - with disk_image_reader.open(path) as reader: - disk_size = reader.disk_size - - perc_step = event_manager.add_percentage_step( - "Copying data of disk %s" % disk_id, disk_size) - - offset = 0 - max_block_size = 10 * units.Mi # 10 MB - - while offset < disk_size: - allocated, zero_block, block_size = reader.get_block_status( - offset, max_block_size) - if not allocated or zero_block and skip_zeroes: - if not allocated: - LOG.debug( - "Unallocated block detected: %s", block_size) - else: - LOG.debug("Skipping zero block: %s", block_size) - offset += block_size - writer.seek(offset) - else: - buf = reader.read(offset, block_size) - writer.write(buf) - offset += len(buf) - buf = None - gc.collect() - - event_manager.set_percentage_step( - perc_step, offset) - - -def _copy_wrapper(job_args): - disk_id = job_args[0].get("disk_id") - try: - return _copy_volume(*job_args), disk_id, False - except BaseException: - return sys.exc_info(), disk_id, True - - -def copy_disk_data(target_conn_info, volumes_info, event_handler): - # TODO(gsamfira): the disk image should be an URI that can either be local - # (file://) or remote (https://, ftp://, smb://, nfs:// etc). 
- # This must happen if we are to implement multi-worker scenarios. - # In such cases, it is not guaranteed that the disk sync task - # will be started on the same node onto which the import - # happened. It may also be conceivable, that wherever the disk - # image ends up, we might be able to directly expose it using - # NFS, iSCSI or any other network protocol. In which case, - # we can skip downloading it locally just to sync it. - - event_manager = events.EventManager(event_handler) - - ip = target_conn_info["ip"] - port = target_conn_info.get("port", 22) - username = target_conn_info["username"] - pkey = target_conn_info.get("pkey") - password = target_conn_info.get("password") - event_manager.progress_update("Waiting for connectivity on %s:%s" % ( - ip, port)) - utils.wait_for_port_connectivity(ip, port) - backup_writer = backup_writers.SSHBackupWriter( - ip, port, username, pkey, password, volumes_info) - disk_image_reader = qemu_reader.QEMUDiskImageReader() - - pool = eventlet.greenpool.GreenPool() - job_data = [(vol, disk_image_reader, backup_writer, event_manager) - for vol in volumes_info] - for result, disk_id, error in pool.imap(_copy_wrapper, job_data): - # TODO(gsamfira): There is no use in letting the other disks finish - # sync-ing as we don't save the state of the disk sync anywhere (yet). 
- # When/If we ever do add this info to the database, keep track of - # failures, and allow any other paralel sync to finish - if error: - event_manager.progress_update( - "Volume \"%s\" failed to sync" % disk_id) - raise result[0](result[1]).with_traceback(result[2]) diff --git a/coriolis/tests/api/v1/__init__py b/coriolis/tests/api/v1/__init__py deleted file mode 100644 index e69de29b..00000000 diff --git a/coriolis/tests/api/v1/data/replicas_validate_create_body.yml b/coriolis/tests/api/v1/data/replicas_validate_create_body.yml index b4c9059b..dacd4d9c 100644 --- a/coriolis/tests/api/v1/data/replicas_validate_create_body.yml +++ b/coriolis/tests/api/v1/data/replicas_validate_create_body.yml @@ -19,6 +19,7 @@ storage_mappings: "mock_storage_mappings" exception_raised: False expected_result: + - replica - mock_origin_endpoint_id - mock_destination_endpoint_id - mock_source_environment @@ -52,5 +53,5 @@ user_scripts: "mock_user_scripts" storage_mappings: "mock_storage_mappings" exception_raised: "One or more instance OSMorphing pool mappings were" - expected_result: + expected_result: diff --git a/coriolis/tests/api/v1/test_migration_actions.py b/coriolis/tests/api/v1/test_migration_actions.py deleted file mode 100644 index 563bd6af..00000000 --- a/coriolis/tests/api/v1/test_migration_actions.py +++ /dev/null @@ -1,114 +0,0 @@ -# Copyright 2023 Cloudbase Solutions Srl -# All Rights Reserved. 
- -from unittest import mock - -from webob import exc - -from coriolis.api.v1 import migration_actions -from coriolis import exception -from coriolis.migrations import api -from coriolis.tests import test_base -from coriolis.tests import testutils - - -class MigrationActionsControllerTestCase(test_base.CoriolisBaseTestCase): - """Test suite for the Coriolis Migration Actions v1 API""" - - def setUp(self): - super(MigrationActionsControllerTestCase, self).setUp() - self.migration_actions = migration_actions.MigrationActionsController() - - @mock.patch.object(api.API, 'cancel') - def test__cancel( - self, - mock_cancel, - ): - mock_req = mock.Mock() - mock_context = mock.Mock() - mock_req.environ = {'coriolis.context': mock_context} - id = mock.sentinel.id - mock_body = { - 'cancel': { - 'force': False - } - } - - self.assertRaises( - exc.HTTPNoContent, - testutils.get_wrapped_function(self.migration_actions._cancel), - mock_req, - id, - mock_body - ) - - mock_context.can.assert_called_once_with("migration:migrations:cancel") - mock_cancel.assert_called_once_with(mock_context, id, False) - - @mock.patch.object(api.API, 'cancel') - def test__cancel_empty( - self, - mock_cancel, - ): - mock_req = mock.Mock() - mock_context = mock.Mock() - mock_req.environ = {'coriolis.context': mock_context} - id = mock.sentinel.id - mock_body = {'cancel': {}} - - self.assertRaises( - exc.HTTPNoContent, - testutils.get_wrapped_function(self.migration_actions._cancel), - mock_req, - id, - mock_body - ) - - mock_context.can.assert_called_once_with("migration:migrations:cancel") - mock_cancel.assert_called_once_with(mock_context, id, False) - - @mock.patch.object(api.API, 'cancel') - def test__cancel_not_found( - self, - mock_cancel, - ): - mock_req = mock.Mock() - mock_context = mock.Mock() - mock_req.environ = {'coriolis.context': mock_context} - id = mock.sentinel.id - mock_body = {'cancel': {}} - mock_cancel.side_effect = exception.NotFound() - - self.assertRaises( - exc.HTTPNotFound, 
- testutils.get_wrapped_function(self.migration_actions._cancel), - mock_req, - id, - mock_body - ) - - mock_context.can.assert_called_once_with("migration:migrations:cancel") - mock_cancel.assert_called_once_with(mock_context, id, False) - - @mock.patch.object(api.API, 'cancel') - def test__cancel_invalid_parameter_value( - self, - mock_cancel, - ): - mock_req = mock.Mock() - mock_context = mock.Mock() - mock_req.environ = {'coriolis.context': mock_context} - id = mock.sentinel.id - mock_body = {'cancel': {}} - mock_cancel.side_effect = exception.InvalidParameterValue("err") - - self.assertRaises( - exc.HTTPNotFound, - testutils.get_wrapped_function(self.migration_actions._cancel), - mock_req, - id, - mock_body - ) - - mock_context.can.assert_called_once_with("migration:migrations:cancel") - mock_cancel.assert_called_once_with(mock_context, id, False) diff --git a/coriolis/tests/api/v1/test_migrations.py b/coriolis/tests/api/v1/test_migrations.py deleted file mode 100644 index 67232718..00000000 --- a/coriolis/tests/api/v1/test_migrations.py +++ /dev/null @@ -1,260 +0,0 @@ -# Copyright 2023 Cloudbase Solutions Srl -# All Rights Reserved. 
- -from unittest import mock -from webob import exc - -import ddt - -from coriolis.api.v1 import migrations -from coriolis.api.v1 import utils as api_utils -from coriolis.api.v1.views import migration_view -from coriolis.endpoints import api as endpoints_api -from coriolis import exception -from coriolis.migrations import api -from coriolis.tests import test_base -from coriolis.tests import testutils - - -@ddt.ddt -class MigrationControllerTestCase(test_base.CoriolisBaseTestCase): - """Test suite for the Coriolis Migrations v1 API""" - - def setUp(self): - super(MigrationControllerTestCase, self).setUp() - self.migrations = migrations.MigrationController() - - @mock.patch.object(migration_view, 'single') - @mock.patch.object(api.API, 'get_migration') - @mock.patch('coriolis.api.v1.migrations.CONF') - def test_show( - self, - mock_conf, - mock_get_migration, - mock_single - ): - mock_req = mock.Mock() - mock_context = mock.Mock() - mock_req.environ = {'coriolis.context': mock_context} - id = mock.sentinel.id - mock_conf.api.include_task_info_in_migrations_api = False - - result = self.migrations.show(mock_req, id) - - self.assertEqual( - mock_single.return_value, - result - ) - - mock_context.can.assert_called_once_with("migration:migrations:show") - mock_get_migration.assert_called_once_with( - mock_context, id, include_task_info=False - ) - mock_single.assert_called_once_with(mock_get_migration.return_value) - - @mock.patch.object(api.API, 'get_migration') - @mock.patch('coriolis.api.v1.migrations.CONF') - def test_show_no_migration( - self, - mock_conf, - mock_get_migration - ): - mock_req = mock.Mock() - mock_context = mock.Mock() - mock_req.environ = {'coriolis.context': mock_context} - id = mock.sentinel.id - mock_conf.api.include_task_info_in_migrations_api = False - mock_get_migration.return_value = None - - self.assertRaises( - exc.HTTPNotFound, - self.migrations.show, - mock_req, - id - ) - - 
mock_context.can.assert_called_once_with("migration:migrations:show") - mock_get_migration.assert_called_once_with( - mock_context, id, include_task_info=False - ) - - @mock.patch.object(migration_view, 'collection') - @mock.patch.object(api.API, 'get_migrations') - @mock.patch.object(api_utils, '_get_show_deleted') - @mock.patch('coriolis.api.v1.migrations.CONF') - def test__list( - self, - mock_conf, - mock__get_show_deleted, - mock_get_migrations, - mock_collection - ): - mock_req = mock.Mock() - mock_context = mock.Mock() - mock_req.environ = {'coriolis.context': mock_context} - mock_conf.api.include_task_info_in_migrations_api = False - - result = self.migrations._list(mock_req) - - self.assertEqual( - mock_collection.return_value, - result - ) - self.assertEqual( - mock_context.show_deleted, - mock__get_show_deleted.return_value - ) - - mock__get_show_deleted.assert_called_once_with( - mock_req.GET.get.return_value) - mock_context.can.assert_called_once_with("migration:migrations:list") - mock_get_migrations.assert_called_once_with( - mock_context, - include_tasks=False, - include_task_info=False - ) - - @mock.patch.object(api_utils, 'validate_storage_mappings') - @mock.patch.object(endpoints_api.API, 'validate_target_environment') - @mock.patch.object(api_utils, 'validate_network_map') - @mock.patch.object(endpoints_api.API, 'validate_source_environment') - @mock.patch.object(api_utils, 'validate_instances_list_for_transfer') - @ddt.file_data('data/migration_validate_input.yml') - @ddt.unpack - def test__validate_migration_input( - self, - mock_validate_instances_list_for_transfer, - mock_validate_source_environment, - mock_validate_network_map, - mock_validate_target_environment, - mock_validate_storage_mappings, - config, - raises_value_error, - ): - mock_context = mock.Mock() - mock_validate_instances_list_for_transfer.return_value = \ - config['migration']['instances'] - - if raises_value_error: - self.assertRaises( - ValueError, - 
testutils.get_wrapped_function( - self.migrations._validate_migration_input), - self.migrations, - context=mock_context, - body=config - ) - mock_validate_instances_list_for_transfer.assert_called_once() - else: - testutils.get_wrapped_function( - self.migrations._validate_migration_input)( - self.migrations, - context=mock_context, # type: ignore - body=config, # type: ignore - ) - mock_validate_source_environment.assert_called_once_with( - mock_context, - config['migration']['origin_endpoint_id'], - config['migration']['source_environment'] - ) - mock_validate_network_map.assert_called_once_with( - config['migration']['network_map'] - ) - mock_validate_target_environment.assert_called_once_with( - mock_context, - config['migration']['destination_endpoint_id'], - config['migration']['destination_environment'] - ) - mock_validate_storage_mappings.assert_called_once_with( - config['migration']['storage_mappings'] - ) - mock_validate_instances_list_for_transfer.assert_called_once_with( - config['migration']['instances'], - ) - - @mock.patch.object(migration_view, 'single') - @mock.patch.object(migrations.MigrationController, - '_validate_migration_input') - @mock.patch.object(api_utils, 'normalize_user_scripts') - @mock.patch.object(api_utils, 'validate_user_scripts') - @ddt.file_data('data/migration_create.yml') - @ddt.unpack - def test_create( - self, - mock_validate_user_scripts, - mock_normalize_user_scripts, - mock__validate_migration_input, - mock_single, - config, - expected_api_method, - validation_expected, - ): - with mock.patch.object(api.API, - expected_api_method) as mock_api_method: - mock_req = mock.Mock() - mock_context = mock.Mock() - mock_req.environ = {'coriolis.context': mock_context} - mock__validate_migration_input.return_value = \ - (mock.sentinel.value,) * 14 - - result = self.migrations.create(mock_req, config) - - self.assertEqual( - mock_single.return_value, - result - ) - - mock_context.can.assert_called_once_with( - 
"migration:migrations:create") - mock_validate_user_scripts.assert_called_once_with( - config['migration']['user_scripts']) - mock_normalize_user_scripts.assert_called_once_with( - config['migration']['user_scripts'], - config['migration']['instances'] - ) - if validation_expected: - mock__validate_migration_input.assert_called_once_with( - mock_context, config) - mock_api_method.assert_called_once() - mock_single.assert_called_once_with(mock_api_method.return_value) - - @mock.patch.object(api.API, 'delete') - def test_delete( - self, - mock_delete - ): - mock_req = mock.Mock() - mock_context = mock.Mock() - mock_req.environ = {'coriolis.context': mock_context} - id = mock.sentinel.id - - self.assertRaises( - exc.HTTPNoContent, - self.migrations.delete, - mock_req, - id - ) - - mock_context.can.assert_called_once_with("migration:migrations:delete") - mock_delete.assert_called_once_with(mock_context, id) - - @mock.patch.object(api.API, 'delete') - def test_delete_not_found( - self, - mock_delete - ): - mock_req = mock.Mock() - mock_context = mock.Mock() - mock_req.environ = {'coriolis.context': mock_context} - id = mock.sentinel.id - mock_delete.side_effect = exception.NotFound() - - self.assertRaises( - exc.HTTPNotFound, - self.migrations.delete, - mock_req, - id - ) - - mock_context.can.assert_called_once_with("migration:migrations:delete") - mock_delete.assert_called_once_with(mock_context, id) diff --git a/coriolis/tests/api/v1/test_replicas.py b/coriolis/tests/api/v1/test_replicas.py index ff1cdecc..4f9bdbe0 100644 --- a/coriolis/tests/api/v1/test_replicas.py +++ b/coriolis/tests/api/v1/test_replicas.py @@ -200,7 +200,7 @@ def test_create( mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} mock_body = {} - mock_validate_create_body.return_value = (mock.sentinel.value,) * 12 + mock_validate_create_body.return_value = (mock.sentinel.value,) * 13 result = self.replicas.create(mock_req, mock_body) diff --git 
a/coriolis/tests/api/v1/test_router.py b/coriolis/tests/api/v1/test_router.py index 29d13a19..c945ec8e 100644 --- a/coriolis/tests/api/v1/test_router.py +++ b/coriolis/tests/api/v1/test_router.py @@ -3,6 +3,8 @@ from unittest import mock +from coriolis.api.v1 import deployment_actions +from coriolis.api.v1 import deployments from coriolis.api.v1 import diagnostics from coriolis.api.v1 import endpoint_actions from coriolis.api.v1 import endpoint_destination_minion_pool_options @@ -13,8 +15,6 @@ from coriolis.api.v1 import endpoint_source_options from coriolis.api.v1 import endpoint_storage from coriolis.api.v1 import endpoints -from coriolis.api.v1 import migration_actions -from coriolis.api.v1 import migrations from coriolis.api.v1 import minion_pool_actions from coriolis.api.v1 import minion_pools from coriolis.api.v1 import provider_schemas @@ -37,14 +37,14 @@ def setUp(self): super(APIRouterTestCase, self).setUp() self.router = router.APIRouter() + @mock.patch.object(deployments, 'create_resource') + @mock.patch.object(deployment_actions, 'create_resource') @mock.patch.object(diagnostics, 'create_resource') @mock.patch.object(replica_schedules, 'create_resource') @mock.patch.object(replica_tasks_execution_actions, 'create_resource') @mock.patch.object(replica_tasks_executions, 'create_resource') @mock.patch.object(replica_actions, 'create_resource') @mock.patch.object(replicas, 'create_resource') - @mock.patch.object(migration_actions, 'create_resource') - @mock.patch.object(migrations, 'create_resource') @mock.patch.object(provider_schemas, 'create_resource') @mock.patch.object(endpoint_source_options, 'create_resource') @mock.patch.object(endpoint_destination_options, 'create_resource') @@ -78,14 +78,14 @@ def test_setup_routes( mock_endpoint_destination_options_create_resource, mock_endpoint_source_options_create_resource, mock_provider_schemas_create_resource, - mock_migrations_create_resource, - mock_migration_actions_create_resource, 
mock_replicas_create_resource, mock_replica_actions_create_resource, mock_replica_tasks_executions_create_resource, mock_replica_tasks_execution_actions_create_resource, mock_replica_schedules_create_resource, mock_diagnostics_create_resource, + mock_deployment_actions_create_resource, + mock_deployments_create_resource ): ext_mgr = mock.sentinel.ext_mgr mapper = mock.Mock() @@ -160,12 +160,6 @@ def test_setup_routes( 'providers/{platform_name}/schemas/{provider_type}', controller=mock_provider_schemas_create_resource.return_value, ), - mock.call( - 'migration', 'migrations', - controller=mock_migrations_create_resource.return_value, - collection={'detail': 'GET'}, - member={'action': 'POST'} - ), mock.call( 'replica', 'replicas', controller=mock_replicas_create_resource.return_value, @@ -192,6 +186,12 @@ def test_setup_routes( 'diagnostics', 'diagnostics', controller=mock_diagnostics_create_resource.return_value, ), + mock.call( + 'deployment', 'deployments', + controller=mock_deployments_create_resource.return_value, + collection={'detail': 'GET'}, + member={'action': 'POST'} + ), ] connect_calls = [ @@ -211,14 +211,6 @@ def test_setup_routes( action='action', conditions={'method': 'POST'} ), - mock.call( - 'migration_actions', - '/{project_id}/migrations/{id}/actions', - controller= - mock_migration_actions_create_resource.return_value, - action='action', - conditions={'method': 'POST'} - ), mock.call( 'replica_actions', '/{project_id}/replicas/{id}/actions', @@ -235,10 +227,17 @@ def test_setup_routes( action='action', conditions={'method': 'POST'} ), + mock.call( + 'deployment_actions', '/{project_id}/deployments/{id}/actions', + controller=( + mock_deployment_actions_create_resource.return_value), + action='action', + conditions={"method": "POST"} + ), ] self.router._setup_routes(mapper, ext_mgr) mapper.redirect.assert_called_once_with("", "/") - mapper.resource.assert_has_calls(resource_calls) - mapper.connect.assert_has_calls(connect_calls) + 
mapper.resource.assert_has_calls(resource_calls, any_order=True) + mapper.connect.assert_has_calls(connect_calls, any_order=True) diff --git a/coriolis/tests/conductor/rpc/test_client.py b/coriolis/tests/conductor/rpc/test_client.py index c03b8654..98f9c2a7 100644 --- a/coriolis/tests/conductor/rpc/test_client.py +++ b/coriolis/tests/conductor/rpc/test_client.py @@ -7,8 +7,8 @@ from coriolis import constants from coriolis.tests import test_base - INSTANCE_ARGS = { + "replica_scenario": "mock_replica_scenario", "origin_endpoint_id": "mock_origin_endpoint_id", "destination_endpoint_id": "mock_destination_endpoint_id", "origin_minion_pool_id": "mock_origin_minion_pool_id", @@ -223,34 +223,6 @@ def test_delete_replica_disks(self): } self._test(self.client.delete_replica_disks, args) - def test_get_migrations(self): - args = { - "include_tasks": False, - "include_task_info": False, - } - self._test(self.client.get_migrations, args) - - def test_get_migration(self): - args = { - "migration_id": "mock_migration_id", - "include_task_info": False, - } - self._test(self.client.get_migration, args) - - def test_migrate_instances(self): - args = { - **INSTANCE_ARGS, - "replication_count": 1, - "shutdown_instances": False, - "skip_os_morphing": False - } - new_args = { - "notes": None, - "user_scripts": None - } - args.update(new_args) - self._test(self.client.migrate_instances, args) - def test_deploy_replica_instances(self): args = { "replica_id": "mock_replica_id", @@ -262,19 +234,6 @@ def test_deploy_replica_instances(self): } self._test(self.client.deploy_replica_instances, args) - def test_delete_migration(self): - args = { - "migration_id": "mock_migration_id" - } - self._test(self.client.delete_migration, args) - - def test_cancel_migration(self): - args = { - "migration_id": "mock_migration_id", - "force": "mock_force" - } - self._test(self.client.cancel_migration, args) - def test_set_task_host(self): args = { "task_id": "mock_task_id", diff --git 
a/coriolis/tests/conductor/rpc/test_server.py b/coriolis/tests/conductor/rpc/test_server.py index 37ca2714..6c78c0ad 100644 --- a/coriolis/tests/conductor/rpc/test_server.py +++ b/coriolis/tests/conductor/rpc/test_server.py @@ -17,7 +17,6 @@ from coriolis.db.sqlalchemy import models from coriolis import exception from coriolis import keystone -from coriolis.licensing import client as licensing_client from coriolis import schemas from coriolis.tests import test_base from coriolis.tests import testutils @@ -136,136 +135,6 @@ def test_check_delete_reservation_for_transfer_delete_fails(self): transfer_action.reservation_id ) - def test_check_create_reservation_for_transfer(self): - transfer_action = mock.Mock() - transfer_action.instances = ['instance_1', 'instance_2'] - transfer_type = mock.sentinel.transfer_type - self._licensing_client.add_reservation.return_value = { - 'id': mock.sentinel.id - } - self.server._check_create_reservation_for_transfer( - transfer_action, transfer_type) - self._licensing_client.add_reservation.assert_called_once_with( - mock.sentinel.transfer_type, - 2 - ) - self.assertEqual( - transfer_action.reservation_id, - mock.sentinel.id - ) - - def test_check_create_reservation_for_transfer_no_licensing_client(self): - transfer_action = mock.Mock() - transfer_type = mock.sentinel.transfer_type - self.server._licensing_client = None - with self.assertLogs( - 'coriolis.conductor.rpc.server', level=logging.WARNING): - self.server._check_create_reservation_for_transfer( - transfer_action, transfer_type) - - def test_check_reservation_for_transfer(self): - transfer_action = mock.Mock() - transfer_action.reservation_id = mock.sentinel.reservation_id - reservation_type = mock.sentinel.reservation_type - self._licensing_client.check_refresh_reservation.return_value = { - 'id': mock.sentinel.reservation_id} - self.server._check_reservation_for_transfer( - transfer_action, reservation_type) - (self._licensing_client.check_refresh_reservation. 
- assert_called_once_with)( - mock.sentinel.reservation_id) - - def test_check_reservation_for_transfer_no_licensing_client( - self - ): - transfer_action = mock.Mock() - reservation_type = mock.sentinel.reservation_type - self.server._licensing_client = None - with self.assertLogs( - 'coriolis.conductor.rpc.server', level=logging.WARNING): - self.server._check_reservation_for_transfer( - transfer_action, reservation_type) - - @mock.patch.object(server.ConductorServerEndpoint, - '_check_create_reservation_for_transfer') - def test_check_reservation_for_transfer_no_reservation_id( - self, - mock_check_create_reservation_for_transfer - ): - transfer_action = mock.Mock() - transfer_action.reservation_id = None - reservation_type = mock.sentinel.reservation_type - self.server._check_reservation_for_transfer( - transfer_action, reservation_type) - self._licensing_client.check_refresh_reservation.assert_not_called() - mock_check_create_reservation_for_transfer.assert_called_once_with( - transfer_action, reservation_type - ) - - @mock.patch.object(server.ConductorServerEndpoint, - '_check_create_reservation_for_transfer') - def test_check_reservation_for_transfer_exc_code_404( - self, - mock_check_create_reservation_for_transfer - ): - transfer_action = mock.Mock() - transfer_action.reservation_id = mock.sentinel.reservation_id - reservation_type = mock.sentinel.reservation_type - ex = CoriolisTestException() - ex.code = 404 - self._licensing_client.check_refresh_reservation.side_effect = ex - self.server._check_reservation_for_transfer( - transfer_action, reservation_type) - (self._licensing_client.check_refresh_reservation - .assert_called_once_with)( - mock.sentinel.reservation_id) - mock_check_create_reservation_for_transfer.assert_called_once_with( - transfer_action, reservation_type - ) - - @mock.patch.object(server.ConductorServerEndpoint, - '_check_create_reservation_for_transfer') - def test_check_reservation_for_transfer_exc_code_409( - self, - 
mock_check_create_reservation_for_transfer - ): - transfer_action = mock.Mock() - transfer_action.reservation_id = mock.sentinel.reservation_id - reservation_type = mock.sentinel.reservation_type - ex = CoriolisTestException() - ex.code = 409 - self._licensing_client.check_refresh_reservation.side_effect = ex - self.server._check_reservation_for_transfer( - transfer_action, reservation_type) - (self._licensing_client.check_refresh_reservation - .assert_called_once_with)( - mock.sentinel.reservation_id) - mock_check_create_reservation_for_transfer.assert_called_once_with( - transfer_action, reservation_type - ) - - @mock.patch.object(server.ConductorServerEndpoint, - '_check_create_reservation_for_transfer') - def test_check_reservation_for_transfer_exc_code_not_excepted( - self, - mock_check_create_reservation_for_transfer - ): - transfer_action = mock.Mock() - transfer_action.reservation_id = mock.sentinel.reservation_id - reservation_type = mock.sentinel.reservation_type - ex = CoriolisTestException() - self._licensing_client.check_refresh_reservation.side_effect = ex - self.assertRaises( - CoriolisTestException, - self.server._check_reservation_for_transfer, - transfer_action, - reservation_type - ) - (self._licensing_client.check_refresh_reservation - .assert_called_once_with)( - mock.sentinel.reservation_id) - mock_check_create_reservation_for_transfer.assert_not_called() - @mock.patch.object(server.ConductorServerEndpoint, "get_endpoint") @mock.patch.object(db_api, "delete_endpoint") @mock.patch.object(db_api, "update_endpoint") @@ -1343,7 +1212,7 @@ def convert_to_task(task_config): ) @mock.patch.object( server.ConductorServerEndpoint, - "_check_reservation_for_transfer" + "_check_reservation_for_replica" ) @mock.patch.object( server.ConductorServerEndpoint, @@ -1422,10 +1291,7 @@ def create_task_side_effect( mock.sentinel.replica_id, include_task_info=True, ) - mock_check_reservation.assert_called_once_with( - mock_replica, - 
licensing_client.RESERVATION_TYPE_REPLICA - ) + mock_check_reservation.assert_called_once_with(mock_replica) mock_check_replica_running_executions.assert_called_once_with( mock.sentinel.context, mock_replica) mock_check_minion_pools_for_action.assert_called_once_with( @@ -2033,7 +1899,7 @@ def test_check_endpoints_same_destination_connection_info(self): @mock.patch.object(server.ConductorServerEndpoint, 'get_replica') @mock.patch.object(db_api, 'add_replica') @mock.patch.object(server.ConductorServerEndpoint, - '_check_create_reservation_for_transfer') + '_create_reservation_for_replica') @mock.patch.object(server.ConductorServerEndpoint, '_check_minion_pools_for_action') @mock.patch.object(models, 'Replica') @@ -2045,7 +1911,7 @@ def test_create_instances_replica( mock_check_endpoints, mock_replica, mock_check_minion_pools_for_action, - mock_check_create_reservation_for_transfer, + mock_create_reservation_for_replica, mock_add_replica, mock_get_replica ): @@ -2054,6 +1920,7 @@ def test_create_instances_replica( mock_replica.return_value = mock.Mock() result = self.server.create_instances_replica( mock.sentinel.context, + constants.REPLICA_SCENARIO_REPLICA, mock.sentinel.origin_endpoint_id, mock.sentinel.destination_endpoint_id, mock.sentinel.origin_minion_pool_id, @@ -2111,10 +1978,8 @@ def test_create_instances_replica( ) mock_check_minion_pools_for_action.assert_called_once_with( mock.sentinel.context, mock_replica.return_value) - mock_check_create_reservation_for_transfer.assert_called_once_with( - mock_replica.return_value, - licensing_client.RESERVATION_TYPE_REPLICA - ) + mock_create_reservation_for_replica.assert_called_once_with( + mock_replica.return_value) mock_add_replica.assert_called_once_with( mock.sentinel.context, mock_replica.return_value) mock_get_replica.assert_called_once_with( @@ -2157,24 +2022,6 @@ def test__get_replica_not_found(self, mock_get_replica): to_dict=False ) - @mock.patch.object(db_api, 'get_migrations') - def 
test_get_migrations(self, mock_get_migrations): - result = self.server.get_migrations( - mock.sentinel.context, - mock.sentinel.migration_id, - include_task_info=False - ) - self.assertEqual( - mock_get_migrations.return_value, - result - ) - mock_get_migrations.assert_called_once_with( - mock.sentinel.context, - mock.sentinel.migration_id, - include_task_info=False, - to_dict=True - ) - @mock.patch.object(server.ConductorServerEndpoint, '_get_migration') def test_get_migration(self, mock_get_migration): result = testutils.get_wrapped_function(self.server.get_migration)( @@ -2374,7 +2221,7 @@ def test_get_provider_types(self, mock_get_available_providers): ) @mock.patch.object( server.ConductorServerEndpoint, - '_check_reservation_for_transfer' + '_check_reservation_for_replica' ) @mock.patch.object( server.ConductorServerEndpoint, @@ -2452,7 +2299,7 @@ def test_deploy_replica_instance( mock_get_endpoint, mock_check_valid_replica_tasks_execution, mock_check_replica_running_executions, - mock_check_reservation_for_transfer, + mock_check_reservation_for_replica, mock_get_replica, config, expected_tasks, @@ -2515,10 +2362,8 @@ def call_deploy_replica_instance(): mock.sentinel.replica_id, include_task_info=True, ) - mock_check_reservation_for_transfer.assert_called_once_with( - mock_get_replica.return_value, - licensing_client.RESERVATION_TYPE_REPLICA - ) + mock_check_reservation_for_replica.assert_called_once_with( + mock_get_replica.return_value) mock_check_replica_running_executions.assert_called_once_with( mock.sentinel.context, mock_get_replica.return_value @@ -3295,289 +3140,6 @@ def test_report_migration_minions_allocation_error_unexpected_status( mock_cancel_tasks_execution.assert_not_called() mock_set_tasks_execution_status.assert_not_called() - @mock.patch.object( - server.ConductorServerEndpoint, - "get_endpoint" - ) - @mock.patch.object( - server.ConductorServerEndpoint, - "_check_endpoints" - ) - @mock.patch.object( - server.ConductorServerEndpoint, - 
"_get_provider_types" - ) - @mock.patch.object(models, "Migration") - @mock.patch.object(uuid, "uuid4") - @mock.patch.object(models, "TasksExecution") - @mock.patch.object( - server.ConductorServerEndpoint, - "_check_create_reservation_for_transfer" - ) - @mock.patch.object( - server.ConductorServerEndpoint, - "_check_minion_pools_for_action" - ) - @mock.patch.object( - server.ConductorServerEndpoint, - "_get_instance_scripts" - ) - @mock.patch.object( - server.ConductorServerEndpoint, - "_create_task" - ) - @mock.patch.object( - server.ConductorServerEndpoint, - "_check_execution_tasks_sanity" - ) - @mock.patch.object( - db_api, - "add_migration" - ) - @mock.patch.object( - lockutils, - "lock" - ) - @mock.patch.object( - server.ConductorServerEndpoint, - "_minion_manager_client" - ) - @mock.patch.object( - server.ConductorServerEndpoint, - "_set_tasks_execution_status" - ) - @mock.patch.object( - server.ConductorServerEndpoint, - "_begin_tasks" - ) - @mock.patch.object( - server.ConductorServerEndpoint, - "get_migration" - ) - @ddt.file_data("data/migrate_instances_config.yml") - @ddt.unpack - def test_migrate_instances( - self, - mock_get_migration, - mock_begin_tasks, - mock_set_tasks_execution_status, - mock_minion_manager_client, - mock_lock, - mock_add_migration, - mock_check_execution_tasks_sanity, - mock_create_task, - mock_get_instance_scripts, - mock_check_minion_pools_for_action, - mock_check_create_reservation_for_transfer, - mock_tasks_execution, - mock_uuid4, - mock_migration, - mock_get_provider_types, - mock_check_endpoints, - mock_get_endpoint, - config, - expected_tasks, - ): - has_origin_minion_pool = config.get( - 'has_origin_minion_pool', False - ) - has_destination_minion_pool = config.get( - 'has_destination_minion_pool', False - ) - has_os_morphing_pool = config.get( - 'has_os_morphing_pool', False - ) - shutdown_instances = config.get('shutdown_instances', False) - skip_os_morphing = config.get('skip_os_morphing', True) - get_optimal_flavor 
= config.get('get_optimal_flavor', False) - - if get_optimal_flavor: - mock_get_provider_types.return_value = [ - constants.PROVIDER_TYPE_INSTANCE_FLAVOR - ] - - instances = [ - mock.sentinel.instance_1, - mock.sentinel.instance_2, - ] - instance_osmorphing_minion_pool_mappings = {} - if has_os_morphing_pool: - instance_osmorphing_minion_pool_mappings = { - mock.sentinel.instance_1: mock.sentinel.minion_pool_1, - mock.sentinel.instance_2: mock.sentinel.minion_pool_2, - } - - replication_count = 2 - - def create_task_side_effect( - instance, - task_type, - execution, - depends_on=None, - on_error=False, - on_error_only=False - ): - return mock.Mock( - id=task_type, - type=task_type, - instance=instance, - execution=execution, - depends_on=depends_on, - on_error=on_error, - on_error_only=on_error_only, - ) - - mock_create_task.side_effect = create_task_side_effect - - migration = self.server.migrate_instances( - mock.sentinel.context, - mock.sentinel.origin_endpoint_id, - mock.sentinel.destination_endpoint_id, - has_origin_minion_pool - and mock.sentinel.origin_minion_pool_id, - has_destination_minion_pool - and mock.sentinel.destination_minion_pool_id, - instance_osmorphing_minion_pool_mappings, - mock.sentinel.source_environment, - mock.sentinel.destination_environment, - instances, - mock.sentinel.network_map, - mock.sentinel.storage_mappings, - replication_count, - shutdown_instances=shutdown_instances, - notes=mock.sentinel.notes, - skip_os_morphing=skip_os_morphing, - user_scripts=mock.sentinel.user_scripts, - ) - - mock_get_endpoint.assert_has_calls([ - mock.call( - mock.sentinel.context, - mock.sentinel.origin_endpoint_id, - ), - mock.call( - mock.sentinel.context, - mock.sentinel.destination_endpoint_id, - ), - ]) - - mock_check_endpoints.assert_called_once_with( - mock.sentinel.context, - mock_get_endpoint.return_value, - mock_get_endpoint.return_value, - ) - - self.assertEqual( - mock_migration.return_value.last_execution_status, - 
constants.EXECUTION_STATUS_UNEXECUTED, - ) - self.assertEqual( - mock_tasks_execution.return_value.status, - constants.EXECUTION_STATUS_UNEXECUTED, - ) - self.assertEqual( - mock_tasks_execution.return_value.type, - constants.EXECUTION_TYPE_MIGRATION, - ) - - mock_check_create_reservation_for_transfer.assert_called_once_with( - mock_migration.return_value, - licensing_client.RESERVATION_TYPE_MIGRATION, - ) - - mock_check_minion_pools_for_action.assert_called_once_with( - mock.sentinel.context, - mock_migration.return_value, - ) - - for instance in instances: - mock_get_instance_scripts.assert_any_call( - mock.sentinel.user_scripts, - instance, - ) - mock_create_task.assert_has_calls([ - mock.call( - instance, - constants.TASK_TYPE_GET_INSTANCE_INFO, - mock_tasks_execution.return_value, - ), - mock.call( - instance, - constants.TASK_TYPE_VALIDATE_MIGRATION_SOURCE_INPUTS, - mock_tasks_execution.return_value, - ), - mock.call( - instance, - constants.TASK_TYPE_VALIDATE_MIGRATION_DESTINATION_INPUTS, - mock_tasks_execution.return_value, - depends_on=[constants.TASK_TYPE_GET_INSTANCE_INFO] - ), - ]) - - # tasks defined in the yaml config - for task in expected_tasks: - kwargs = {} - if 'on_error' in task: - kwargs = {'on_error': task['on_error']} - if 'on_error_only' in task: - kwargs = {'on_error_only': task['on_error_only']} - mock_create_task.assert_has_calls([ - mock.call( - instance, - task['type'], - mock_tasks_execution.return_value, - depends_on=task['depends_on'], - **kwargs, - ) - ]) - - mock_check_execution_tasks_sanity.assert_called_once_with( - mock_tasks_execution.return_value, - mock_migration.return_value.info, - ) - - mock_add_migration.assert_called_once_with( - mock.sentinel.context, - mock_migration.return_value, - ) - - if any([ - has_origin_minion_pool, - has_destination_minion_pool, - has_os_morphing_pool, - ]): - mock_lock.assert_any_call( - constants.MIGRATION_LOCK_NAME_FORMAT - % mock_migration.return_value.id, - external=True, - ) - 
mock_minion_manager_client\ - .allocate_minion_machines_for_migration\ - .assert_called_once_with( - mock.sentinel.context, - mock_migration.return_value, - include_transfer_minions=True, - include_osmorphing_minions=not skip_os_morphing, - ) - mock_set_tasks_execution_status.assert_called_once_with( - mock.sentinel.context, - mock_tasks_execution.return_value, - constants.EXECUTION_STATUS_AWAITING_MINION_ALLOCATIONS - ) - else: - mock_begin_tasks.assert_called_once_with( - mock.sentinel.context, - mock_migration.return_value, - mock_tasks_execution.return_value, - ) - - mock_get_migration.assert_called_once_with( - mock.sentinel.context, - mock_migration.return_value.id, - ) - - self.assertEqual(migration, mock_get_migration.return_value) - @mock.patch.object(db_api, 'get_tasks_execution') @mock.patch.object( server.ConductorServerEndpoint, @@ -3725,106 +3287,6 @@ def test__get_migration( to_dict=False ) - @mock.patch.object(db_api, 'delete_migration') - @mock.patch.object(server.ConductorServerEndpoint, '_get_migration') - def test_delete_migration( - self, - mock_get_migration, - mock_delete_migration - ): - migration = mock.Mock() - execution = mock.Mock() - execution.status = constants.EXECUTION_STATUS_COMPLETED - migration.executions = [execution] - mock_get_migration.return_value = migration - - testutils.get_wrapped_function(self.server.delete_migration)( - self.server, - mock.sentinel.context, - mock.sentinel.migration_id - ) - - mock_get_migration.assert_called_once_with( - mock.sentinel.context, - mock.sentinel.migration_id - ) - mock_delete_migration.assert_called_once_with( - mock.sentinel.context, - mock.sentinel.migration_id - ) - mock_get_migration.reset_mock() - mock_delete_migration.reset_mock() - execution.status = constants.EXECUTION_STATUS_RUNNING - - self.assertRaises( - exception.InvalidMigrationState, - testutils.get_wrapped_function(self.server.delete_migration), - self.server, - mock.sentinel.context, - mock.sentinel.migration_id - ) - - 
mock_get_migration.assert_called_once_with( - mock.sentinel.context, - mock.sentinel.migration_id - ) - mock_delete_migration.assert_not_called() - - @mock.patch.object(server.ConductorServerEndpoint, - '_cancel_tasks_execution') - @mock.patch.object(lockutils, 'lock') - @mock.patch.object(server.ConductorServerEndpoint, '_get_migration') - @ddt.file_data("data/cancel_migration_config.yml") - @ddt.unpack - def test_cancel_migration( - self, - mock_get_migration, - mock_lock, - mock_cancel_tasks_execution, - config, - raises_exception - ): - migration = mock.Mock() - migration.executions = [] - statuses = config.get('execution_statuses', []) - for status in statuses: - execution = mock.Mock() - execution.status = getattr(constants, status) - migration.executions.append(execution) - mock_get_migration.return_value = migration - force = config.get('force', False) - - if raises_exception: - self.assertRaises( - exception.InvalidMigrationState, - testutils.get_wrapped_function(self.server.cancel_migration), - self.server, - mock.sentinel.context, - mock.sentinel.migration_id, - force=force - ) - else: - testutils.get_wrapped_function(self.server.cancel_migration)( - self.server, - mock.sentinel.context, - mock.sentinel.migration_id, - force=force - ) - mock_lock.assert_called_once_with( - constants.EXECUTION_LOCK_NAME_FORMAT % execution.id, - external=True - ) - mock_cancel_tasks_execution.assert_called_once_with( - mock.sentinel.context, - execution, - force=force - ) - - mock_get_migration.assert_called_once_with( - mock.sentinel.context, - mock.sentinel.migration_id - ) - @mock.patch.object(db_api, 'get_tasks_execution') @mock.patch.object( server.ConductorServerEndpoint, @@ -3946,7 +3408,7 @@ def call_set_tasks_execution_status(new_execution_status): ) mock_delete_trust.assert_not_called() mock_get_action.assert_called_once_with( - context, mock_set_execution_status.return_value.action_id) + context, execution.action_id) 
mock_deallocate_minion_machines_for_action.assert_called_once_with( context, mock_get_action.return_value) diff --git a/coriolis/tests/migrations/__init__.py b/coriolis/tests/migrations/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/coriolis/tests/migrations/test_api.py b/coriolis/tests/migrations/test_api.py deleted file mode 100644 index 4fa4fa4a..00000000 --- a/coriolis/tests/migrations/test_api.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2024 Cloudbase Solutions Srl -# All Rights Reserved. - -from unittest import mock - -from coriolis.migrations import api as migrations_module -from coriolis.tests import test_base - - -class APITestCase(test_base.CoriolisBaseTestCase): - """Test suite for the Coriolis Migrations API.""" - - def setUp(self): - super(APITestCase, self).setUp() - self.api = migrations_module.API() - self.rpc_client = mock.MagicMock() - self.api._rpc_client = self.rpc_client - self.ctxt = mock.sentinel.ctxt - self.migration_id = mock.sentinel.migration_id - - def test_migrate_instances(self): - origin_endpoint_id = mock.sentinel.origin_endpoint_id - destination_endpoint_id = mock.sentinel.destination_endpoint_id - origin_minion_pool_id = mock.sentinel.origin_minion_pool_id - destination_minion_pool_id = mock.sentinel.destination_minion_pool_id - instance_osmorphing_minion_pool_mappings = ( - mock.sentinel.instance_osmorphing_minion_pool_mappings) - source_environment = mock.sentinel.source_environment - destination_environment = mock.sentinel.destination_environment - instances = mock.sentinel.instances - network_map = mock.sentinel.network_map - storage_mappings = mock.sentinel.storage_mappings - replication_count = mock.sentinel.replication_count - shutdown_instances = mock.sentinel.shutdown_instances - - result = self.api.migrate_instances( - self.ctxt, origin_endpoint_id, destination_endpoint_id, - origin_minion_pool_id, destination_minion_pool_id, - instance_osmorphing_minion_pool_mappings, source_environment, - 
destination_environment, instances, network_map, storage_mappings, - replication_count, shutdown_instances) - self.rpc_client.migrate_instances.assert_called_once_with( - self.ctxt, origin_endpoint_id, destination_endpoint_id, - origin_minion_pool_id, destination_minion_pool_id, - instance_osmorphing_minion_pool_mappings, source_environment, - destination_environment, instances, network_map, storage_mappings, - replication_count, shutdown_instances=shutdown_instances, - notes=None, skip_os_morphing=False, user_scripts=None) - self.assertEqual(result, - self.rpc_client.migrate_instances.return_value) - - def test_deploy_replica_instances(self): - replica_id = mock.sentinel.replica_id - instance_osmorphing_minion_pool_mappings = ( - mock.sentinel.instance_osmorphing_minion_pool_mappings) - - result = self.api.deploy_replica_instances( - self.ctxt, replica_id, instance_osmorphing_minion_pool_mappings) - - self.rpc_client.deploy_replica_instances.assert_called_once_with( - self.ctxt, replica_id, - instance_osmorphing_minion_pool_mappings=( - instance_osmorphing_minion_pool_mappings), - clone_disks=False, force=False, skip_os_morphing=False, - user_scripts=None) - self.assertEqual(result, - self.rpc_client.deploy_replica_instances.return_value) - - def test_delete(self): - self.api.delete(self.ctxt, self.migration_id) - self.rpc_client.delete_migration.assert_called_once_with( - self.ctxt, self.migration_id) - - def test_cancel(self): - self.api.cancel(self.ctxt, self.migration_id, True) - self.rpc_client.cancel_migration.assert_called_once_with( - self.ctxt, self.migration_id, True) - - def test_get_migrations(self): - result = self.api.get_migrations(self.ctxt, include_tasks=False, - include_task_info=False) - - self.rpc_client.get_migrations.assert_called_once_with( - self.ctxt, False, include_task_info=False) - self.assertEqual(result, self.rpc_client.get_migrations.return_value) - - def test_get_migration(self): - result = self.api.get_migration(self.ctxt, 
self.migration_id, - include_task_info=False) - - self.rpc_client.get_migration.assert_called_once_with( - self.ctxt, self.migration_id, include_task_info=False) - self.assertEqual(result, self.rpc_client.get_migration.return_value) diff --git a/coriolis/tests/replicas/test_api.py b/coriolis/tests/replicas/test_api.py index 5b7445f6..3611ffb9 100644 --- a/coriolis/tests/replicas/test_api.py +++ b/coriolis/tests/replicas/test_api.py @@ -32,13 +32,15 @@ def test_create(self): storage_mappings = mock.sentinel.storage_mappings result = self.api.create( - self.ctxt, origin_endpoint_id, destination_endpoint_id, + self.ctxt, mock.sentinel.replica_scenario, + origin_endpoint_id, destination_endpoint_id, origin_minion_pool_id, destination_minion_pool_id, instance_osmorphing_minion_pool_mappings, source_environment, destination_environment, instances, network_map, storage_mappings) self.rpc_client.create_instances_replica.assert_called_once_with( - self.ctxt, origin_endpoint_id, destination_endpoint_id, + self.ctxt, mock.sentinel.replica_scenario, + origin_endpoint_id, destination_endpoint_id, origin_minion_pool_id, destination_minion_pool_id, instance_osmorphing_minion_pool_mappings, source_environment, destination_environment, instances, network_map, storage_mappings, From 1c687900ed3df82f35680478dfaf3646b2e434f9 Mon Sep 17 00:00:00 2001 From: Daniel Vincze Date: Fri, 9 Aug 2024 16:11:31 +0300 Subject: [PATCH 16/24] Do not delete reservation on action error or cancellation This patch makes sure that reservations only get deleted when a Transfer action is being deleted. 
--- coriolis/conductor/rpc/server.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/coriolis/conductor/rpc/server.py b/coriolis/conductor/rpc/server.py index d2272491..05540bb1 100644 --- a/coriolis/conductor/rpc/server.py +++ b/coriolis/conductor/rpc/server.py @@ -3205,11 +3205,6 @@ def set_task_error(self, ctxt, task_id, exception_details): else: self._cancel_tasks_execution(ctxt, execution) - # NOTE: if this was a migration, make sure to delete - # its associated reservation. - if execution.type == constants.EXECUTION_TYPE_MIGRATION: - self._check_delete_reservation_for_transfer(action) - @task_synchronized def add_task_event(self, ctxt, task_id, level, message): LOG.info("Adding event for task '%s': %s", task_id, message) From f86f74a2bb5dda578c94160b9bb0f854f5353c10 Mon Sep 17 00:00:00 2001 From: Daniel Vincze Date: Wed, 16 Oct 2024 19:54:57 +0300 Subject: [PATCH 17/24] Fix running deployment message --- coriolis/conductor/rpc/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coriolis/conductor/rpc/server.py b/coriolis/conductor/rpc/server.py index 05540bb1..e31c0f89 100644 --- a/coriolis/conductor/rpc/server.py +++ b/coriolis/conductor/rpc/server.py @@ -1351,7 +1351,7 @@ def _check_running_replica_migrations(ctxt, replica_id): if [m.id for m in migrations if m.executions[0].status in ( constants.ACTIVE_EXECUTION_STATUSES)]: raise exception.InvalidReplicaState( - "Replica '%s' is currently being migrated" % replica_id) + "Transfer '%s' is currently being deployed" % replica_id) @staticmethod def _check_running_executions(action): From 01010f53e719386cc57abc5b79980fd29727678f Mon Sep 17 00:00:00 2001 From: Daniel Vincze Date: Mon, 21 Oct 2024 18:36:19 +0300 Subject: [PATCH 18/24] Fix reservation management on minion pool errors Fixes an edge-case where if a reservation cannot be found for an existing replica, and the minion pool set to it is also in an invalid state, a new reservation would be created but never saved to DB 
because of the minion pool error. This patch makes sure that the minion pool validation is done before the reservation checks when creating replica executions. --- coriolis/conductor/rpc/server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/coriolis/conductor/rpc/server.py b/coriolis/conductor/rpc/server.py index e31c0f89..548aee1b 100644 --- a/coriolis/conductor/rpc/server.py +++ b/coriolis/conductor/rpc/server.py @@ -911,9 +911,9 @@ def _check_task_cls_param_requirements(task, instance_task_info_keys): @replica_synchronized def execute_replica_tasks(self, ctxt, replica_id, shutdown_instances): replica = self._get_replica(ctxt, replica_id, include_task_info=True) - self._check_reservation_for_replica(replica) self._check_replica_running_executions(ctxt, replica) self._check_minion_pools_for_action(ctxt, replica) + self._check_reservation_for_replica(replica) execution = models.TasksExecution() execution.id = str(uuid.uuid4()) @@ -1396,7 +1396,6 @@ def deploy_replica_instances( instance_osmorphing_minion_pool_mappings=None, skip_os_morphing=False, user_scripts=None): replica = self._get_replica(ctxt, replica_id, include_task_info=True) - self._check_reservation_for_replica(replica) self._check_replica_running_executions(ctxt, replica) self._check_valid_replica_tasks_execution(replica, force) user_scripts = user_scripts or replica.user_scripts @@ -1445,6 +1444,7 @@ def deploy_replica_instances( migration.instance_osmorphing_minion_pool_mappings.update( instance_osmorphing_minion_pool_mappings) self._check_minion_pools_for_action(ctxt, migration) + self._check_reservation_for_replica(replica) execution = models.TasksExecution() migration.executions = [execution] From bd248172e57e55240e9b8b28de2a5f2b431a617c Mon Sep 17 00:00:00 2001 From: Daniel Vincze Date: Tue, 22 Oct 2024 20:44:08 +0300 Subject: [PATCH 19/24] Fix fulfilled migration exception Raises proper exception when user attempts to re-execute or re-deploy an already fulfilled 
migration action. --- coriolis/conductor/rpc/server.py | 10 +++------- coriolis/exception.py | 8 ++++++++ 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/coriolis/conductor/rpc/server.py b/coriolis/conductor/rpc/server.py index 548aee1b..d8decbe7 100644 --- a/coriolis/conductor/rpc/server.py +++ b/coriolis/conductor/rpc/server.py @@ -402,13 +402,9 @@ def _check_reservation_for_replica(self, replica): fulfilled_at = reservation.get("fulfilled_at", None) if scenario == constants.REPLICA_SCENARIO_LIVE_MIGRATION and ( fulfilled_at): - raise exception.LicensingException( - message=f"The Live Migration operation with ID " - f"'{replica.id}' (licensing reservation " - f"'{reservation_id}' has already been " - f"fulfilled on {fulfilled_at}. Please " - f"create a new Live Migration operation " - f"to create a new licensing reservation.") + raise exception.MigrationLicenceFulfilledException( + action_id=replica.id, reservation_id=reservation_id, + fulfilled_at=fulfilled_at) replica.reservation_id = ( self._licensing_client.check_refresh_reservation( diff --git a/coriolis/exception.py b/coriolis/exception.py index db311782..8e0a54d6 100644 --- a/coriolis/exception.py +++ b/coriolis/exception.py @@ -523,3 +523,11 @@ class OSMorphingWinRMOperationTimeout(OSMorphingOperationTimeout): " or the command execution time exceeds the timeout set. Try extending" " the timeout by editing the 'default_osmorphing_operation_timeout' " "in Coriolis' static configuration file.") + + +class MigrationLicenceFulfilledException(Invalid): + message = ( + "The Live Migration operation with ID '%(action_id)s' (licensing " + "reservation '%(reservation_id)s') has already been fulfilled on " + "%(fulfilled_at)s. 
Please create a new Live Migration operation to " + "create a new licensing reservation.") From d038551b7a149d67140b5430141915aaf4d87a08 Mon Sep 17 00:00:00 2001 From: Daniel Vincze Date: Mon, 28 Oct 2024 13:33:36 +0200 Subject: [PATCH 20/24] Update unit tests Updates some conductor server unit tests --- coriolis/tests/conductor/rpc/test_server.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/coriolis/tests/conductor/rpc/test_server.py b/coriolis/tests/conductor/rpc/test_server.py index 6c78c0ad..857c6b64 100644 --- a/coriolis/tests/conductor/rpc/test_server.py +++ b/coriolis/tests/conductor/rpc/test_server.py @@ -2362,8 +2362,6 @@ def call_deploy_replica_instance(): mock.sentinel.replica_id, include_task_info=True, ) - mock_check_reservation_for_replica.assert_called_once_with( - mock_get_replica.return_value) mock_check_replica_running_executions.assert_called_once_with( mock.sentinel.context, mock_get_replica.return_value @@ -2409,6 +2407,8 @@ def create_task_side_effect( mock.sentinel.context, mock_migration.return_value ) + mock_check_reservation_for_replica.assert_called_once_with( + mock_get_replica.return_value) self.assertEqual( mock_tasks_execution.return_value.status, @@ -5649,6 +5649,3 @@ def test_set_task_error_os_morphing( mock.sentinel.task_id, mock.sentinel.exception_details, ) - mock_check_delete_reservation_for_transfer.assert_called_once_with( - mock_get_action.return_value, - ) From efc0876c36d9418305f7cb262593affbc172ca6f Mon Sep 17 00:00:00 2001 From: Daniel Vincze Date: Tue, 29 Oct 2024 15:26:01 +0200 Subject: [PATCH 21/24] Refactor DB layer Refactors include mostly renames of Replica into Transfer, and Migration into Deployment. They also include some DB column refactoring (removing unnecessary ones). 
--- coriolis/conductor/rpc/server.py | 67 ++- coriolis/db/api.py | 232 ++++---- .../versions/020_rename_tables.py | 30 ++ coriolis/db/sqlalchemy/models.py | 52 +- .../tests/api/v1/test_replica_schedules.py | 16 +- .../test_replica_tasks_execution_actions.py | 2 +- .../api/v1/test_replica_tasks_executions.py | 16 +- coriolis/tests/conductor/rpc/test_server.py | 496 +++++++++--------- .../tests/replica_cron/rpc/test_server.py | 4 +- coriolis/tests/replica_cron/test_api.py | 2 +- .../replica_tasks_executions/test_api.py | 2 +- coriolis/tests/replicas/test_api.py | 2 +- 12 files changed, 471 insertions(+), 450 deletions(-) create mode 100644 coriolis/db/sqlalchemy/migrate_repo/versions/020_rename_tables.py diff --git a/coriolis/conductor/rpc/server.py b/coriolis/conductor/rpc/server.py index d8decbe7..1d9bad4e 100644 --- a/coriolis/conductor/rpc/server.py +++ b/coriolis/conductor/rpc/server.py @@ -487,7 +487,7 @@ def get_endpoint(self, ctxt, endpoint_id): @endpoint_synchronized def delete_endpoint(self, ctxt, endpoint_id): - q_replicas_count = db_api.get_endpoint_replicas_count( + q_replicas_count = db_api.get_endpoint_transfers_count( ctxt, endpoint_id) if q_replicas_count != 0: raise exception.NotAuthorized("%s replicas would be orphaned!" 
% @@ -1102,7 +1102,7 @@ def execute_replica_tasks(self, ctxt, replica_id, shutdown_instances): ctxt, replica.id, instance, replica.info[instance]) # add new execution to DB: - db_api.add_replica_tasks_execution(ctxt, execution) + db_api.add_transfer_tasks_execution(ctxt, execution) LOG.info("Replica tasks execution added to DB: %s", execution.id) uses_minion_pools = any([ @@ -1124,7 +1124,7 @@ def execute_replica_tasks(self, ctxt, replica_id, shutdown_instances): def get_replica_tasks_executions(self, ctxt, replica_id, include_tasks=False, include_task_info=False): - return db_api.get_replica_tasks_executions( + return db_api.get_transfer_tasks_executions( ctxt, replica_id, include_tasks, include_task_info=include_task_info, to_dict=True) @@ -1144,7 +1144,7 @@ def delete_replica_tasks_execution(self, ctxt, replica_id, execution_id): "Cannot delete execution '%s' for Replica '%s' as it is " "currently in '%s' state." % ( execution_id, replica_id, execution.status)) - db_api.delete_replica_tasks_execution(ctxt, execution_id) + db_api.delete_transfer_tasks_execution(ctxt, execution_id) @tasks_execution_synchronized def cancel_replica_tasks_execution(self, ctxt, replica_id, execution_id, @@ -1165,7 +1165,7 @@ def cancel_replica_tasks_execution(self, ctxt, replica_id, execution_id, def _get_replica_tasks_execution(self, ctxt, replica_id, execution_id, include_task_info=False, to_dict=False): - execution = db_api.get_replica_tasks_execution( + execution = db_api.get_transfer_tasks_execution( ctxt, replica_id, execution_id, include_task_info=include_task_info, to_dict=to_dict) if not execution: @@ -1176,7 +1176,7 @@ def _get_replica_tasks_execution(self, ctxt, replica_id, execution_id, def get_replicas(self, ctxt, include_tasks_executions=False, include_task_info=False): - return db_api.get_replicas( + return db_api.get_transfers( ctxt, include_tasks_executions, include_task_info=include_task_info, to_dict=True) @@ -1191,7 +1191,7 @@ def delete_replica(self, ctxt, 
replica_id): replica = self._get_replica(ctxt, replica_id) self._check_replica_running_executions(ctxt, replica) self._check_delete_reservation_for_transfer(replica) - db_api.delete_replica(ctxt, replica_id) + db_api.delete_transfer(ctxt, replica_id) @replica_synchronized def delete_replica_disks(self, ctxt, replica_id): @@ -1238,7 +1238,7 @@ def delete_replica_disks(self, ctxt, replica_id): for instance in replica.instances: db_api.update_transfer_action_info_for_instance( ctxt, replica.id, instance, replica.info[instance]) - db_api.add_replica_tasks_execution(ctxt, execution) + db_api.add_transfer_tasks_execution(ctxt, execution) LOG.info("Replica tasks execution created: %s", execution.id) self._begin_tasks(ctxt, replica, execution) @@ -1281,7 +1281,7 @@ def create_instances_replica(self, ctxt, replica_scenario, ctxt, destination_endpoint_id) self._check_endpoints(ctxt, origin_endpoint, destination_endpoint) - replica = models.Replica() + replica = models.Transfer() replica.id = str(uuid.uuid4()) replica.base_id = replica.id replica.scenario = replica_scenario @@ -1307,13 +1307,13 @@ def create_instances_replica(self, ctxt, replica_scenario, self._create_reservation_for_replica(replica) - db_api.add_replica(ctxt, replica) + db_api.add_transfer(ctxt, replica) LOG.info("Replica created: %s", replica.id) return self.get_replica(ctxt, replica.id) def _get_replica(self, ctxt, replica_id, include_task_info=False, to_dict=False): - replica = db_api.get_replica( + replica = db_api.get_transfer( ctxt, replica_id, include_task_info=include_task_info, to_dict=to_dict) if not replica: @@ -1329,10 +1329,9 @@ def get_migration(self, ctxt, migration_id, include_task_info=False): def get_deployments(self, ctxt, include_tasks, include_task_info=False): - return db_api.get_migrations( + return db_api.get_deployments( ctxt, include_tasks, include_task_info=include_task_info, - replica_migrations_only=True, to_dict=True) @deployment_synchronized @@ -1343,7 +1342,7 @@ def 
get_deployment(self, ctxt, deployment_id, include_task_info=False): @staticmethod def _check_running_replica_migrations(ctxt, replica_id): - migrations = db_api.get_replica_migrations(ctxt, replica_id) + migrations = db_api.get_transfer_deployments(ctxt, replica_id) if [m.id for m in migrations if m.executions[0].status in ( constants.ACTIVE_EXECUTION_STATUSES)]: raise exception.InvalidReplicaState( @@ -1411,7 +1410,7 @@ def deploy_replica_instances( instances = replica.instances - migration = models.Migration() + migration = models.Deployment() migration.id = str(uuid.uuid4()) migration.base_id = migration.id migration.origin_endpoint_id = replica.origin_endpoint_id @@ -1612,7 +1611,7 @@ def deploy_replica_instances( on_error=True) self._check_execution_tasks_sanity(execution, migration.info) - db_api.add_migration(ctxt, migration) + db_api.add_deployment(ctxt, migration) LOG.info("Migration created: %s", migration.id) if not skip_os_morphing and ( @@ -1755,7 +1754,7 @@ def confirm_replica_minions_allocation( self._update_task_info_for_minion_allocations( ctxt, replica, minion_machine_allocations) - last_replica_execution = db_api.get_replica_tasks_execution( + last_replica_execution = db_api.get_transfer_tasks_execution( ctxt, replica.id, last_replica_execution.id) self._begin_tasks( ctxt, replica, last_replica_execution) @@ -1832,7 +1831,7 @@ def report_migration_minions_allocation_error( def _get_migration(self, ctxt, migration_id, include_task_info=False, to_dict=False): - migration = db_api.get_migration( + migration = db_api.get_deployment( ctxt, migration_id, include_task_info=include_task_info, to_dict=to_dict) if not migration: @@ -1847,7 +1846,7 @@ def _delete_migration(self, ctxt, migration_id): raise exception.InvalidMigrationState( "Cannot delete Migration '%s' as it is currently in " "'%s' state." 
% (migration_id, execution.status)) - db_api.delete_migration(ctxt, migration_id) + db_api.delete_deployment(ctxt, migration_id) @deployment_synchronized def delete_deployment(self, ctxt, deployment_id): @@ -2078,7 +2077,7 @@ def _update_reservation_fulfillment_for_execution(self, ctxt, execution): transfer_id = transfer_action.base_id if transfer_action.type == constants.TRANSFER_ACTION_TYPE_MIGRATION: deployment = self._get_migration(ctxt, transfer_id) - transfer_id = deployment.replica_id + transfer_id = deployment.transfer_id transfer_action = self._get_replica( ctxt, transfer_id, include_task_info=False) else: @@ -2633,8 +2632,8 @@ def _update_replica_volumes_info(self, ctxt, replica_id, instance, def _update_volumes_info_for_migration_parent_replica( self, ctxt, migration_id, instance, updated_task_info): - migration = db_api.get_migration(ctxt, migration_id) - replica_id = migration.replica_id + migration = db_api.get_deployment(ctxt, migration_id) + replica_id = migration.transfer_id with lockutils.lock( constants.REPLICA_LOCK_NAME_FORMAT % replica_id, @@ -2751,7 +2750,7 @@ def _check_other_tasks_running(execution, current_task): # NOTE: considering all the instances of the Replica get # the same params, it doesn't matter which instance's # update task finishes last: - db_api.update_replica( + db_api.update_transfer( ctxt, execution.action_id, task_info) elif task_type in ( @@ -3242,7 +3241,7 @@ def update_task_progress_update( def _get_replica_schedule(self, ctxt, replica_id, schedule_id, expired=True): - schedule = db_api.get_replica_schedule( + schedule = db_api.get_transfer_schedule( ctxt, replica_id, schedule_id, expired=expired) if not schedule: raise exception.NotFound( @@ -3255,17 +3254,17 @@ def create_replica_schedule(self, ctxt, replica_id, shutdown_instance): keystone.create_trust(ctxt) replica = self._get_replica(ctxt, replica_id) - replica_schedule = models.ReplicaSchedule() + replica_schedule = models.TransferSchedule() replica_schedule.id = 
str(uuid.uuid4()) - replica_schedule.replica = replica - replica_schedule.replica_id = replica_id + replica_schedule.transfer = replica + replica_schedule.transfer_id = replica_id replica_schedule.schedule = schedule replica_schedule.expiration_date = exp_date replica_schedule.enabled = enabled replica_schedule.shutdown_instance = shutdown_instance replica_schedule.trust_id = ctxt.trust_id - db_api.add_replica_schedule( + db_api.add_transfer_schedule( ctxt, replica_schedule, lambda ctxt, sched: self._replica_cron_client.register( ctxt, sched)) @@ -3275,7 +3274,7 @@ def create_replica_schedule(self, ctxt, replica_id, @schedule_synchronized def update_replica_schedule(self, ctxt, replica_id, schedule_id, updated_values): - db_api.update_replica_schedule( + db_api.update_transfer_schedule( ctxt, replica_id, schedule_id, updated_values, None, lambda ctxt, sched: self._replica_cron_client.register( ctxt, sched)) @@ -3300,15 +3299,15 @@ def delete_replica_schedule(self, ctxt, replica_id, schedule_id): 'Replica Schedule cannot be deleted while the Replica is in ' '%s state. 
Please wait for the Replica execution to finish' % (replica_status)) - db_api.delete_replica_schedule( + db_api.delete_transfer_schedule( ctxt, replica_id, schedule_id, None, lambda ctxt, sched: self._cleanup_schedule_resources( ctxt, sched)) @replica_synchronized def get_replica_schedules(self, ctxt, replica_id=None, expired=True): - return db_api.get_replica_schedules( - ctxt, replica_id=replica_id, expired=expired) + return db_api.get_transfer_schedules( + ctxt, transfer_id=replica_id, expired=expired) @schedule_synchronized def get_replica_schedule(self, ctxt, replica_id, @@ -3326,7 +3325,7 @@ def update_replica( "instance_osmorphing_minion_pool_mappings"] if any([mpf in updated_properties for mpf in minion_pool_fields]): # NOTE: this is just a dummy Replica model to use for validation: - dummy = models.Replica() + dummy = models.Transfer() dummy.id = replica.id dummy.instances = replica.instances dummy.origin_endpoint_id = replica.origin_endpoint_id @@ -3409,7 +3408,7 @@ def update_replica( db_api.update_transfer_action_info_for_instance( ctxt, replica.id, instance, replica.info[instance]) - db_api.add_replica_tasks_execution(ctxt, execution) + db_api.add_transfer_tasks_execution(ctxt, execution) LOG.debug("Execution for Replica update tasks created: %s", execution.id) diff --git a/coriolis/db/api.py b/coriolis/db/api.py index 65be5d45..06361c6a 100644 --- a/coriolis/db/api.py +++ b/coriolis/db/api.py @@ -98,26 +98,26 @@ def _update_sqlalchemy_object_fields( "of type '%s': %s" % (type(obj), values_to_update.keys())) -def _get_replica_schedules_filter(context, replica_id=None, - schedule_id=None, expired=True): +def _get_transfer_schedules_filter(context, transfer_id=None, + schedule_id=None, expired=True): now = timeutils.utcnow() - q = _soft_delete_aware_query(context, models.ReplicaSchedule) - q = q.join(models.Replica) + q = _soft_delete_aware_query(context, models.TransferSchedule) + q = q.join(models.Transfer) sched_filter = q.filter() if 
is_user_context(context): sched_filter = sched_filter.filter( - models.Replica.project_id == context.project_id) + models.Transfer.project_id == context.project_id) - if replica_id: + if transfer_id: sched_filter = sched_filter.filter( - models.Replica.id == replica_id) + models.Transfer.id == transfer_id) if schedule_id: sched_filter = sched_filter.filter( - models.ReplicaSchedule.id == schedule_id) + models.TransferSchedule.id == schedule_id) if not expired: sched_filter = sched_filter.filter( - or_(models.ReplicaSchedule.expiration_date == null(), - models.ReplicaSchedule.expiration_date > now)) + or_(models.TransferSchedule.expiration_date == null(), + models.TransferSchedule.expiration_date > now)) return sched_filter @@ -274,37 +274,37 @@ def delete_endpoint(context, endpoint_id): @enginefacade.reader -def get_replica_tasks_executions(context, replica_id, include_tasks=False, - include_task_info=False, to_dict=False): +def get_transfer_tasks_executions(context, transfer_id, include_tasks=False, + include_task_info=False, to_dict=False): q = _soft_delete_aware_query(context, models.TasksExecution) - q = q.join(models.Replica) + q = q.join(models.Transfer) if include_task_info: q = q.options(orm.joinedload('action').undefer('info')) if include_tasks: q = _get_tasks_with_details_options(q) if is_user_context(context): - q = q.filter(models.Replica.project_id == context.project_id) + q = q.filter(models.Transfer.project_id == context.project_id) db_result = q.filter( - models.Replica.id == replica_id).all() + models.Transfer.id == transfer_id).all() if to_dict: return [e.to_dict() for e in db_result] return db_result @enginefacade.reader -def get_replica_tasks_execution(context, replica_id, execution_id, - include_task_info=False, to_dict=False): +def get_transfer_tasks_execution(context, transfer_id, execution_id, + include_task_info=False, to_dict=False): q = _soft_delete_aware_query(context, models.TasksExecution).join( - models.Replica) + models.Transfer) if 
include_task_info: q = q.options(orm.joinedload('action').undefer('info')) q = _get_tasks_with_details_options(q) if is_user_context(context): - q = q.filter(models.Replica.project_id == context.project_id) + q = q.filter(models.Transfer.project_id == context.project_id) db_result = q.filter( - models.Replica.id == replica_id, + models.Transfer.id == transfer_id, models.TasksExecution.id == execution_id).first() if to_dict and db_result is not None: return db_result.to_dict() @@ -312,7 +312,7 @@ def get_replica_tasks_execution(context, replica_id, execution_id, @enginefacade.writer -def add_replica_tasks_execution(context, execution): +def add_transfer_tasks_execution(context, execution): if is_user_context(context): if execution.action.project_id != context.project_id: raise exception.NotAuthorized() @@ -330,12 +330,12 @@ def add_replica_tasks_execution(context, execution): @enginefacade.writer -def delete_replica_tasks_execution(context, execution_id): +def delete_transfer_tasks_execution(context, execution_id): q = _soft_delete_aware_query(context, models.TasksExecution).filter( models.TasksExecution.id == execution_id) if is_user_context(context): - if not q.join(models.Replica).filter( - models.Replica.project_id == context.project_id).first(): + if not q.join(models.Transfer).filter( + models.Transfer.project_id == context.project_id).first(): raise exception.NotAuthorized() count = q.soft_delete() if count == 0: @@ -343,28 +343,28 @@ def delete_replica_tasks_execution(context, execution_id): @enginefacade.reader -def get_replica_schedules(context, replica_id=None, expired=True): - sched_filter = _get_replica_schedules_filter( - context, replica_id=replica_id, expired=expired) +def get_transfer_schedules(context, transfer_id=None, expired=True): + sched_filter = _get_transfer_schedules_filter( + context, transfer_id=transfer_id, expired=expired) return sched_filter.all() @enginefacade.reader -def get_replica_schedule(context, replica_id, schedule_id, 
expired=True): - sched_filter = _get_replica_schedules_filter( - context, replica_id=replica_id, schedule_id=schedule_id, +def get_transfer_schedule(context, transfer_id, schedule_id, expired=True): + sched_filter = _get_transfer_schedules_filter( + context, transfer_id=transfer_id, schedule_id=schedule_id, expired=expired) return sched_filter.first() @enginefacade.writer -def update_replica_schedule(context, replica_id, schedule_id, - updated_values, pre_update_callable=None, - post_update_callable=None): +def update_transfer_schedule(context, transfer_id, schedule_id, + updated_values, pre_update_callable=None, + post_update_callable=None): # NOTE(gsamfira): we need to refactor the DB layer a bit to allow # two-phase transactions or at least allow running these functions # inside a single transaction block. - schedule = get_replica_schedule(context, replica_id, schedule_id) + schedule = get_transfer_schedule(context, transfer_id, schedule_id) if pre_update_callable: pre_update_callable(schedule=schedule) for val in ["schedule", "expiration_date", "enabled", "shutdown_instance"]: @@ -378,23 +378,23 @@ def update_replica_schedule(context, replica_id, schedule_id, @enginefacade.writer -def delete_replica_schedule(context, replica_id, - schedule_id, pre_delete_callable=None, - post_delete_callable=None): +def delete_transfer_schedule(context, transfer_id, + schedule_id, pre_delete_callable=None, + post_delete_callable=None): # NOTE(gsamfira): we need to refactor the DB layer a bit to allow # two-phase transactions or at least allow running these functions # inside a single transaction block. 
- q = _soft_delete_aware_query(context, models.ReplicaSchedule).filter( - models.ReplicaSchedule.id == schedule_id, - models.ReplicaSchedule.replica_id == replica_id) + q = _soft_delete_aware_query(context, models.TransferSchedule).filter( + models.TransferSchedule.id == schedule_id, + models.TransferSchedule.transfer_id == transfer_id) schedule = q.first() if not schedule: raise exception.NotFound( "No such schedule") if is_user_context(context): - if not q.join(models.Replica).filter( - models.Replica.project_id == context.project_id).first(): + if not q.join(models.Transfer).filter( + models.Transfer.project_id == context.project_id).first(): raise exception.NotAuthorized() if pre_delete_callable: pre_delete_callable(context, schedule) @@ -406,39 +406,39 @@ def delete_replica_schedule(context, replica_id, @enginefacade.writer -def add_replica_schedule(context, schedule, post_create_callable=None): +def add_transfer_schedule(context, schedule, post_create_callable=None): # NOTE(gsamfira): we need to refactor the DB layer a bit to allow # two-phase transactions or at least allow running these functions # inside a single transaction block. 
- if schedule.replica.project_id != context.project_id: + if schedule.transfer.project_id != context.project_id: raise exception.NotAuthorized() _session(context).add(schedule) if post_create_callable: post_create_callable(context, schedule) -def _get_replica_with_tasks_executions_options(q): - return q.options(orm.joinedload(models.Replica.executions)) +def _get_transfer_with_tasks_executions_options(q): + return q.options(orm.joinedload(models.Transfer.executions)) @enginefacade.reader -def get_replicas(context, - replica_scenario=None, - include_tasks_executions=False, - include_task_info=False, - to_dict=False): - q = _soft_delete_aware_query(context, models.Replica) +def get_transfers(context, + transfer_scenario=None, + include_tasks_executions=False, + include_task_info=False, + to_dict=False): + q = _soft_delete_aware_query(context, models.Transfer) if include_tasks_executions: - q = _get_replica_with_tasks_executions_options(q) + q = _get_transfer_with_tasks_executions_options(q) if include_task_info: q = q.options(orm.undefer('info')) q = q.filter() - if replica_scenario: - q.filter(models.Replica.scenario == replica_scenario) + if transfer_scenario: + q = q.filter(models.Transfer.scenario == transfer_scenario) if is_user_context(context): q = q.filter( - models.Replica.project_id == context.project_id) + models.Transfer.project_id == context.project_id) db_result = q.all() if to_dict: return [ @@ -450,55 +450,55 @@ get_replicas(context, @enginefacade.reader -def get_replica(context, replica_id, - replica_scenario=None, - include_task_info=False, - to_dict=False): - q = _soft_delete_aware_query(context, models.Replica) - q = _get_replica_with_tasks_executions_options(q) +def get_transfer(context, transfer_id, + transfer_scenario=None, + include_task_info=False, + to_dict=False): + q = _soft_delete_aware_query(context, models.Transfer) + q = _get_transfer_with_tasks_executions_options(q) if include_task_info: q = q.options(orm.undefer('info')) - if 
replica_scenario: + if transfer_scenario: q = q.filter( - models.Replica.scenario == replica_scenario) + models.Transfer.scenario == transfer_scenario) if is_user_context(context): q = q.filter( - models.Replica.project_id == context.project_id) + models.Transfer.project_id == context.project_id) - replica = q.filter( - models.Replica.id == replica_id).first() - if to_dict and replica is not None: - return replica.to_dict(include_task_info=include_task_info) + transfer = q.filter( + models.Transfer.id == transfer_id).first() + if to_dict and transfer is not None: + return transfer.to_dict(include_task_info=include_task_info) - return replica + return transfer @enginefacade.reader -def get_endpoint_replicas_count( - context, endpoint_id, replica_scenario=None): +def get_endpoint_transfers_count( + context, endpoint_id, transfer_scenario=None): scenario_filter_kwargs = {} - if replica_scenario: - scenario_filter_kwargs = {"scenario": replica_scenario} + if transfer_scenario: + scenario_filter_kwargs = {"scenario": transfer_scenario} origin_args = {'origin_endpoint_id': endpoint_id} origin_args.update(scenario_filter_kwargs) q_origin_count = _soft_delete_aware_query( - context, models.Replica).filter_by(**origin_args).count() + context, models.Transfer).filter_by(**origin_args).count() destination_args = {'destination_endpoint_id': endpoint_id} destination_args.update(scenario_filter_kwargs) q_destination_count = _soft_delete_aware_query( - context, models.Replica).filter_by(**destination_args).count() + context, models.Transfer).filter_by(**destination_args).count() return q_origin_count + q_destination_count @enginefacade.writer -def add_replica(context, replica): - replica.user_id = context.user - replica.project_id = context.project_id - _session(context).add(replica) +def add_transfer(context, transfer): + transfer.user_id = context.user + transfer.project_id = context.project_id + _session(context).add(transfer) @enginefacade.writer @@ -516,39 +516,35 @@ def 
_delete_transfer_action(context, cls, id): @enginefacade.writer -def delete_replica(context, replica_id): - _delete_transfer_action(context, models.Replica, replica_id) +def delete_transfer(context, transfer_id): + _delete_transfer_action(context, models.Transfer, transfer_id) @enginefacade.reader -def get_replica_migrations(context, replica_id): - q = _soft_delete_aware_query(context, models.Migration) - q = q.join("replica") +def get_transfer_deployments(context, transfer_id): + q = _soft_delete_aware_query(context, models.Deployment) + q = q.join("transfer") q = q.options(orm.joinedload("executions")) if is_user_context(context): q = q.filter( - models.Migration.project_id == context.project_id) + models.Deployment.project_id == context.project_id) return q.filter( - models.Replica.id == replica_id).all() + models.Transfer.id == transfer_id).all() @enginefacade.reader -def get_migrations(context, - include_tasks=False, - include_task_info=False, - to_dict=False, - replica_migrations_only=False): - q = _soft_delete_aware_query(context, models.Migration) +def get_deployments(context, + include_tasks=False, + include_task_info=False, + to_dict=False): + q = _soft_delete_aware_query(context, models.Deployment) if include_tasks: - q = _get_migration_task_query_options(q) + q = _get_deployment_task_query_options(q) else: q = q.options(orm.joinedload("executions")) if include_task_info: q = q.options(orm.undefer('info')) - if replica_migrations_only: - q.filter(models.Migration.replica_id is not None) - args = {} if is_user_context(context): args["project_id"] = context.project_id @@ -569,7 +565,7 @@ def _get_tasks_with_details_options(query): joinedload("events")) -def _get_migration_task_query_options(query): +def _get_deployment_task_query_options(query): return query.options( orm.joinedload("executions"). joinedload("tasks"). 
@@ -582,13 +578,13 @@ def _get_migration_task_query_options(query): @enginefacade.reader -def get_migration(context, migration_id, include_task_info=False, - to_dict=False): - q = _soft_delete_aware_query(context, models.Migration) - q = _get_migration_task_query_options(q) +def get_deployment(context, deployment_id, include_task_info=False, + to_dict=False): + q = _soft_delete_aware_query(context, models.Deployment) + q = _get_deployment_task_query_options(q) if include_task_info: q = q.options(orm.undefer('info')) - args = {"id": migration_id} + args = {"id": deployment_id} if is_user_context(context): args["project_id"] = context.project_id db_result = q.filter_by(**args).first() @@ -599,15 +595,15 @@ def get_migration(context, migration_id, include_task_info=False, @enginefacade.writer -def add_migration(context, migration): - migration.user_id = context.user - migration.project_id = context.project_id - _session(context).add(migration) +def add_deployment(context, deployment): + deployment.user_id = context.user + deployment.project_id = context.project_id + _session(context).add(deployment) @enginefacade.writer -def delete_migration(context, migration_id): - _delete_transfer_action(context, models.Migration, migration_id) +def delete_deployment(context, deployment_id): + _delete_transfer_action(context, models.Deployment, deployment_id) @enginefacade.writer @@ -941,10 +937,10 @@ def update_task_progress_update( @enginefacade.writer -def update_replica(context, replica_id, updated_values): - replica = get_replica(context, replica_id) - if not replica: - raise exception.NotFound("Replica not found") +def update_transfer(context, transfer_id, updated_values): + transfer = get_transfer(context, transfer_id) + if not transfer: + raise exception.NotFound("Transfer not found") mapped_info_fields = { 'destination_environment': 'target_environment'} @@ -957,11 +953,11 @@ def update_replica(context, replica_id, updated_values): for field in updateable_fields: if 
mapped_info_fields.get(field, field) in updated_values: LOG.debug( - "Updating the '%s' field of Replica '%s' to: '%s'", - field, replica_id, updated_values[ + "Updating the '%s' field of Transfer '%s' to: '%s'", + field, transfer_id, updated_values[ mapped_info_fields.get(field, field)]) setattr( - replica, field, + transfer, field, updated_values[mapped_info_fields.get(field, field)]) non_updateable_fields = set( @@ -970,12 +966,12 @@ def update_replica(context, replica_id, updated_values): for field in updateable_fields}) if non_updateable_fields: LOG.warn( - "The following Replica fields can NOT be updated: %s", + "The following Transfer fields can NOT be updated: %s", non_updateable_fields) # the oslo_db library uses this method for both the `created_at` and # `updated_at` fields - setattr(replica, 'updated_at', timeutils.utcnow()) + setattr(transfer, 'updated_at', timeutils.utcnow()) @enginefacade.writer diff --git a/coriolis/db/sqlalchemy/migrate_repo/versions/020_rename_tables.py b/coriolis/db/sqlalchemy/migrate_repo/versions/020_rename_tables.py new file mode 100644 index 00000000..cc5e259e --- /dev/null +++ b/coriolis/db/sqlalchemy/migrate_repo/versions/020_rename_tables.py @@ -0,0 +1,30 @@ +import sqlalchemy + + +def upgrade(migrate_engine): + meta = sqlalchemy.MetaData() + meta.bind = migrate_engine + + replica = sqlalchemy.Table('replica', meta, autoload=True) + replica.rename('transfer') + + migration = sqlalchemy.Table('migration', meta, autoload=True) + migration.rename('deployment') + migration.c.replica_id.alter(name='transfer_id', nullable=False) + migration.c.replication_count.drop() + + replica_schedule = sqlalchemy.Table( + 'replica_schedules', meta, autoload=True) + replica_schedule.rename('transfer_schedules') + replica_schedule.c.replica_id.alter(name='transfer_id') + + # NOTE(dvincze): Update models polymorphic identity + # Due to the model code changes, this cannot be done using the ORM. + # Had to resort to using raw SQL statements. 
+ with migrate_engine.connect() as conn: + conn.execute( + 'UPDATE base_transfer_action SET type = "transfer" ' + 'WHERE type = "replica";') + conn.execute( + 'UPDATE base_transfer_action SET type = "deployment" ' + 'WHERE type = "migration";') diff --git a/coriolis/db/sqlalchemy/models.py b/coriolis/db/sqlalchemy/models.py index beac9c3b..2f101be7 100644 --- a/coriolis/db/sqlalchemy/models.py +++ b/coriolis/db/sqlalchemy/models.py @@ -323,23 +323,23 @@ def to_dict(self, include_task_info=True, include_executions=True): return result -class Replica(BaseTransferAction): - __tablename__ = 'replica' +class Transfer(BaseTransferAction): + __tablename__ = 'transfer' id = sqlalchemy.Column( sqlalchemy.String(36), sqlalchemy.ForeignKey( 'base_transfer_action.base_id'), primary_key=True) scenario = sqlalchemy.Column( - sqlalchemy.String(255), + sqlalchemy.String(255), nullable=False, default=constants.REPLICA_SCENARIO_REPLICA) __mapper_args__ = { - 'polymorphic_identity': 'replica', + 'polymorphic_identity': 'transfer', } def to_dict(self, include_task_info=True, include_executions=True): - base = super(Replica, self).to_dict( + base = super(Transfer, self).to_dict( include_task_info=include_task_info, include_executions=include_executions) base.update({ @@ -348,40 +348,36 @@ def to_dict(self, include_task_info=True, include_executions=True): return base -class Migration(BaseTransferAction): - __tablename__ = 'migration' +class Deployment(BaseTransferAction): + __tablename__ = 'deployment' id = sqlalchemy.Column( sqlalchemy.String(36), sqlalchemy.ForeignKey( 'base_transfer_action.base_id'), primary_key=True) - replica_id = sqlalchemy.Column( + transfer_id = sqlalchemy.Column( sqlalchemy.String(36), - sqlalchemy.ForeignKey('replica.id'), nullable=True) - replica = orm.relationship( - Replica, backref=orm.backref("migrations"), foreign_keys=[replica_id]) + sqlalchemy.ForeignKey('transfer.id'), nullable=False) + transfer = orm.relationship( + Transfer, 
backref=orm.backref("deployments"), + foreign_keys=[transfer_id]) shutdown_instances = sqlalchemy.Column( sqlalchemy.Boolean, nullable=False, default=False) - replication_count = sqlalchemy.Column( - sqlalchemy.Integer, nullable=False, default=2) __mapper_args__ = { - 'polymorphic_identity': 'migration', + 'polymorphic_identity': 'deployment', } def to_dict(self, include_task_info=True, include_tasks=True): - base = super(Migration, self).to_dict( + base = super(Deployment, self).to_dict( include_task_info=include_task_info, include_executions=include_tasks) - replica_scenario_type = None - if self.replica: - replica_scenario_type = self.replica.scenario + base.update({ "id": self.id, - "replica_id": self.replica_id, - "replica_scenario_type": replica_scenario_type, + "transfer_id": self.transfer_id, + "transfer_scenario_type": self.transfer.scenario, "shutdown_instances": self.shutdown_instances, - "replication_count": self.replication_count, }) return base @@ -667,18 +663,18 @@ class Endpoint(BASE, models.TimestampMixin, models.ModelBase, secondary="endpoint_region_mapping") -class ReplicaSchedule(BASE, models.TimestampMixin, models.ModelBase, - models.SoftDeleteMixin): - __tablename__ = "replica_schedules" +class TransferSchedule(BASE, models.TimestampMixin, models.ModelBase, + models.SoftDeleteMixin): + __tablename__ = "transfer_schedules" id = sqlalchemy.Column(sqlalchemy.String(36), default=lambda: str(uuid.uuid4()), primary_key=True) - replica_id = sqlalchemy.Column( + transfer_id = sqlalchemy.Column( sqlalchemy.String(36), - sqlalchemy.ForeignKey('replica.id'), nullable=False) - replica = orm.relationship( - Replica, backref=orm.backref("schedules"), foreign_keys=[replica_id]) + sqlalchemy.ForeignKey('transfer.id'), nullable=False) + transfer = orm.relationship( + Transfer, backref=orm.backref("schedules"), foreign_keys=[transfer_id]) schedule = sqlalchemy.Column(types.Json, nullable=False) expiration_date = sqlalchemy.Column( sqlalchemy.types.DateTime, 
nullable=True) diff --git a/coriolis/tests/api/v1/test_replica_schedules.py b/coriolis/tests/api/v1/test_replica_schedules.py index 27a8ac1d..c32cc792 100644 --- a/coriolis/tests/api/v1/test_replica_schedules.py +++ b/coriolis/tests/api/v1/test_replica_schedules.py @@ -33,7 +33,7 @@ def test_show( mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} id = mock.sentinel.id - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id result = self.replica_schedules.show(mock_req, replica_id, id) @@ -58,7 +58,7 @@ def test_show_not_found( mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} id = mock.sentinel.id - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id mock_get_schedule.return_value = None self.assertRaises( @@ -84,7 +84,7 @@ def test_index( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id mock_req.GET = {"show_expired": "False"} result = self.replica_schedules.index(mock_req, replica_id) @@ -338,7 +338,7 @@ def test_create( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id body = mock.sentinel.body schedule = mock.sentinel.schedule exp_date = mock.sentinel.exp_date @@ -368,7 +368,7 @@ def test_create_except( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id body = mock.sentinel.body mock_validate_create_body.side_effect = Exception("err") @@ -397,7 +397,7 @@ def test_update( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id 
id = mock.sentinel.id body = mock.sentinel.body @@ -425,7 +425,7 @@ def test_update_except( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id id = mock.sentinel.id body = mock.sentinel.body mock_validate_update_body.side_effect = Exception("err") @@ -451,7 +451,7 @@ def test_delete( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id id = mock.sentinel.id self.assertRaises( diff --git a/coriolis/tests/api/v1/test_replica_tasks_execution_actions.py b/coriolis/tests/api/v1/test_replica_tasks_execution_actions.py index 3e46b725..2ed8e2fd 100644 --- a/coriolis/tests/api/v1/test_replica_tasks_execution_actions.py +++ b/coriolis/tests/api/v1/test_replica_tasks_execution_actions.py @@ -37,7 +37,7 @@ def test_cancel( mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} id = mock.sentinel.id - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id body = config["body"] if exception_raised: mock_cancel.side_effect = getattr(exception, exception_raised)( diff --git a/coriolis/tests/api/v1/test_replica_tasks_executions.py b/coriolis/tests/api/v1/test_replica_tasks_executions.py index fce66069..b1b2b11b 100644 --- a/coriolis/tests/api/v1/test_replica_tasks_executions.py +++ b/coriolis/tests/api/v1/test_replica_tasks_executions.py @@ -29,7 +29,7 @@ def test_show( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id id = mock.sentinel.id result = self.replica_api.show(mock_req, replica_id, id) @@ -55,7 +55,7 @@ def test_show_not_found( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': 
mock_context} - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id id = mock.sentinel.id mock_get_execution.return_value = None @@ -83,7 +83,7 @@ def test_index( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id result = self.replica_api.index(mock_req, replica_id) @@ -109,7 +109,7 @@ def test_detail( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id result = self.replica_api.detail(mock_req, replica_id) @@ -135,7 +135,7 @@ def test_create( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id execution = {"shutdown_instances": True} mock_body = {"execution": execution} @@ -162,7 +162,7 @@ def test_create_no_executions( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id mock_body = {} result = self.replica_api.create(mock_req, replica_id, mock_body) @@ -186,7 +186,7 @@ def test_delete( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id id = mock.sentinel.id self.assertRaises( @@ -209,7 +209,7 @@ def test_delete_not_found( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.replica_id + replica_id = mock.sentinel.transfer_id id = mock.sentinel.id mock_delete.side_effect = exception.NotFound() diff --git a/coriolis/tests/conductor/rpc/test_server.py b/coriolis/tests/conductor/rpc/test_server.py index 
857c6b64..0582c990 100644 --- a/coriolis/tests/conductor/rpc/test_server.py +++ b/coriolis/tests/conductor/rpc/test_server.py @@ -269,9 +269,9 @@ def call_get_endpoint(): self.assertRaises(exception.NotFound, call_get_endpoint) @mock.patch.object(db_api, "delete_endpoint") - @mock.patch.object(db_api, "get_endpoint_replicas_count") + @mock.patch.object(db_api, "get_endpoint_transfers_count") def test_delete_endpoint( - self, mock_get_endpoint_replicas_count, mock_delete_endpoint + self, mock_get_endpoint_transfers_count, mock_delete_endpoint ): def call_delete_endpoint(): return testutils.get_wrapped_function(self.server.delete_endpoint)( @@ -279,14 +279,14 @@ def call_delete_endpoint(): mock.sentinel.endpoint_id # type: ignore ) - mock_get_endpoint_replicas_count.return_value = 0 + mock_get_endpoint_transfers_count.return_value = 0 call_delete_endpoint() mock_delete_endpoint.assert_called_once_with( mock.sentinel.context, mock.sentinel.endpoint_id ) # endpoint has replicas - mock_get_endpoint_replicas_count.return_value = 1 + mock_get_endpoint_transfers_count.return_value = 1 self.assertRaises(exception.NotAuthorized, call_delete_endpoint) @mock.patch.object( @@ -1103,12 +1103,12 @@ def test_delete_replica_disks_invalid_state( delete_replica_disks, self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, ) mock_get_replica.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=True, ) mock_check_replica_running.assert_called_once_with( @@ -1190,7 +1190,7 @@ def convert_to_task(task_config): server.ConductorServerEndpoint, "_minion_manager_client" ) - @mock.patch.object(db_api, "add_replica_tasks_execution") + @mock.patch.object(db_api, "add_transfer_tasks_execution") @mock.patch.object(db_api, "update_transfer_action_info_for_instance") @mock.patch.object( server.ConductorServerEndpoint, @@ -1231,7 +1231,7 @@ def test_execute_replica_tasks( mock_create_task, 
mock_check_execution_tasks_sanity, mock_update_transfer_action_info_for_instance, - mock_add_replica_tasks_execution, + mock_add_transfer_tasks_execution, mock_minion_manager_client, mock_set_tasks_execution_status, mock_begin_tasks, @@ -1249,7 +1249,7 @@ def call_execute_replica_tasks(): .get_wrapped_function(self.server.execute_replica_tasks)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, shutdown_instances, # type: ignore ) @@ -1288,7 +1288,7 @@ def create_task_side_effect( result = call_execute_replica_tasks() mock_get_replica.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=True, ) mock_check_reservation.assert_called_once_with(mock_replica) @@ -1356,7 +1356,7 @@ def create_task_side_effect( mock_tasks_execution.return_value, mock_replica.info) - mock_add_replica_tasks_execution.assert_called_once_with( + mock_add_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, mock_tasks_execution.return_value) @@ -1380,7 +1380,7 @@ def create_task_side_effect( mock_get_replica_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock_tasks_execution.return_value.id) self.assertEqual( @@ -1392,53 +1392,53 @@ def create_task_side_effect( self.assertEqual( result, mock_get_replica_tasks_execution.return_value) - @mock.patch.object(db_api, "get_replica_tasks_executions") + @mock.patch.object(db_api, "get_transfer_tasks_executions") def test_get_replica_tasks_executions( self, - mock_get_replica_tasks_executions + mock_get_transfer_tasks_executions ): result = testutils.get_wrapped_function( self.server.get_replica_tasks_executions)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id, include_task_info=False ) self.assertEqual( - mock_get_replica_tasks_executions.return_value, + 
mock_get_transfer_tasks_executions.return_value, result ) - mock_get_replica_tasks_executions.assert_called_once_with( + mock_get_transfer_tasks_executions.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id, include_task_info=False, to_dict=True ) - @mock.patch.object(db_api, "get_replica_tasks_execution") + @mock.patch.object(db_api, "get_transfer_tasks_execution") def test_get_replica_tasks_execution( self, - mock_get_replica_tasks_execution + mock_get_transfer_tasks_execution ): result = testutils.get_wrapped_function( self.server.get_replica_tasks_execution)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id, include_task_info=False ) self.assertEqual( - mock_get_replica_tasks_execution.return_value, + mock_get_transfer_tasks_execution.return_value, result ) - mock_get_replica_tasks_execution.assert_called_once_with( + mock_get_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id, include_task_info=False, to_dict=True @@ -1448,10 +1448,10 @@ def test_get_replica_tasks_execution( server.ConductorServerEndpoint, '_get_replica_tasks_execution' ) - @mock.patch.object(db_api, 'delete_replica_tasks_execution') + @mock.patch.object(db_api, 'delete_transfer_tasks_execution') def test_delete_replica_tasks_execution( self, - mock_delete_replica_tasks_execution, + mock_delete_transfer_tasks_execution, mock_get_replica_tasks_execution ): def call_delete_replica_tasks_execution(): @@ -1459,21 +1459,21 @@ def call_delete_replica_tasks_execution(): self.server.delete_replica_tasks_execution)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id, # type: ignore ) call_delete_replica_tasks_execution() mock_get_replica_tasks_execution.assert_called_once_with( 
mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id) - mock_delete_replica_tasks_execution.assert_called_once_with( + mock_delete_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, mock.sentinel.execution_id) # raises exception if status is active - mock_get_replica_tasks_execution.return_value.status = constants\ - .EXECUTION_STATUS_RUNNING + mock_get_replica_tasks_execution.return_value.status = ( + constants.EXECUTION_STATUS_RUNNING) self.assertRaises( exception.InvalidMigrationState, @@ -1494,13 +1494,13 @@ def test_cancel_replica_tasks_execution( self.server.cancel_replica_tasks_execution)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id, False ) mock_get_replica_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id) mock_cancel_replica_tasks_execution.assert_called_once_with( mock.sentinel.context, @@ -1515,13 +1515,13 @@ def test_cancel_replica_tasks_execution( self.server.cancel_replica_tasks_execution)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id, True ) mock_get_replica_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id) mock_cancel_replica_tasks_execution.assert_called_once_with( mock.sentinel.context, @@ -1543,13 +1543,13 @@ def test_cancel_replica_tasks_execution_status_not_active( self.server.cancel_replica_tasks_execution), self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id, False ) mock_get_replica_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id) 
mock_cancel_replica_tasks_execution.assert_not_called() @@ -1570,64 +1570,64 @@ def test_cancel_replica_tasks_execution_status_cancelling_no_force( self.server.cancel_replica_tasks_execution), self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id, False ) mock_get_replica_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id) mock_cancel_replica_tasks_execution.assert_not_called() - @mock.patch.object(db_api, 'get_replica_tasks_execution') + @mock.patch.object(db_api, 'get_transfer_tasks_execution') def test__get_replica_tasks_execution( self, - mock_get_replica_tasks_execution + mock_get_transfer_tasks_execution ): result = self.server._get_replica_tasks_execution( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id, include_task_info=False, to_dict=False, ) self.assertEqual( - mock_get_replica_tasks_execution.return_value, + mock_get_transfer_tasks_execution.return_value, result ) - mock_get_replica_tasks_execution.assert_called_once_with( + mock_get_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id, include_task_info=False, to_dict=False) - @mock.patch.object(db_api, 'get_replica_tasks_execution') + @mock.patch.object(db_api, 'get_transfer_tasks_execution') def test__get_replica_tasks_execution_no_execution( self, - mock_get_replica_tasks_execution + mock_get_transfer_tasks_execution ): - mock_get_replica_tasks_execution.return_value = None + mock_get_transfer_tasks_execution.return_value = None self.assertRaises( exception.NotFound, self.server._get_replica_tasks_execution, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id, include_task_info=False, to_dict=False, ) - 
mock_get_replica_tasks_execution.assert_called_once_with( + mock_get_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.execution_id, include_task_info=False, to_dict=False) - @mock.patch.object(db_api, 'get_replicas') - def test_get_replicas(self, mock_get_replicas): + @mock.patch.object(db_api, 'get_transfers') + def test_get_replicas(self, mock_get_transfers): result = self.server.get_replicas( mock.sentinel.context, include_tasks_executions=False, @@ -1635,10 +1635,10 @@ def test_get_replicas(self, mock_get_replicas): ) self.assertEqual( - mock_get_replicas.return_value, + mock_get_transfers.return_value, result ) - mock_get_replicas.assert_called_once_with( + mock_get_transfers.assert_called_once_with( mock.sentinel.context, False, include_task_info=False, @@ -1650,7 +1650,7 @@ def test_get_replica(self, mock_get_replica): result = testutils.get_wrapped_function(self.server.get_replica)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=False ) @@ -1660,12 +1660,12 @@ def test_get_replica(self, mock_get_replica): ) mock_get_replica.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=False, to_dict=True ) - @mock.patch.object(db_api, 'delete_replica') + @mock.patch.object(db_api, 'delete_transfer') @mock.patch.object(server.ConductorServerEndpoint, '_check_delete_reservation_for_transfer') @mock.patch.object(server.ConductorServerEndpoint, @@ -1676,21 +1676,21 @@ def test_delete_replica( mock_get_replica, mock_check_replica_running_executions, mock_check_delete_reservation_for_transfer, - mock_delete_replica, + mock_delete_transfer, ): testutils.get_wrapped_function(self.server.delete_replica)( self.server, mock.sentinel.context, - mock.sentinel.replica_id + mock.sentinel.transfer_id ) mock_get_replica.assert_called_once_with( - 
mock.sentinel.context, mock.sentinel.replica_id) + mock.sentinel.context, mock.sentinel.transfer_id) mock_check_replica_running_executions.assert_called_once_with( mock.sentinel.context, mock_get_replica.return_value) mock_check_delete_reservation_for_transfer.assert_called_once_with( mock_get_replica.return_value) - mock_delete_replica.assert_called_once_with( - mock.sentinel.context, mock.sentinel.replica_id) + mock_delete_transfer.assert_called_once_with( + mock.sentinel.context, mock.sentinel.transfer_id) @mock.patch.object( server.ConductorServerEndpoint, @@ -1700,7 +1700,7 @@ def test_delete_replica( server.ConductorServerEndpoint, '_begin_tasks' ) - @mock.patch.object(db_api, "add_replica_tasks_execution") + @mock.patch.object(db_api, "add_transfer_tasks_execution") @mock.patch.object(db_api, "update_transfer_action_info_for_instance") @mock.patch.object( server.ConductorServerEndpoint, @@ -1731,7 +1731,7 @@ def test_delete_replica_disks( mock_deepcopy, mock_check_execution_tasks_sanity, mock_update_transfer_action_info_for_instance, - mock_add_replica_tasks_execution, + mock_add_transfer_tasks_execution, mock_begin_tasks, mock_get_replica_tasks_execution, ): @@ -1740,12 +1740,12 @@ def call_delete_replica_disks(): self.server.delete_replica_disks)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, # type: ignore + mock.sentinel.transfer_id, # type: ignore ) instances = [mock.Mock(), mock.Mock()] mock_replica = mock.Mock( instances=instances, - id=mock.sentinel.replica_id, + id=mock.sentinel.transfer_id, network_map=mock.sentinel.network_map, info={ instance: instance @@ -1774,7 +1774,7 @@ def create_task_side_effect( mock_get_replica.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=True ) mock_check_replica_running_executions.assert_called_once_with( @@ -1825,7 +1825,7 @@ def create_task_side_effect( mock_tasks_execution.return_value, mock_replica.info, ) - 
mock_add_replica_tasks_execution.assert_called_once_with( + mock_add_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, mock_tasks_execution.return_value ) @@ -1897,27 +1897,27 @@ def test_check_endpoints_same_destination_connection_info(self): ) @mock.patch.object(server.ConductorServerEndpoint, 'get_replica') - @mock.patch.object(db_api, 'add_replica') + @mock.patch.object(db_api, 'add_transfer') @mock.patch.object(server.ConductorServerEndpoint, '_create_reservation_for_replica') @mock.patch.object(server.ConductorServerEndpoint, '_check_minion_pools_for_action') - @mock.patch.object(models, 'Replica') + @mock.patch.object(models, 'Transfer') @mock.patch.object(server.ConductorServerEndpoint, '_check_endpoints') @mock.patch.object(server.ConductorServerEndpoint, 'get_endpoint') def test_create_instances_replica( self, mock_get_endpoint, mock_check_endpoints, - mock_replica, + mock_transfer, mock_check_minion_pools_for_action, mock_create_reservation_for_replica, - mock_add_replica, + mock_add_transfer, mock_get_replica ): mock_get_endpoint.side_effect = mock.sentinel.origin_endpoint_id, \ mock.sentinel.destination_endpoint_id - mock_replica.return_value = mock.Mock() + mock_transfer.return_value = mock.Mock() result = self.server.create_instances_replica( mock.sentinel.context, constants.REPLICA_SCENARIO_REPLICA, @@ -1949,18 +1949,18 @@ def test_create_instances_replica( ) self.assertEqual( ( - mock_replica.return_value.origin_endpoint_id, - mock_replica.return_value.destination_endpoint_id, - mock_replica.return_value.destination_endpoint_id, - mock_replica.return_value.origin_minion_pool_id, - mock_replica.return_value.destination_minion_pool_id, - (mock_replica.return_value. 
- instance_osmorphing_minion_pool_mappings), - mock_replica.return_value.source_environment, - mock_replica.return_value.destination_environment, - mock_replica.return_value.info, - mock_replica.return_value.notes, - mock_replica.return_value.user_scripts), + mock_transfer.return_value.origin_endpoint_id, + mock_transfer.return_value.destination_endpoint_id, + mock_transfer.return_value.destination_endpoint_id, + mock_transfer.return_value.origin_minion_pool_id, + mock_transfer.return_value.destination_minion_pool_id, + (mock_transfer.return_value. + instance_osmorphing_minion_pool_mappings), + mock_transfer.return_value.source_environment, + mock_transfer.return_value.destination_environment, + mock_transfer.return_value.info, + mock_transfer.return_value.notes, + mock_transfer.return_value.user_scripts), ( mock.sentinel.origin_endpoint_id, mock.sentinel.destination_endpoint_id, @@ -1977,19 +1977,19 @@ def test_create_instances_replica( {}) ) mock_check_minion_pools_for_action.assert_called_once_with( - mock.sentinel.context, mock_replica.return_value) + mock.sentinel.context, mock_transfer.return_value) mock_create_reservation_for_replica.assert_called_once_with( - mock_replica.return_value) - mock_add_replica.assert_called_once_with( - mock.sentinel.context, mock_replica.return_value) + mock_transfer.return_value) + mock_add_transfer.assert_called_once_with( + mock.sentinel.context, mock_transfer.return_value) mock_get_replica.assert_called_once_with( - mock.sentinel.context, mock_replica.return_value.id) + mock.sentinel.context, mock_transfer.return_value.id) - @mock.patch.object(db_api, 'get_replica') + @mock.patch.object(db_api, 'get_transfer') def test__get_replica(self, mock_get_replica): result = self.server._get_replica( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=False, to_dict=False ) @@ -1999,25 +1999,25 @@ def test__get_replica(self, mock_get_replica): ) mock_get_replica.assert_called_once_with( 
mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=False, to_dict=False ) - @mock.patch.object(db_api, 'get_replica') - def test__get_replica_not_found(self, mock_get_replica): - mock_get_replica.return_value = None + @mock.patch.object(db_api, 'get_transfer') + def test__get_replica_not_found(self, mock_get_transfer): + mock_get_transfer.return_value = None self.assertRaises( exception.NotFound, self.server._get_replica, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=False, to_dict=False ) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=False, to_dict=False ) @@ -2041,10 +2041,10 @@ def test_get_migration(self, mock_get_migration): to_dict=True ) - @mock.patch.object(db_api, 'get_replica_migrations') + @mock.patch.object(db_api, 'get_transfer_deployments') def test_check_running_replica_migrations( self, - mock_get_replica_migrations + mock_get_transfer_deployments ): migration_1 = mock.Mock() migration_2 = mock.Mock() @@ -2055,20 +2055,20 @@ def test_check_running_replica_migrations( migration_2.executions[0].status = \ constants.EXECUTION_STATUS_ERROR migrations = [migration_1, migration_2] - mock_get_replica_migrations.return_value = migrations + mock_get_transfer_deployments.return_value = migrations self.server._check_running_replica_migrations( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, ) - mock_get_replica_migrations.assert_called_once_with( + mock_get_transfer_deployments.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, ) - @mock.patch.object(db_api, 'get_replica_migrations') + @mock.patch.object(db_api, 'get_transfer_deployments') def test_check_running_replica_migrations_invalid_replica_state( self, - mock_get_replica_migrations + 
mock_get_transfer_deployments ): migration_1 = mock.Mock() migration_2 = mock.Mock() @@ -2078,16 +2078,16 @@ def test_check_running_replica_migrations_invalid_replica_state( migration_2.executions[0].status = \ constants.EXECUTION_STATUS_COMPLETED migrations = [migration_1, migration_2] - mock_get_replica_migrations.return_value = migrations + mock_get_transfer_deployments.return_value = migrations self.assertRaises( exception.InvalidReplicaState, self.server._check_running_replica_migrations, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, ) - mock_get_replica_migrations.assert_called_once_with( + mock_get_transfer_deployments.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, ) def test_check_running_executions(self): @@ -2239,7 +2239,7 @@ def test_get_provider_types(self, mock_get_available_providers): server.ConductorServerEndpoint, '_get_provider_types' ) - @mock.patch.object(models, "Migration") + @mock.patch.object(models, "Deployment") @mock.patch.object(uuid, "uuid4", return_value="migration_id") @mock.patch.object(copy, "deepcopy") @mock.patch.object( @@ -2259,7 +2259,7 @@ def test_get_provider_types(self, mock_get_available_providers): server.ConductorServerEndpoint, '_check_execution_tasks_sanity' ) - @mock.patch.object(db_api, 'add_migration') + @mock.patch.object(db_api, 'add_deployment') @mock.patch.object(lockutils, 'lock') @mock.patch.object( server.ConductorServerEndpoint, @@ -2286,7 +2286,7 @@ def test_deploy_replica_instance( mock_set_tasks_execution_status, mock_minion_manager_client, mock_lock, - mock_add_migration, + mock_add_deployment, mock_check_execution_tasks_sanity, mock_create_task, mock_get_instance_scripts, @@ -2294,7 +2294,7 @@ def test_deploy_replica_instance( mock_check_minion_pools_for_action, mock_deepcopy, mock_uuid4, - mock_migration, + mock_deployment, mock_get_provider_types, mock_get_endpoint, mock_check_valid_replica_tasks_execution, @@ 
-2337,7 +2337,7 @@ def test_deploy_replica_instance( def call_deploy_replica_instance(): return self.server.deploy_replica_instances( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, clone_disks=clone_disks, force=False, instance_osmorphing_minion_pool_mappings=( @@ -2359,7 +2359,7 @@ def call_deploy_replica_instance(): mock_get_replica.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=True, ) mock_check_replica_running_executions.assert_called_once_with( @@ -2405,7 +2405,7 @@ def create_task_side_effect( mock_check_minion_pools_for_action.assert_called_once_with( mock.sentinel.context, - mock_migration.return_value + mock_deployment.return_value ) mock_check_reservation_for_replica.assert_called_once_with( mock_get_replica.return_value) @@ -2449,25 +2449,25 @@ def create_task_side_effect( mock_check_execution_tasks_sanity.assert_called_once_with( mock_tasks_execution.return_value, - mock_migration.return_value.info, + mock_deployment.return_value.info, ) - mock_add_migration.assert_called_once_with( + mock_add_deployment.assert_called_once_with( mock.sentinel.context, - mock_migration.return_value, + mock_deployment.return_value, ) if not skip_os_morphing and has_os_morphing_minion: mock_lock.assert_any_call( constants.MIGRATION_LOCK_NAME_FORMAT - % mock_migration.return_value.id, + % mock_deployment.return_value.id, external=True, ) mock_minion_manager_client\ .allocate_minion_machines_for_migration\ .assert_called_once_with( mock.sentinel.context, - mock_migration.return_value, + mock_deployment.return_value, include_transfer_minions=False, include_osmorphing_minions=True ) @@ -2479,13 +2479,13 @@ def create_task_side_effect( else: mock_begin_tasks.assert_called_once_with( mock.sentinel.context, - mock_migration.return_value, + mock_deployment.return_value, mock_tasks_execution.return_value, ) mock_get_migration.assert_called_once_with( mock.sentinel.context, 
- mock_migration.return_value.id, + mock_deployment.return_value.id, ) self.assertEqual( @@ -2810,7 +2810,7 @@ def test_get_execution_for_migration( ) @mock.patch.object(server.ConductorServerEndpoint, '_begin_tasks') - @mock.patch.object(db_api, 'get_replica_tasks_execution') + @mock.patch.object(db_api, 'get_transfer_tasks_execution') @mock.patch.object(server.ConductorServerEndpoint, '_update_task_info_for_minion_allocations') @mock.patch.object(server.ConductorServerEndpoint, @@ -2821,7 +2821,7 @@ def test_confirm_replica_minions_allocation( mock_get_replica, mock_get_last_execution_for_replica, mock_update_task_info_for_minion_allocations, - mock_get_replica_tasks_execution, + mock_get_transfer_tasks_execution, mock_begin_tasks ): mock_get_replica.return_value.last_execution_status = \ @@ -2831,13 +2831,13 @@ def test_confirm_replica_minions_allocation( self.server.confirm_replica_minions_allocation)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.minion_machine_allocations ) mock_get_replica.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=True ) mock_get_last_execution_for_replica.assert_called_once_with( @@ -2850,7 +2850,7 @@ def test_confirm_replica_minions_allocation( mock_get_replica.return_value, mock.sentinel.minion_machine_allocations ) - mock_get_replica_tasks_execution.assert_called_once_with( + mock_get_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, mock_get_replica.return_value.id, mock_get_last_execution_for_replica.return_value.id @@ -2858,11 +2858,11 @@ def test_confirm_replica_minions_allocation( mock_begin_tasks.assert_called_once_with( mock.sentinel.context, mock_get_replica.return_value, - mock_get_replica_tasks_execution.return_value + mock_get_transfer_tasks_execution.return_value ) @mock.patch.object(server.ConductorServerEndpoint, '_begin_tasks') - @mock.patch.object(db_api, 
'get_replica_tasks_execution') + @mock.patch.object(db_api, 'get_transfer_tasks_execution') @mock.patch.object(server.ConductorServerEndpoint, '_update_task_info_for_minion_allocations') @mock.patch.object(server.ConductorServerEndpoint, @@ -2873,7 +2873,7 @@ def test_confirm_replica_minions_allocation_unexpected_status( mock_get_replica, mock_get_last_execution_for_replica, mock_update_task_info_for_minion_allocations, - mock_get_replica_tasks_execution, + mock_get_transfer_tasks_execution, mock_begin_tasks ): mock_get_replica.return_value.last_execution_status = \ @@ -2885,18 +2885,18 @@ def test_confirm_replica_minions_allocation_unexpected_status( self.server.confirm_replica_minions_allocation), self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.minion_machine_allocations ) mock_get_replica.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=True ) mock_get_last_execution_for_replica.assert_not_called() mock_update_task_info_for_minion_allocations.assert_not_called() - mock_get_replica_tasks_execution.assert_not_called() + mock_get_transfer_tasks_execution.assert_not_called() mock_begin_tasks.assert_not_called() @mock.patch.object(server.ConductorServerEndpoint, @@ -2920,13 +2920,13 @@ def test_report_replica_minions_allocation_error( self.server.report_replica_minions_allocation_error)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.minion_allocation_error_details ) mock_get_replica.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id + mock.sentinel.transfer_id ) mock_get_last_execution_for_replica.assert_called_once_with( mock.sentinel.context, @@ -2967,13 +2967,13 @@ def test_report_replica_minions_allocation_error_unexpected_status( self.server.report_replica_minions_allocation_error), self.server, mock.sentinel.context, - mock.sentinel.replica_id, + 
mock.sentinel.transfer_id, mock.sentinel.minion_allocation_error_details ) mock_get_replica.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id + mock.sentinel.transfer_id ) mock_get_last_execution_for_replica.assert_not_called() mock_cancel_tasks_execution.assert_not_called() @@ -2999,13 +2999,13 @@ def test_confirm_migration_minions_allocation( self.server.confirm_migration_minions_allocation)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.minion_machine_allocations ) mock_get_migration.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=True ) mock_get_execution_for_migration.assert_called_once_with( @@ -3046,13 +3046,13 @@ def test_confirm_migration_minions_allocation_unexpected_status( self.server.confirm_migration_minions_allocation), self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.minion_machine_allocations ) mock_get_migration.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=True ) mock_get_execution_for_migration.assert_not_called() @@ -3081,13 +3081,13 @@ def test_report_migration_minions_allocation_error( self.server.report_migration_minions_allocation_error)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.minion_allocation_error_details ) mock_get_migration.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id + mock.sentinel.transfer_id ) mock_get_execution_for_migration.assert_called_once_with( mock.sentinel.context, @@ -3128,13 +3128,13 @@ def test_report_migration_minions_allocation_error_unexpected_status( self.server.report_migration_minions_allocation_error), self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, 
mock.sentinel.minion_allocation_error_details ) mock_get_migration.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id + mock.sentinel.transfer_id ) mock_get_execution_for_migration.assert_not_called() mock_cancel_tasks_execution.assert_not_called() @@ -3246,10 +3246,10 @@ def call_cancel_tasks_execution( exception_details=mock.ANY, ) - @mock.patch.object(db_api, 'get_migration') + @mock.patch.object(db_api, 'get_deployment') def test__get_migration( self, - mock_get_migration + mock_get_deployment ): result = self.server._get_migration( mock.sentinel.context, @@ -3258,18 +3258,18 @@ def test__get_migration( to_dict=False ) self.assertEqual( - mock_get_migration.return_value, + mock_get_deployment.return_value, result ) - mock_get_migration.assert_called_once_with( + mock_get_deployment.assert_called_once_with( mock.sentinel.context, mock.sentinel.migration_id, include_task_info=False, to_dict=False ) - mock_get_migration.reset_mock() - mock_get_migration.return_value = None + mock_get_deployment.reset_mock() + mock_get_deployment.return_value = None self.assertRaises( exception.NotFound, @@ -3280,7 +3280,7 @@ def test__get_migration( to_dict=False ) - mock_get_migration.assert_called_once_with( + mock_get_deployment.assert_called_once_with( mock.sentinel.context, mock.sentinel.migration_id, include_task_info=False, @@ -4003,14 +4003,14 @@ def test_update_replica_volumes_info( ): self.server._update_replica_volumes_info( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.instance, mock.sentinel.updated_task_info ) mock_update_transfer_action_info_for_instance.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.instance, mock.sentinel.updated_task_info ) @@ -4018,15 +4018,15 @@ def test_update_replica_volumes_info( @mock.patch.object(server.ConductorServerEndpoint, '_update_replica_volumes_info') @mock.patch.object(lockutils, 'lock') - 
@mock.patch.object(db_api, 'get_migration') + @mock.patch.object(db_api, 'get_deployment') def test_update_volumes_info_for_migration_parent_replica( self, - mock_get_migration, + mock_get_deployment, mock_lock, mock_update_replica_volumes_info ): - migration = mock.Mock() - mock_get_migration.return_value = migration + deployment = mock.Mock() + mock_get_deployment.return_value = deployment self.server._update_volumes_info_for_migration_parent_replica( mock.sentinel.context, @@ -4035,18 +4035,18 @@ def test_update_volumes_info_for_migration_parent_replica( mock.sentinel.updated_task_info ) - mock_get_migration.assert_called_once_with( + mock_get_deployment.assert_called_once_with( mock.sentinel.context, mock.sentinel.migration_id ) mock_lock.assert_called_once_with( constants.REPLICA_LOCK_NAME_FORMAT % - mock_get_migration.return_value.replica_id, + mock_get_deployment.return_value.transfer_id, external=True ) mock_update_replica_volumes_info.assert_called_once_with( mock.sentinel.context, - mock_get_migration.return_value.replica_id, + mock_get_deployment.return_value.transfer_id, mock.sentinel.instance, mock.sentinel.updated_task_info ) @@ -4056,7 +4056,7 @@ def test_update_volumes_info_for_migration_parent_replica( '_minion_manager_client' ) @mock.patch.object(db_api, 'update_minion_machine') - @mock.patch.object(db_api, 'update_replica') + @mock.patch.object(db_api, 'update_transfer') @mock.patch.object( server.ConductorServerEndpoint, '_update_replica_volumes_info' @@ -4073,7 +4073,7 @@ def test_handle_post_task_actions( mock_validate_value, mock_set_transfer_action_result, mock_update_replica_volumes_info, - mock_update_replica, + mock_update_transfer, mock_update_minion_machine, mock_minion_manager_client, ): @@ -4212,7 +4212,7 @@ def call_handle_post_task_actions(): # execution has active tasks task.type = constants.TASK_TYPE_UPDATE_DESTINATION_REPLICA call_handle_post_task_actions() - mock_update_replica.assert_not_called() + 
mock_update_transfer.assert_not_called() # execution has no active tasks execution.tasks = [ @@ -4222,12 +4222,12 @@ def call_handle_post_task_actions(): ) ] call_handle_post_task_actions() - mock_update_replica.assert_called_once_with( + mock_update_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.action_id, task_info ) - mock_update_replica.reset_mock() + mock_update_transfer.reset_mock() # TASK_TYPE_ATTACH_VOLUMES_TO_SOURCE_MINION # TASK_TYPE_DETACH_VOLUMES_FROM_SOURCE_MINION @@ -4336,7 +4336,7 @@ def call_handle_post_task_actions(): # for any other type of task nothing is called task.task_type = constants.TASK_TYPE_COLLECT_OSMORPHING_INFO call_handle_post_task_actions() - mock_update_replica.assert_not_called() + mock_update_transfer.assert_not_called() mock_update_minion_machine.assert_not_called() mock_minion_manager_client.deallocate_minion_machine\ .assert_not_called() @@ -4732,69 +4732,69 @@ def test_update_task_progress_update( new_message=mock.sentinel.new_message, ) - @mock.patch.object(db_api, "get_replica_schedule") + @mock.patch.object(db_api, "get_transfer_schedule") def test__get_replica_schedule( self, - mock_get_replica_schedule + mock_get_transfer_schedule ): result = self.server._get_replica_schedule( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.schedule_id, expired=True ) self.assertEqual( - mock_get_replica_schedule.return_value, + mock_get_transfer_schedule.return_value, result ) - mock_get_replica_schedule.assert_called_once_with( + mock_get_transfer_schedule.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.schedule_id, expired=True ) - mock_get_replica_schedule.reset_mock() - mock_get_replica_schedule.return_value = None + mock_get_transfer_schedule.reset_mock() + mock_get_transfer_schedule.return_value = None self.assertRaises( exception.NotFound, self.server._get_replica_schedule, 
mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.schedule_id, expired=False ) - mock_get_replica_schedule.assert_called_once_with( + mock_get_transfer_schedule.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.schedule_id, expired=False ) @mock.patch.object(server.ConductorServerEndpoint, "get_replica_schedule") - @mock.patch.object(db_api, "add_replica_schedule") - @mock.patch.object(models, "ReplicaSchedule") + @mock.patch.object(db_api, "add_transfer_schedule") + @mock.patch.object(models, "TransferSchedule") @mock.patch.object(server.ConductorServerEndpoint, "_get_replica") @mock.patch.object(keystone, "create_trust") def test_create_replica_schedule( self, mock_create_trust, mock_get_replica, - mock_ReplicaSchedule, - mock_add_replica_schedule, + mock_transfer_schedule, + mock_add_transfer_schedule, mock_get_replica_schedule ): context = mock.Mock() - replica_schedule = mock.Mock() + transfer_schedule = mock.Mock() context.trust_id = mock.sentinel.trust_id - mock_ReplicaSchedule.return_value = replica_schedule + mock_transfer_schedule.return_value = transfer_schedule result = self.server.create_replica_schedule( context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.schedule, mock.sentinel.enabled, mock.sentinel.exp_date, @@ -4807,17 +4807,17 @@ def test_create_replica_schedule( ) self.assertEqual( ( - replica_schedule.replica, - replica_schedule.replica_id, - replica_schedule.schedule, - replica_schedule.expiration_date, - replica_schedule.enabled, - replica_schedule.shutdown_instance, - replica_schedule.trust_id + transfer_schedule.transfer, + transfer_schedule.transfer_id, + transfer_schedule.schedule, + transfer_schedule.expiration_date, + transfer_schedule.enabled, + transfer_schedule.shutdown_instance, + transfer_schedule.trust_id ), ( mock_get_replica.return_value, - mock.sentinel.replica_id, + 
mock.sentinel.transfer_id, mock.sentinel.schedule, mock.sentinel.exp_date, mock.sentinel.enabled, @@ -4828,32 +4828,32 @@ def test_create_replica_schedule( mock_create_trust.assert_called_once_with(context) mock_get_replica.assert_called_once_with( context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, ) - mock_ReplicaSchedule.assert_called_once() - mock_add_replica_schedule.assert_called_once_with( + mock_transfer_schedule.assert_called_once() + mock_add_transfer_schedule.assert_called_once_with( context, - replica_schedule, + transfer_schedule, mock.ANY ) mock_get_replica_schedule.assert_called_once_with( context, - mock.sentinel.replica_id, - replica_schedule.id + mock.sentinel.transfer_id, + transfer_schedule.id ) @mock.patch.object(server.ConductorServerEndpoint, "_get_replica_schedule") - @mock.patch.object(db_api, "update_replica_schedule") + @mock.patch.object(db_api, "update_transfer_schedule") def test_update_replica_schedule( self, - mock_update_replica_schedule, + mock_update_transfer_schedule, mock_get_replica_schedule ): result = testutils.get_wrapped_function( self.server.update_replica_schedule)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.schedule_id, mock.sentinel.updated_values, ) @@ -4862,9 +4862,9 @@ def test_update_replica_schedule( mock_get_replica_schedule.return_value, result ) - mock_update_replica_schedule.assert_called_once_with( + mock_update_transfer_schedule.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.schedule_id, mock.sentinel.updated_values, None, @@ -4872,7 +4872,7 @@ def test_update_replica_schedule( ) mock_get_replica_schedule.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.schedule_id, ) @@ -4917,12 +4917,12 @@ def test_cleanup_schedule_resources( mock_delete_trust.assert_called_once_with( 
mock_get_admin_context.return_value) - @mock.patch.object(db_api, "delete_replica_schedule") + @mock.patch.object(db_api, "delete_transfer_schedule") @mock.patch.object(server.ConductorServerEndpoint, "_get_replica") def test_delete_replica_schedule( self, mock_get_replica, - mock_delete_replica_schedule + mock_delete_transfer_schedule ): replica = mock.Mock() replica.last_execution_status = constants.EXECUTION_STATUS_COMPLETED @@ -4931,24 +4931,24 @@ def test_delete_replica_schedule( testutils.get_wrapped_function(self.server.delete_replica_schedule)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.schedule_id ) mock_get_replica.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id + mock.sentinel.transfer_id ) - mock_delete_replica_schedule.assert_called_once_with( + mock_delete_transfer_schedule.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.schedule_id, None, mock.ANY ) mock_get_replica.reset_mock() - mock_delete_replica_schedule.reset_mock() + mock_delete_transfer_schedule.reset_mock() replica.last_execution_status = constants.EXECUTION_STATUS_RUNNING self.assertRaises( @@ -4957,18 +4957,18 @@ def test_delete_replica_schedule( self.server.delete_replica_schedule), self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.schedule_id ) mock_get_replica.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id + mock.sentinel.transfer_id ) - mock_delete_replica_schedule.assert_not_called() + mock_delete_transfer_schedule.assert_not_called() - @mock.patch.object(db_api, "get_replica_schedules") - def test_get_replica_schedules(self, mock_get_replica_schedules): + @mock.patch.object(db_api, "get_transfer_schedules") + def test_get_replica_schedules(self, mock_get_transfer_schedules): result = testutils.get_wrapped_function( 
self.server.get_replica_schedules)( self.server, @@ -4978,33 +4978,33 @@ def test_get_replica_schedules(self, mock_get_replica_schedules): ) self.assertEqual( - mock_get_replica_schedules.return_value, + mock_get_transfer_schedules.return_value, result ) - mock_get_replica_schedules.assert_called_once_with( + mock_get_transfer_schedules.assert_called_once_with( mock.sentinel.context, - replica_id=None, + transfer_id=None, expired=True ) - @mock.patch.object(db_api, "get_replica_schedule") - def test_get_replica_schedule(self, mock_get_replica_schedule): + @mock.patch.object(db_api, "get_transfer_schedule") + def test_get_replica_schedule(self, mock_get_transfer_schedule): result = testutils.get_wrapped_function( self.server.get_replica_schedule)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.schedule_id, expired=True ) self.assertEqual( - mock_get_replica_schedule.return_value, + mock_get_transfer_schedule.return_value, result ) - mock_get_replica_schedule.assert_called_once_with( + mock_get_transfer_schedule.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, mock.sentinel.schedule_id, expired=True ) @@ -5012,7 +5012,7 @@ def test_get_replica_schedule(self, mock_get_replica_schedule): @mock.patch.object(server.ConductorServerEndpoint, "get_replica_tasks_execution") @mock.patch.object(server.ConductorServerEndpoint, "_begin_tasks") - @mock.patch.object(db_api, "add_replica_tasks_execution") + @mock.patch.object(db_api, "add_transfer_tasks_execution") @mock.patch.object(db_api, "update_transfer_action_info_for_instance") @mock.patch.object(server.ConductorServerEndpoint, "_check_execution_tasks_sanity") @@ -5025,14 +5025,14 @@ def test_get_replica_schedule(self, mock_get_replica_schedule): "_check_replica_running_executions") @mock.patch.object(server.ConductorServerEndpoint, "_check_minion_pools_for_action") - @mock.patch.object(models, "Replica") + 
@mock.patch.object(models, "Transfer") @mock.patch.object(server.ConductorServerEndpoint, "_get_replica") @ddt.file_data("data/update_replica_config.yml") @ddt.unpack def test_update_replica( self, mock_get_replica, - mock_Replica, + mock_transfer, mock_check_minion_pools_for_action, mock_check_replica_running_executions, mock_check_valid_replica_tasks_execution, @@ -5041,27 +5041,27 @@ def test_update_replica( mock_create_task, mock_check_execution_tasks_sanity, mock_update_transfer_action_info_for_instance, - mock_add_replica_tasks_execution, + mock_add_transfer_tasks_execution, mock_begin_tasks, mock_get_replica_tasks_execution, config, has_updated_values, has_replica_instance ): - replica = mock.Mock() + transfer = mock.Mock() dummy = mock.Mock() execution = mock.Mock() - replica.instances = config['replica'].get("instances", []) - replica.info = config['replica'].get("info", {}) - mock_get_replica.return_value = replica - mock_Replica.return_value = dummy + transfer.instances = config['replica'].get("instances", []) + transfer.info = config['replica'].get("info", {}) + mock_get_replica.return_value = transfer + mock_transfer.return_value = dummy mock_TasksExecution.return_value = execution updated_properties = config.get("updated_properties", {}) result = testutils.get_wrapped_function(self.server.update_replica)( self.server, mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, updated_properties ) @@ -5071,7 +5071,7 @@ def test_update_replica( ) mock_get_replica.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, include_task_info=True ) mock_check_replica_running_executions.assert_called_once_with( @@ -5088,20 +5088,20 @@ def test_update_replica( ) mock_check_execution_tasks_sanity.assert_called_once_with( execution, - replica.info + transfer.info ) - mock_add_replica_tasks_execution.assert_called_once_with( + mock_add_transfer_tasks_execution.assert_called_once_with( 
mock.sentinel.context, execution ) mock_begin_tasks.assert_called_once_with( mock.sentinel.context, - replica, + transfer, execution ) mock_get_replica_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock.sentinel.replica_id, + mock.sentinel.transfer_id, execution.id ) if has_updated_values: @@ -5119,7 +5119,7 @@ def test_update_replica( expected_sanitize_task_info_calls.append( mock.call(mock.ANY)) expected_sanitize_task_info_calls.append( - mock.call(replica.info[instance])) + mock.call(transfer.info[instance])) create_task_calls.append(mock.call( instance, constants.TASK_TYPE_GET_INSTANCE_INFO, @@ -5136,9 +5136,9 @@ def test_update_replica( update_transfer_action_info_for_instance_calls.append( mock.call( mock.sentinel.context, - replica.id, + transfer.id, instance, - replica.info[instance]) + transfer.info[instance]) ) mock_sanitize_task_info.assert_has_calls( expected_sanitize_task_info_calls) diff --git a/coriolis/tests/replica_cron/rpc/test_server.py b/coriolis/tests/replica_cron/rpc/test_server.py index ba06523a..1fdb3ca3 100644 --- a/coriolis/tests/replica_cron/rpc/test_server.py +++ b/coriolis/tests/replica_cron/rpc/test_server.py @@ -27,10 +27,10 @@ def test__trigger_replica(self): result = server._trigger_replica( mock.sentinel.ctxt, mock_conductor_client, - mock.sentinel.replica_id, False) + mock.sentinel.transfer_id, False) mock_conductor_client.execute_replica_tasks.assert_called_once_with( - mock.sentinel.ctxt, mock.sentinel.replica_id, False) + mock.sentinel.ctxt, mock.sentinel.transfer_id, False) self.assertEqual( result, 'Execution %s for Replica %s' % ( diff --git a/coriolis/tests/replica_cron/test_api.py b/coriolis/tests/replica_cron/test_api.py index a78434e4..a770b4e2 100644 --- a/coriolis/tests/replica_cron/test_api.py +++ b/coriolis/tests/replica_cron/test_api.py @@ -16,7 +16,7 @@ def setUp(self): self.rpc_client = mock.MagicMock() self.api._rpc_client = self.rpc_client self.ctxt = mock.sentinel.ctxt - self.replica_id = 
mock.sentinel.replica_id + self.replica_id = mock.sentinel.transfer_id self.schedule_id = mock.sentinel.schedule_id def test_create(self): diff --git a/coriolis/tests/replica_tasks_executions/test_api.py b/coriolis/tests/replica_tasks_executions/test_api.py index 3ae28f41..077e2876 100644 --- a/coriolis/tests/replica_tasks_executions/test_api.py +++ b/coriolis/tests/replica_tasks_executions/test_api.py @@ -16,7 +16,7 @@ def setUp(self): self.rpc_client = mock.MagicMock() self.api._rpc_client = self.rpc_client self.ctxt = mock.sentinel.ctxt - self.replica_id = mock.sentinel.replica_id + self.replica_id = mock.sentinel.transfer_id self.execution_id = mock.sentinel.execution_id def test_create(self): diff --git a/coriolis/tests/replicas/test_api.py b/coriolis/tests/replicas/test_api.py index 3611ffb9..a948da8f 100644 --- a/coriolis/tests/replicas/test_api.py +++ b/coriolis/tests/replicas/test_api.py @@ -16,7 +16,7 @@ def setUp(self): self.rpc_client = mock.MagicMock() self.api._rpc_client = self.rpc_client self.ctxt = mock.sentinel.ctxt - self.replica_id = mock.sentinel.replica_id + self.replica_id = mock.sentinel.transfer_id def test_create(self): origin_endpoint_id = mock.sentinel.origin_endpoint_id From 3bad643c8ec3cfdce44d6ea790acd5ff37970100 Mon Sep 17 00:00:00 2001 From: Daniel Vincze Date: Tue, 5 Nov 2024 12:54:09 +0200 Subject: [PATCH 22/24] Add DB API unit tests --- coriolis/db/api.py | 2 +- coriolis/providers/backup_writers.py | 2 +- coriolis/tests/db/sqlalchemy/test_api.py | 4 + coriolis/tests/db/sqlalchemy/test_models.py | 27 +- coriolis/tests/db/test_api.py | 1112 ++++++++++++++++++- requirements.txt | 1 + tox.ini | 2 +- 7 files changed, 1119 insertions(+), 31 deletions(-) diff --git a/coriolis/db/api.py b/coriolis/db/api.py index 06361c6a..cd5111e7 100644 --- a/coriolis/db/api.py +++ b/coriolis/db/api.py @@ -435,7 +435,7 @@ def get_transfers(context, q = q.options(orm.undefer('info')) q = q.filter() if transfer_scenario: - 
q.filter(models.Transfer.scenario == transfer_scenario) + q = q.filter(models.Transfer.scenario == transfer_scenario) if is_user_context(context): q = q.filter( models.Transfer.project_id == context.project_id) diff --git a/coriolis/providers/backup_writers.py b/coriolis/providers/backup_writers.py index fede3a35..d746a160 100644 --- a/coriolis/providers/backup_writers.py +++ b/coriolis/providers/backup_writers.py @@ -254,7 +254,7 @@ def from_connection_info(cls, info, volumes_info): class SSHBackupWriterImpl(BaseBackupWriterImpl): - def __init__(self, path, disk_id, compress_transfer=None, + def __init__(self, path, disk_id, compress_transfer=False, encoder_count=3): self._msg_id = None self._stdin = None diff --git a/coriolis/tests/db/sqlalchemy/test_api.py b/coriolis/tests/db/sqlalchemy/test_api.py index 2838dc29..de79b5d8 100644 --- a/coriolis/tests/db/sqlalchemy/test_api.py +++ b/coriolis/tests/db/sqlalchemy/test_api.py @@ -15,6 +15,10 @@ class DatabaseSqlalchemyApiTestCase(test_base.CoriolisBaseTestCase): """Test suite for the Coriolis Database Sqlalchemy api.""" + def tearDown(self): + api._facade = None + super(DatabaseSqlalchemyApiTestCase, self).tearDown() + @mock.patch.object(db_session, 'EngineFacade') def test_get_facade_none(self, mock_EngineFacade): cfg.CONF.database.connection = mock.sentinel.connection diff --git a/coriolis/tests/db/sqlalchemy/test_models.py b/coriolis/tests/db/sqlalchemy/test_models.py index faee2c0b..b2209159 100644 --- a/coriolis/tests/db/sqlalchemy/test_models.py +++ b/coriolis/tests/db/sqlalchemy/test_models.py @@ -324,10 +324,10 @@ class ReplicaTestCase(test_base.CoriolisBaseTestCase): """Test suite for the Coriolis Database Sqlalchemy Replica.""" def test_to_dict(self): - replica = models.Replica() - replica.id = mock.sentinel.id + transfer = models.Transfer() + transfer.id = mock.sentinel.id - result = replica.to_dict() + result = transfer.to_dict() self.assertEqual( mock.sentinel.id, @@ -335,23 +335,24 @@ def 
test_to_dict(self): ) -class MigrationTestCase(test_base.CoriolisBaseTestCase): - """Test suite for the Coriolis Database Sqlalchemy Migration.""" +class DeploymentTestCase(test_base.CoriolisBaseTestCase): + """Test suite for the Coriolis Database Sqlalchemy Deployment.""" def test_to_dict(self): - migration = models.Migration() - migration.id = mock.sentinel.id - migration.replica_id = mock.sentinel.replica_id - migration.shutdown_instances = mock.sentinel.shutdown_instances - migration.replication_count = mock.sentinel.replication_count + transfer = mock.MagicMock(id=mock.sentinel.transfer_id) + deployment = models.Deployment() + deployment.id = mock.sentinel.id + deployment.transfer_id = transfer.id + deployment.transfer = transfer + deployment.shutdown_instances = mock.sentinel.shutdown_instances expected_result = { "id": mock.sentinel.id, - "replica_id": mock.sentinel.replica_id, + "transfer_id": mock.sentinel.transfer_id, "shutdown_instances": mock.sentinel.shutdown_instances, - "replication_count": mock.sentinel.replication_count, + "transfer_scenario_type": transfer.scenario, } - result = migration.to_dict() + result = deployment.to_dict() assert all(item in result.items() for item in expected_result.items()) diff --git a/coriolis/tests/db/test_api.py b/coriolis/tests/db/test_api.py index 9c3980ec..76fd5f03 100644 --- a/coriolis/tests/db/test_api.py +++ b/coriolis/tests/db/test_api.py @@ -1,29 +1,1111 @@ # Copyright 2017 Cloudbase Solutions Srl # All Rights Reserved. 
- +import datetime from unittest import mock +import uuid + +import ddt +from oslo_utils import timeutils +import sqlalchemy.orm +from coriolis import constants from coriolis.db import api +from coriolis.db.sqlalchemy import api as sqlalchemy_api +from coriolis.db.sqlalchemy import models from coriolis import exception from coriolis.tests import test_base from coriolis.tests import testutils +CONTEXT_MOCK = mock.MagicMock() +DEFAULT_INSTANCE = "instance1" +DEFAULT_USER_ID = "1" +DEFAULT_PROJECT_ID = "1" +DEFAULT_TASK_INFO = {DEFAULT_INSTANCE: {"volumes_info": []}} +DEFAULT_EXECUTION_STATUS = constants.EXECUTION_STATUS_RUNNING + + +def get_valid_endpoint( + endpoint_id=None, user_id=DEFAULT_USER_ID, + project_id=DEFAULT_PROJECT_ID, connection_info=None, + endpoint_type="openstack", name="test_name", + description="Endpoint Description"): + if endpoint_id is None: + endpoint_id = str(uuid.uuid4()) + if connection_info is None: + connection_info = {"conn_info": {"secret": "info"}} + + endpoint = models.Endpoint() + endpoint.id = endpoint_id + endpoint.user_id = user_id + endpoint.project_id = project_id + endpoint.connection_info = connection_info + endpoint.type = endpoint_type + endpoint.name = name + endpoint.description = description + + return endpoint + + +def create_valid_tasks_execution(): + valid_tasks_execution = models.TasksExecution() + valid_tasks_execution.id = str(uuid.uuid4()) + valid_tasks_execution.status = DEFAULT_EXECUTION_STATUS + valid_tasks_execution.type = constants.EXECUTION_TYPE_REPLICA_EXECUTION + valid_tasks_execution.number = 1 + + valid_task = models.Task() + valid_task.id = str(uuid.uuid4()) + valid_task.execution = valid_tasks_execution + valid_task.instance = DEFAULT_INSTANCE + valid_task.status = constants.TASK_STATUS_RUNNING + valid_task.task_type = ( + constants.TASK_TYPE_VALIDATE_REPLICA_SOURCE_INPUTS) + valid_task.index = 1 + valid_task.on_error = False + + valid_progress_update = models.TaskProgressUpdate() + 
valid_progress_update.id = str(uuid.uuid4()) + valid_progress_update.task = valid_task + valid_progress_update.index = 1 + valid_progress_update.current_step = 0 + + valid_task_event = models.TaskEvent() + valid_task_event.id = str(uuid.uuid4()) + valid_task_event.task = valid_task + valid_task_event.level = constants.TASK_EVENT_INFO + valid_task_event.index = 1 + valid_task_event.message = "event message test" + return valid_tasks_execution + + +class BaseDBAPITestCase(test_base.CoriolisBaseTestCase): + + valid_data = { + "user_scope": {}, + "outer_scope": {} + } + + @classmethod + def setup_scoped_data(cls, region_id, project_id="1"): + data = dict() + valid_endpoint_source = get_valid_endpoint( + endpoint_type='vmware', project_id=project_id) + cls.session.add(valid_endpoint_source) + data['source_endpoint'] = valid_endpoint_source + valid_endpoint_destination = get_valid_endpoint( + endpoint_type='openstack', project_id=project_id) + cls.session.add(valid_endpoint_destination) + data['destination_endpoint'] = valid_endpoint_destination + + valid_endpoint_region_mapping = models.EndpointRegionMapping() + valid_endpoint_region_mapping.id = str(uuid.uuid4()) + valid_endpoint_region_mapping.endpoint_id = valid_endpoint_source.id + valid_endpoint_region_mapping.region_id = region_id + cls.session.add(valid_endpoint_region_mapping) + data['endpoint_mapping'] = valid_endpoint_region_mapping + + valid_transfer = models.Transfer() + valid_transfer.id = str(uuid.uuid4()) + valid_transfer.user_id = project_id + valid_transfer.project_id = project_id + valid_transfer.base_id = valid_transfer.id + valid_transfer.scenario = constants.REPLICA_SCENARIO_REPLICA + valid_transfer.last_execution_status = DEFAULT_EXECUTION_STATUS + valid_transfer.executions = [] + valid_transfer.instances = [DEFAULT_INSTANCE] + valid_transfer.info = DEFAULT_TASK_INFO + valid_transfer.origin_endpoint_id = valid_endpoint_source.id + valid_transfer.destination_endpoint_id = 
valid_endpoint_destination.id + cls.session.add(valid_transfer) + data['transfer'] = valid_transfer + + valid_tasks_execution = create_valid_tasks_execution() + valid_tasks_execution.action = valid_transfer + cls.session.add(valid_tasks_execution) + data['tasks_execution'] = valid_tasks_execution + data['task'] = valid_tasks_execution.tasks[0] + + valid_transfer_schedule = models.TransferSchedule() + valid_transfer_schedule.id = str(uuid.uuid4()) + valid_transfer_schedule.transfer = valid_transfer + valid_transfer_schedule.schedule = {} + valid_transfer_schedule.expiration_date = timeutils.utcnow() + valid_transfer_schedule.enabled = True + valid_transfer_schedule.shutdown_instance = False + valid_transfer_schedule.trust_id = str(uuid.uuid4()) + cls.session.add(valid_transfer_schedule) + data['transfer_schedule'] = valid_transfer_schedule + + valid_deployment = models.Deployment() + valid_deployment.id = str(uuid.uuid4()) + valid_deployment.user_id = project_id + valid_deployment.project_id = project_id + valid_deployment.base_id = valid_deployment.id + valid_deployment.last_execution_status = DEFAULT_EXECUTION_STATUS + valid_deployment.instances = [DEFAULT_INSTANCE] + valid_deployment.info = DEFAULT_TASK_INFO + valid_deployment.origin_endpoint_id = valid_endpoint_source.id + valid_deployment.destination_endpoint_id = ( + valid_endpoint_destination.id) + valid_deployment.transfer = valid_transfer + + deployment_execution = create_valid_tasks_execution() + deployment_execution.action = valid_deployment + cls.session.add(valid_deployment) + data['deployment'] = valid_deployment + data['deployment_execution'] = deployment_execution + + return data + + @classmethod + def setup_database_data(cls): + cls.valid_region = models.Region() + cls.valid_region.id = str(uuid.uuid4()) + cls.valid_region.name = "region1" + cls.valid_region.enabled = True + cls.session.add(cls.valid_region) + + cls.valid_data['user_scope'] = cls.setup_scoped_data( + cls.valid_region.id) + 
cls.valid_data['outer_scope'] = cls.setup_scoped_data( + cls.valid_region.id, project_id="2") + cls.session.commit() + + @classmethod + def setUpClass(cls): + super(BaseDBAPITestCase, cls).setUpClass() + with mock.patch.object(sqlalchemy_api, 'CONF') as mock_conf: + mock_conf.database.connection = "sqlite://" + engine = api.get_engine() + models.BASE.metadata.create_all(engine) + cls.session = api.get_session() + cls.setup_database_data() + + def setUp(self): + super(BaseDBAPITestCase, self).setUp() + self.context = CONTEXT_MOCK + self.context.session = self.session + self.context.show_deleted = False + self.context.user = DEFAULT_USER_ID + self.context.project_id = DEFAULT_PROJECT_ID + self.context.is_admin = False + + def tearDown(self): + self.context.reset_mock() + super(BaseDBAPITestCase, self).tearDown() + + @classmethod + def tearDownClass(cls): + cls.session.rollback() + cls.session.close() + super(BaseDBAPITestCase, cls).tearDownClass() + + +@ddt.ddt +class DBAPITestCase(BaseDBAPITestCase): + """Test suite for the common Coriolis DB API.""" + + def test_get_engine(self): + self.assertEqual(api.get_engine(), api.IMPL.get_engine()) + + def test_get_session(self): + self.assertIsInstance(api.get_session(), sqlalchemy.orm.Session) + + @mock.patch.object(api, 'IMPL') + def test_db_sync(self, mock_impl): + self.assertEqual( + api.db_sync(mock.sentinel.engine, version=mock.sentinel.version), + mock_impl.db_sync.return_value) + mock_impl.db_sync.assert_called_once_with( + mock.sentinel.engine, version=mock.sentinel.version) + + @mock.patch.object(api, 'IMPL') + def test_db_version(self, mock_impl): + self.assertEqual( + api.db_version(mock.sentinel.engine), + mock_impl.db_version.return_value) + mock_impl.db_version.assert_called_once_with(mock.sentinel.engine) + + def test__session(self): + self.assertEqual(api._session(self.context), self.context.session) + + @mock.patch.object(api, 'get_session') + def test__session_no_context(self, mock_get_session): + 
self.assertEqual( + api._session(None), + mock_get_session.return_value) + + @mock.patch.object(api, 'get_session') + def test__session_sessionless_context(self, mock_get_session): + context = mock.Mock(session=None) + self.assertEqual( + api._session(context), + mock_get_session.return_value) + + @ddt.data( + {"kwargs": None, "expected_result": False}, + {"kwargs": {}, "expected_result": False}, + {"kwargs": {"user_id": None}, "expected_result": False}, + {"kwargs": {"user_id": "1", "project_id": None}, + "expected_result": False}, + {"kwargs": {"user_id": "1", "project_id": "1", "is_admin": True}, + "expected_result": False}, + {"kwargs": {"user_id": "1", "project_id": "1", "is_admin": False}, + "expected_result": True}, + ) + def test_is_user_context(self, data): + kwargs = data.get('kwargs') + if kwargs is None: + context = None + else: + context = mock.Mock(**data.get('kwargs', {})) + self.assertEqual( + api.is_user_context(context), data.get('expected_result')) + + @mock.patch.object(api, '_session') + def test__model_query(self, mock_session): + self.assertEqual( + api._model_query(mock.sentinel.context, mock.sentinel.model), + mock_session.return_value.query.return_value) + mock_session.assert_called_once_with( + mock.sentinel.context) + mock_session.return_value.query.assert_called_once_with( + mock.sentinel.model) + + def test__update_sqlalchemy_object_fields_non_dict_values(self): + self.assertRaises( + exception.InvalidInput, api._update_sqlalchemy_object_fields, + mock.ANY, mock.ANY, None) + + def test__update_sqlalchemy_object_fields_conflict(self): + updateable_fields = ["field1", "field2"] + values_to_update = {"field1": "value1", "field3": "value3"} + self.assertRaises( + exception.Conflict, api._update_sqlalchemy_object_fields, + mock.ANY, updateable_fields, values_to_update) + + def test__update_sqlalchemy_object_fields_invalid_obj_field(self): + self.assertRaises( + exception.InvalidInput, api._update_sqlalchemy_object_fields, + models.Endpoint, 
["invalid_field"], {"invalid_field": "new_value"}) + + def test__update_sqlalchemy_object_fields(self): + obj = models.Endpoint() + obj.description = "initial test description" + new_description = "updated test description" + + api._update_sqlalchemy_object_fields( + obj, ["description"], {"description": new_description}) + self.assertEqual(obj.description, new_description) + + def test__soft_delete_aware_query_show_deleted_kwarg(self): + valid_endpoint = get_valid_endpoint() + self.session.add(valid_endpoint) + self.session.commit() + + testutils.get_wrapped_function(api.delete_endpoint)( + self.context, valid_endpoint.id) + self.context.show_deleted = False + result = api._soft_delete_aware_query( + self.context, models.Endpoint, show_deleted=True).filter( + models.Endpoint.id == valid_endpoint.id).first() + self.assertEqual(result.id, valid_endpoint.id) + self.assertIsNotNone(result.deleted_at) + + def test__soft_delete_aware_query_context_show_deleted(self): + valid_endpoint = get_valid_endpoint() + self.session.add(valid_endpoint) + self.session.commit() + + testutils.get_wrapped_function(api.delete_endpoint)( + self.context, valid_endpoint.id) + self.context.show_deleted = True + result = api._soft_delete_aware_query( + self.context, models.Endpoint).filter( + models.Endpoint.id == valid_endpoint.id).first() + self.assertEqual(result.id, valid_endpoint.id) + self.assertIsNotNone(result.deleted_at) + + +class EndpointDBAPITestCase(BaseDBAPITestCase): + + @classmethod + def setUpClass(cls): + super(EndpointDBAPITestCase, cls).setUpClass() + cls.valid_endpoint_source = cls.valid_data['user_scope'].get( + 'source_endpoint') + cls.valid_endpoint_region_mapping = cls.valid_data['user_scope'].get( + 'endpoint_mapping') + cls.outer_scope_endpoint = cls.valid_data['outer_scope'].get( + 'source_endpoint') + + def test_get_endpoints(self): + result = api.get_endpoints(self.context) + self.assertIn(self.valid_endpoint_source, result) + + def 
test_get_endpoints_admin(self): + self.context.is_admin = True + result = api.get_endpoints(self.context) + self.assertIn(self.outer_scope_endpoint, result) + + def test_get_endpoints_out_of_user_scope(self): + result = api.get_endpoints(self.context) + self.assertNotIn(self.outer_scope_endpoint, result) + + def test_get_endpoint(self): + result = api.get_endpoint(self.context, self.valid_endpoint_source.id) + self.assertEqual(result, self.valid_endpoint_source) + + def test_get_endpoint_admin_context(self): + self.context.is_admin = True + result = api.get_endpoint(self.context, self.outer_scope_endpoint.id) + self.assertEqual(result, self.outer_scope_endpoint) + + def test_get_endpoint_out_of_user_scope(self): + result = api.get_endpoint(self.context, self.outer_scope_endpoint.id) + self.assertIsNone(result) + + def test_add_endpoint(self): + self.context.user = "2" + self.context.project_id = "2" + new_endpoint_id = str(uuid.uuid4()) + new_endpoint = get_valid_endpoint( + endpoint_id=new_endpoint_id, + connection_info={"conn_info": {"new": "info"}}, + endpoint_type="vmware", name="new_endpoint", + description="New Endpoint") + api.add_endpoint(self.context, new_endpoint) + result = api.get_endpoint(self.context, new_endpoint_id) + self.assertEqual(result, new_endpoint) + + def test_update_endpoint_not_found(self): + self.assertRaises( + exception.NotFound, api.update_endpoint, + self.context, "invalid_id", mock.ANY) + + def test_update_endpoint_invalid_values(self): + self.assertRaises( + exception.InvalidInput, api.update_endpoint, + self.context, self.valid_endpoint_source.id, None) + + def test_update_endpoint_invalid_column(self): + self.assertRaises( + exception.Conflict, api.update_endpoint, + self.context, self.valid_endpoint_source.id, {"type": "openstack"}) + + def test_update_endpoint_region_not_found(self): + self.assertRaises( + exception.NotFound, api.update_endpoint, self.context, + self.valid_endpoint_source.id, + {"mapped_regions": 
["invalid_region_id"]}) + + def test_update_endpoint(self): + new_region_id = str(uuid.uuid4()) + new_endpoint_name = "new_name" + new_region = models.Region() + new_region.id = new_region_id + new_region.name = "new_region" + new_region.enabled = True + self.session.add(new_region) + self.session.commit() + + api.update_endpoint( + self.context, self.valid_endpoint_source.id, + {"mapped_regions": [new_region_id], "name": new_endpoint_name}) + result = api.get_endpoint(self.context, self.valid_endpoint_source.id) + old_endpoint_region_mapping = api.get_endpoint_region_mapping( + self.context, self.valid_endpoint_source.id, self.valid_region.id) + new_endpoint_region_mapping = api.get_endpoint_region_mapping( + self.context, self.valid_endpoint_source.id, new_region_id)[0] + self.assertEqual(result.name, new_endpoint_name) + self.assertEqual(old_endpoint_region_mapping, []) + self.assertEqual(new_endpoint_region_mapping.region_id, new_region_id) + self.assertEqual( + new_endpoint_region_mapping.endpoint_id, + self.valid_endpoint_source.id) + + @mock.patch.object(api, 'delete_endpoint_region_mapping') + @mock.patch.object(api, 'add_endpoint_region_mapping') + @mock.patch.object(api, 'get_region') + @mock.patch.object(api, '_update_sqlalchemy_object_fields') + def test_update_endpoint_remapping_failure( + self, mock_update_obj, mock_get_region, mock_add_mapping, + mock_delete_mapping): + mock_add_mapping.side_effect = [Exception, None] + + self.assertRaises( + Exception, api.update_endpoint, + self.context, self.valid_endpoint_source.id, + {"mapped_regions": [mock.sentinel.region_id]}) + mock_get_region.assert_called_with( + self.context, mock.sentinel.region_id) + + mock_delete_mapping.side_effect = Exception + mock_update_obj.side_effect = Exception + self.assertRaises( + Exception, api.update_endpoint, self.context, + self.valid_endpoint_source.id, + {"mapped_regions": [mock.sentinel.region_id]}) + + def test_delete_endpoint(self): + new_endpoint = 
get_valid_endpoint() + new_endpoint_id = new_endpoint.id + new_endpoint_region_mapping = self.valid_endpoint_region_mapping + new_endpoint_region_mapping.endpoint_id = new_endpoint_id + api.add_endpoint(self.context, new_endpoint) + + api.delete_endpoint(self.context, new_endpoint_id) + result = api.get_endpoint(self.context, new_endpoint_id) + mappings = api.get_endpoint_region_mapping( + self.context, new_endpoint_id, self.valid_region.id) + self.assertIsNone(result) + self.assertEqual(mappings, []) + + def test_delete_endpoint_not_found(self): + self.assertRaises( + exception.NotFound, api.delete_endpoint, self.context, "no_id") + + def test_delete_endpoint_admin_context(self): + self.context.is_admin = True + self.context.show_deleted = True + new_outer_scope_endpoint = get_valid_endpoint() + new_outer_scope_endpoint.user_id = "3" + new_outer_scope_endpoint.project_id = "3" + api.add_endpoint(self.context, new_outer_scope_endpoint) + + api.delete_endpoint( + self.context, new_outer_scope_endpoint.id) + result = api.get_endpoint(self.context, new_outer_scope_endpoint.id) + self.assertIsNotNone(result.deleted_at) + + def test_delete_endpoint_out_of_user_scope(self): + new_outer_scope_endpoint = get_valid_endpoint( + user_id="3", project_id="3") + self.session.add(new_outer_scope_endpoint) + self.session.commit() + + self.assertRaises( + exception.NotFound, api.delete_endpoint, self.context, + new_outer_scope_endpoint.id) + + +class TransferTasksExecutionDBAPITestCase(BaseDBAPITestCase): + + @classmethod + def setUpClass(cls): + super(TransferTasksExecutionDBAPITestCase, cls).setUpClass() + cls.valid_transfer = cls.valid_data['user_scope'].get('transfer') + cls.valid_task = cls.valid_data['user_scope'].get('task') + cls.valid_tasks_execution = cls.valid_data['user_scope'].get( + 'tasks_execution') + cls.outer_scope_transfer = cls.valid_data['outer_scope'].get( + 'transfer') + cls.outer_scope_tasks_execution = cls.valid_data['outer_scope'].get( + "tasks_execution") 
+ + def setUp(self): + super(TransferTasksExecutionDBAPITestCase, self).setUp() + self.outer_scope_tasks_execution.status = DEFAULT_EXECUTION_STATUS + self.valid_tasks_execution.status = DEFAULT_EXECUTION_STATUS + + @staticmethod + def _create_dummy_execution(action): + new_tasks_execution = models.TasksExecution() + new_tasks_execution.id = str(uuid.uuid4()) + new_tasks_execution.action = action + new_tasks_execution.status = constants.EXECUTION_STATUS_UNEXECUTED + new_tasks_execution.type = constants.EXECUTION_TYPE_REPLICA_EXECUTION + new_tasks_execution.number = 0 + + return new_tasks_execution + + def test_get_transfer_tasks_executions_include_info(self): + result = api.get_transfer_tasks_executions( + self.context, self.valid_transfer.id, include_task_info=True) + self.assertTrue(hasattr(result[0].action, 'info')) + + def test_get_transfer_tasks_executions_include_tasks(self): + result = api.get_transfer_tasks_executions( + self.context, self.valid_transfer.id, include_tasks=True) + tasks = [] + for e in result: + tasks.extend(e.tasks) + + self.assertIn(self.valid_task, tasks) + + def test_get_transfer_tasks_executions_to_dict(self): + result = api.get_transfer_tasks_executions( + self.context, self.valid_transfer.id, to_dict=True) + execution_ids = [e['id'] for e in result] + self.assertIn(self.valid_tasks_execution.id, execution_ids) + + def test_get_transfer_tasks_executions(self): + result = api.get_transfer_tasks_executions( + self.context, self.valid_transfer.id) + self.assertIn(self.valid_tasks_execution, result) + + def test_get_transfer_tasks_executions_admin(self): + self.context.is_admin = True + result = api.get_transfer_tasks_executions( + self.context, self.outer_scope_transfer.id) + self.assertIn(self.outer_scope_tasks_execution, result) + + def test_get_transfer_tasks_execution_out_of_user_scope(self): + result = api.get_transfer_tasks_executions( + self.context, self.outer_scope_transfer.id) + self.assertEqual(result, []) + + def 
test_get_transfer_tasks_execution(self): + result = api.get_transfer_tasks_execution( + self.context, self.valid_transfer.id, + self.valid_tasks_execution.id) + self.assertEqual(result, self.valid_tasks_execution) + + def test_get_transfer_tasks_execution_admin(self): + self.context.is_admin = True + result = api.get_transfer_tasks_execution( + self.context, self.outer_scope_transfer.id, + self.outer_scope_tasks_execution.id) + self.assertEqual(result, self.outer_scope_tasks_execution) + + def test_get_transfer_tasks_execution_out_of_user_context(self): + result = api.get_transfer_tasks_execution( + self.context, self.outer_scope_transfer.id, + self.outer_scope_tasks_execution.id) + self.assertIsNone(result) + + def test_get_transfer_tasks_execution_include_task_info(self): + result = api.get_transfer_tasks_execution( + self.context, self.valid_transfer.id, + self.valid_tasks_execution.id, include_task_info=True) + self.assertTrue(hasattr(result.action, 'info')) + + def test_get_transfer_tasks_execution_to_dict(self): + result = api.get_transfer_tasks_execution( + self.context, self.valid_transfer.id, + self.valid_tasks_execution.id, to_dict=True) + self.assertEqual(result['id'], self.valid_tasks_execution.id) + + def test_add_transfer_tasks_execution(self): + new_tasks_execution = self._create_dummy_execution(self.valid_transfer) + + api.add_transfer_tasks_execution(self.context, new_tasks_execution) + result = api.get_transfer_tasks_execution( + self.context, self.valid_transfer.id, new_tasks_execution.id) + self.assertEqual(new_tasks_execution, result) + self.assertGreater(result.number, 0) + + def test_add_transfer_tasks_execution_admin(self): + self.context.is_admin = True + new_tasks_execution = self._create_dummy_execution( + self.outer_scope_transfer) + api.add_transfer_tasks_execution(self.context, new_tasks_execution) + result = api.get_transfer_tasks_execution( + self.context, self.outer_scope_transfer.id, new_tasks_execution.id) + 
self.assertEqual(new_tasks_execution, result) + + def test_add_transfer_tasks_execution_out_of_user_context(self): + new_tasks_execution = self._create_dummy_execution( + self.outer_scope_transfer) + self.assertRaises( + exception.NotAuthorized, api.add_transfer_tasks_execution, + self.context, new_tasks_execution) + + def test_delete_transfer_tasks_execution(self): + new_tasks_execution = self._create_dummy_execution(self.valid_transfer) + api.add_transfer_tasks_execution(self.context, new_tasks_execution) + api.delete_transfer_tasks_execution( + self.context, new_tasks_execution.id) + result = api.get_transfer_tasks_execution( + self.context, self.valid_transfer.id, new_tasks_execution.id) + self.assertIsNone(result) + + def test_delete_transfer_tasks_execution_admin(self): + self.context.is_admin = True + new_tasks_execution = self._create_dummy_execution( + self.outer_scope_transfer) + api.add_transfer_tasks_execution(self.context, new_tasks_execution) + api.delete_transfer_tasks_execution( + self.context, new_tasks_execution.id) + result = api.get_transfer_tasks_execution( + self.context, self.outer_scope_transfer.id, new_tasks_execution.id) + self.assertIsNone(result) + + def test_delete_transfer_tasks_execution_out_of_user_scope(self): + self.context.is_admin = True + new_tasks_execution = self._create_dummy_execution( + self.outer_scope_transfer) + api.add_transfer_tasks_execution(self.context, new_tasks_execution) + + self.context.is_admin = False + self.assertRaises( + exception.NotAuthorized, api.delete_transfer_tasks_execution, + self.context, new_tasks_execution.id) + + def test_delete_transfer_tasks_execution_not_found(self): + self.context.is_admin = True + self.assertRaises( + exception.NotFound, api.delete_transfer_tasks_execution, + self.context, "invalid_id") + + def test_set_execution_status_admin(self): + self.context.is_admin = True + new_status = constants.EXECUTION_STATUS_COMPLETED + result = api.set_execution_status( + self.context, 
self.outer_scope_tasks_execution.id, new_status, + update_action_status=False) + self.assertEqual(result.status, new_status) + + def test_set_execution_status_out_of_user_scope(self): + self.assertRaises( + exception.NotFound, api.set_execution_status, self.context, + self.outer_scope_tasks_execution.id, mock.ANY, + update_action_status=False) + + def test_set_execution_status_not_found(self): + self.assertRaises( + exception.NotFound, api.set_execution_status, self.context, + "invalid_id", mock.ANY, + update_action_status=False) + + def test_set_execution_status_update_action_status(self): + new_status = constants.EXECUTION_STATUS_COMPLETED + api.set_execution_status( + self.context, self.valid_tasks_execution.id, new_status) + self.assertEqual(self.valid_transfer.last_execution_status, new_status) + + +class TransferSchedulesDBAPITestCase(BaseDBAPITestCase): + + @classmethod + def setUpClass(cls): + super(TransferSchedulesDBAPITestCase, cls).setUpClass() + cls.valid_transfer_schedule = cls.valid_data['user_scope'].get( + 'transfer_schedule') + cls.valid_transfer = cls.valid_data['user_scope'].get('transfer') + cls.outer_scope_transfer_schedule = cls.valid_data['outer_scope'].get( + 'transfer_schedule') + cls.outer_scope_transfer = cls.valid_data['outer_scope'].get( + 'transfer') + + @staticmethod + def _create_dummy_transfer_schedule(transfer, expiration_date): + ts = models.TransferSchedule() + ts.id = str(uuid.uuid4()) + ts.transfer = transfer + ts.schedule = {} + ts.expiration_date = expiration_date + ts.enabled = True + ts.shutdown_instance = False + ts.trust_id = str(uuid.uuid4()) + + return ts + + def test__get_transfer_schedules_filter(self): + result = api._get_transfer_schedules_filter(self.context).all() + self.assertIn(self.valid_transfer_schedule, result) + + def test__get_transfer_schedules_filter_admin(self): + self.context.is_admin = True + result = api._get_transfer_schedules_filter( + self.context, 
schedule_id=self.outer_scope_transfer_schedule.id + ).first() + self.assertEqual(result, self.outer_scope_transfer_schedule) + + def test__get_transfer_schedules_filter_out_of_user_context(self): + result = api._get_transfer_schedules_filter( + self.context, schedule_id=self.outer_scope_transfer_schedule.id + ).first() + self.assertIsNone(result) + + def test__get_transfer_schedules_filter_by_transfer(self): + result = api._get_transfer_schedules_filter( + self.context, transfer_id=self.valid_transfer_schedule.transfer_id) + self.assertEqual(result.first(), self.valid_transfer_schedule) + + def test__get_transfer_schedules_filter_by_schedule_id(self): + result = api._get_transfer_schedules_filter( + self.context, schedule_id=self.valid_transfer_schedule.id).first() + self.assertEqual(result, self.valid_transfer_schedule) + + def test__get_transfer_schedules_filter_by_not_expired(self): + expiration_date = timeutils.utcnow() + datetime.timedelta(days=1) + unexpired_transfer_schedule = self._create_dummy_transfer_schedule( + self.valid_transfer, expiration_date=expiration_date) + self.session.add(unexpired_transfer_schedule) + expiration_null_transfer_schedule = ( + self._create_dummy_transfer_schedule( + self.valid_transfer, expiration_date=None)) + self.session.add(expiration_null_transfer_schedule) + result = api._get_transfer_schedules_filter( + self.context, expired=False).all() + self.assertIn(unexpired_transfer_schedule, result) + self.assertIn(expiration_null_transfer_schedule, result) + + def test_get_transfer_schedules(self): + result = api.get_transfer_schedules(self.context) + self.assertIn(self.valid_transfer_schedule, result) + + def test_get_transfer_schedule(self): + result = api.get_transfer_schedule( + self.context, self.valid_transfer.id, + self.valid_transfer_schedule.id) + self.assertEqual(result, self.valid_transfer_schedule) + + def test_update_transfer_schedule(self): + pre_update_mock = mock.Mock() + post_update_mock = mock.Mock() + 
api.update_transfer_schedule( + self.context, self.valid_transfer.id, + self.valid_transfer_schedule.id, {"shutdown_instance": True}, + pre_update_callable=pre_update_mock, + post_update_callable=post_update_mock) + result = api.get_transfer_schedule( + self.context, self.valid_transfer.id, + self.valid_transfer_schedule.id) + self.assertEqual(result.shutdown_instance, True) + pre_update_mock.assert_called_once_with( + schedule=self.valid_transfer_schedule) + post_update_mock.assert_called_once_with( + self.context, self.valid_transfer_schedule) + + def test_delete_transfer_schedule_not_found(self): + self.assertRaises(exception.NotFound, api.delete_transfer_schedule, + self.context, self.valid_transfer.id, "invalid") + + def test_delete_transfer_schedule_admin(self): + self.context.is_admin = True + outer_scope_schedule = self._create_dummy_transfer_schedule( + self.outer_scope_transfer, None) + self.session.add(outer_scope_schedule) + api.delete_transfer_schedule( + self.context, self.outer_scope_transfer.id, + outer_scope_schedule.id) + result = api.get_transfer_schedule( + self.context, self.outer_scope_transfer.id, + outer_scope_schedule.id) + self.assertIsNone(result) + + def test_delete_transfer_schedule_out_of_user_context(self): + outer_scope_schedule = self._create_dummy_transfer_schedule( + self.outer_scope_transfer, None) + self.session.add(outer_scope_schedule) + self.assertRaises( + exception.NotAuthorized, api.delete_transfer_schedule, + self.context, self.outer_scope_transfer.id, + outer_scope_schedule.id) + + def test_delete_transfer_schedule(self): + dummy_transfer_schedule = self._create_dummy_transfer_schedule( + self.valid_transfer, None) + self.session.add(dummy_transfer_schedule) + pre_delete_mock = mock.Mock() + post_delete_mock = mock.Mock() + api.delete_transfer_schedule( + self.context, self.valid_transfer.id, dummy_transfer_schedule.id, + pre_delete_callable=pre_delete_mock, + post_delete_callable=post_delete_mock) + result = 
api.get_transfer_schedule( + self.context, self.valid_transfer.id, dummy_transfer_schedule.id) + self.assertIsNone(result) + pre_delete_mock.assert_called_once_with( + self.context, dummy_transfer_schedule) + post_delete_mock.assert_called_once_with( + self.context, dummy_transfer_schedule) + + def test_delete_transfer_schedule_already_deleted(self): + dummy_transfer_schedule = self._create_dummy_transfer_schedule( + self.valid_transfer, None) + self.session.add(dummy_transfer_schedule) + + def pre_delete(context, schedule): + schedule.deleted = True + schedule.deleted_at = timeutils.utcnow() + context.session.commit() + + self.assertRaises( + exception.NotFound, api.delete_transfer_schedule, + self.context, self.valid_transfer.id, dummy_transfer_schedule.id, + pre_delete_callable=pre_delete) + + def test_add_transfer_schedule(self): + new_schedule = self._create_dummy_transfer_schedule( + self.valid_transfer, None) + post_add_mock = mock.Mock() + api.add_transfer_schedule( + self.context, new_schedule, post_create_callable=post_add_mock) + result = api.get_transfer_schedule( + self.context, self.valid_transfer.id, new_schedule.id) + self.assertEqual(result, new_schedule) + post_add_mock.assert_called_once_with(self.context, new_schedule) + + def test_add_transfer_schedule_out_of_user_context(self): + new_schedule = self._create_dummy_transfer_schedule( + self.outer_scope_transfer, None) + self.assertRaises( + exception.NotAuthorized, api.add_transfer_schedule, + self.context, new_schedule) + + +class TransfersDBAPITestCase(BaseDBAPITestCase): + + @classmethod + def setUpClass(cls): + super(TransfersDBAPITestCase, cls).setUpClass() + cls.valid_transfer = cls.valid_data['user_scope'].get('transfer') + cls.valid_transfer_execution = cls.valid_data['user_scope'].get( + 'tasks_execution') + cls.outer_scope_transfer = cls.valid_data['outer_scope'].get( + 'transfer') + + @staticmethod + def _create_dummy_transfer(scenario=constants.REPLICA_SCENARIO_REPLICA, + 
origin_endpoint_id=str(uuid.uuid4()), + destination_endpoint_id=str(uuid.uuid4()), + project_id=DEFAULT_PROJECT_ID): + transfer = models.Transfer() + transfer.id = str(uuid.uuid4()) + transfer.user_id = project_id + transfer.project_id = project_id + transfer.base_id = transfer.id + transfer.scenario = scenario + transfer.last_execution_status = DEFAULT_EXECUTION_STATUS + transfer.executions = [] + transfer.instances = [DEFAULT_INSTANCE] + transfer.info = DEFAULT_TASK_INFO + transfer.origin_endpoint_id = origin_endpoint_id + transfer.destination_endpoint_id = destination_endpoint_id + + return transfer + + def test_get_transfers_admin(self): + self.context.is_admin = True + result = api.get_transfers(self.context) + self.assertIn(self.outer_scope_transfer, result) + + def test_get_transfers_out_of_user_context(self): + result = api.get_transfers(self.context) + self.assertNotIn(self.outer_scope_transfer, result) + + def test_get_transfers(self): + result = api.get_transfers(self.context) + self.assertIn(self.valid_transfer, result) + + def test_get_transfers_include_tasks_executions(self): + result = api.get_transfers(self.context, include_tasks_executions=True) + executions = [] + for transfer in result: + executions.extend(transfer.executions) + self.assertIn(self.valid_transfer_execution, executions) + + def test_get_transfers_include_task_info(self): + result = api.get_transfers(self.context, include_task_info=True) + self.assertTrue(hasattr(result[0], 'info')) + + def test_get_transfers_transfer_scenario(self): + scenario = constants.REPLICA_SCENARIO_REPLICA + result = api.get_transfers(self.context, transfer_scenario=scenario) + self.assertTrue(all([res.scenario == scenario for res in result])) + + def test_get_transfers_to_dict(self): + result = api.get_transfers(self.context, to_dict=True) + transfer_ids = [res['id'] for res in result] + self.assertIn(self.valid_transfer.id, transfer_ids) + + def test_get_transfer_admin(self): + self.context.is_admin = True 
+ result = api.get_transfer(self.context, self.outer_scope_transfer.id) + self.assertEqual(result, self.outer_scope_transfer) + + def test_get_transfer_include_task_info(self): + result = api.get_transfer( + self.context, self.valid_transfer.id, include_task_info=True) + self.assertEqual(result.info, DEFAULT_TASK_INFO) + + def test_get_transfer_by_scenario(self): + result = api.get_transfer( + self.context, self.valid_transfer.id, + transfer_scenario=constants.REPLICA_SCENARIO_REPLICA) + self.assertEqual(result, self.valid_transfer) + + def test_get_transfer_out_of_user_scope(self): + result = api.get_transfer(self.context, self.outer_scope_transfer.id) + self.assertIsNone(result) + + def test_get_transfer_to_dict(self): + result = api.get_transfer( + self.context, self.valid_transfer.id, to_dict=True) + self.assertEqual(result['id'], self.valid_transfer.id) + + result = api.get_transfer(self.context, "invalid", to_dict=True) + self.assertIsNone(result) + + def test_get_endpoint_transfers_count(self): + origin_endpoint_id = str(uuid.uuid4()) + dest_endpoint_id = str(uuid.uuid4()) + dummy_transfer_replica = self._create_dummy_transfer( + origin_endpoint_id=origin_endpoint_id, + destination_endpoint_id=dest_endpoint_id) + dummy_transfer_migration = self._create_dummy_transfer( + scenario=constants.REPLICA_SCENARIO_LIVE_MIGRATION, + origin_endpoint_id=origin_endpoint_id, + destination_endpoint_id=dest_endpoint_id) + self.session.add(dummy_transfer_replica) + self.session.add(dummy_transfer_migration) + + result = api.get_endpoint_transfers_count( + self.context, origin_endpoint_id) + self.assertEqual(result, 2) + + result = api.get_endpoint_transfers_count( + self.context, origin_endpoint_id, + transfer_scenario=constants.REPLICA_SCENARIO_REPLICA) + self.assertEqual(result, 1) + + def test_add_transfer(self): + dummy_transfer = self._create_dummy_transfer() + api.add_transfer(self.context, dummy_transfer) + result = api.get_transfer(self.context, dummy_transfer.id) + 
self.assertEqual(result, dummy_transfer) + + +class DeploymentsDBAPITestCase(BaseDBAPITestCase): + + @classmethod + def setUpClass(cls): + super(DeploymentsDBAPITestCase, cls).setUpClass() + cls.user_deployment = cls.valid_data['user_scope'].get('deployment') + cls.outer_scope_deployment = cls.valid_data['outer_scope'].get( + 'deployment') + cls.user_deployment_execution = cls.valid_data['user_scope'].get( + 'deployment_execution') + cls.outer_scope_deployment_execution = cls.valid_data[ + 'outer_scope'].get('deployment_execution') + cls.user_deployment_task = cls.user_deployment_execution.tasks[0] + cls.user_transfer = cls.valid_data['user_scope'].get('transfer') + + @staticmethod + def _create_dummy_deployment(transfer_id, + origin_endpoint_id=str(uuid.uuid4()), + destination_endpoint_id=str(uuid.uuid4()), + project_id=DEFAULT_PROJECT_ID): + deployment = models.Deployment() + deployment.id = str(uuid.uuid4()) + deployment.user_id = project_id + deployment.project_id = project_id + deployment.base_id = deployment.id + deployment.transfer_id = transfer_id + deployment.last_execution_status = DEFAULT_EXECUTION_STATUS + deployment.executions = [] + deployment.instances = [DEFAULT_INSTANCE] + deployment.info = DEFAULT_TASK_INFO + deployment.origin_endpoint_id = origin_endpoint_id + deployment.destination_endpoint_id = destination_endpoint_id + + return deployment + + def test_get_transfers_deployments_admin(self): + self.context.is_admin = True + result = api.get_transfer_deployments( + self.context, self.user_deployment.transfer_id) + self.assertIn(self.user_deployment, result) + + def test_get_transfer_deployments_out_of_user_context(self): + result = api.get_transfer_deployments( + self.context, self.outer_scope_deployment.transfer_id) + self.assertNotIn(self.outer_scope_deployment, result) + + def test_get_transfer_deployments(self): + result = api.get_transfer_deployments( + self.context, self.user_deployment.transfer_id) + self.assertIn(self.user_deployment, 
result) + + def test_get_deployments_admin(self): + self.context.is_admin = True + result = api.get_deployments(self.context) + self.assertIn(self.outer_scope_deployment, result) + self.assertIn(self.outer_scope_deployment_execution, + self.outer_scope_deployment.executions) + + def test_get_deployments_include_tasks(self): + result = api.get_deployments(self.context, include_tasks=True) + self.assertIn(self.user_deployment, result) + tasks = [] + for dep in result: + for execution in dep.executions: + tasks.extend(execution.tasks) + self.assertIn(self.user_deployment_task, tasks) + + def test_get_deployments_include_task_info(self): + result = api.get_deployments(self.context, include_task_info=True) + for dep in result: + if dep.id == self.user_deployment.id: + self.assertEqual(dep.info, DEFAULT_TASK_INFO) + + def test_get_deployments_out_of_user_context(self): + result = api.get_deployments(self.context) + self.assertNotIn(self.outer_scope_deployment, result) + + def test_get_deployments(self): + result = api.get_deployments(self.context) + self.assertIn(self.user_deployment, result) + + def test_get_deployments_to_dict(self): + result = api.get_deployments(self.context, to_dict=True) + self.assertIn(self.user_deployment.id, [d['id'] for d in result]) + + def test_get_deployment_admin(self): + self.context.is_admin = True + result = api.get_deployment( + self.context, self.outer_scope_deployment.id) + self.assertEqual(self.outer_scope_deployment, result) + + def test_get_deployment_include_task_info(self): + result = api.get_deployment(self.context, self.user_deployment.id, + include_task_info=True) + self.assertEqual(result.info, self.user_deployment.info) + + def test_get_deployment_out_of_user_context(self): + result = api.get_deployment( + self.context, self.outer_scope_deployment.id) + self.assertIsNone(result) + + def test_get_deployment(self): + result = api.get_deployment(self.context, self.user_deployment.id) + self.assertEqual(result, 
self.user_deployment) + + def test_get_deployment_to_dict(self): + result = api.get_deployment(self.context, self.user_deployment.id, + to_dict=True) + self.assertEqual(result['id'], self.user_deployment.id) + + def test_add_deployment(self): + dummy_deployment = self._create_dummy_deployment(self.user_transfer.id) + api.add_deployment(self.context, dummy_deployment) + result = api.get_deployment(self.context, dummy_deployment.id) + self.assertEqual(result, dummy_deployment) + -class DBAPITestCase(test_base.CoriolisBaseTestCase): - """Test suite for the Coriolis DB API.""" +class BaseTransferActionDBAPITestCase(BaseDBAPITestCase): - @mock.patch.object(api, 'get_endpoint') - def test_update_endpoint_not_found(self, mock_get_endpoint): - mock_get_endpoint.return_value = None + @classmethod + def setUpClass(cls): + super(BaseTransferActionDBAPITestCase, cls).setUpClass() + cls.user_transfer = cls.valid_data['user_scope'].get('transfer') + cls.outer_scope_transfer = cls.valid_data['outer_scope'].get( + 'transfer') - # We only need to test the unwrapped functions. Without this, - # when calling a coriolis.db.api function, it will try to - # establish an SQL connection. 
- update_endpoint = testutils.get_wrapped_function(api.update_endpoint) + def test_get_action_admin(self): + self.context.is_admin = True + result = api.get_action(self.context, self.outer_scope_transfer.id) + self.assertEqual(result, self.outer_scope_transfer) - self.assertRaises(exception.NotFound, update_endpoint, - mock.sentinel.context, mock.sentinel.endpoint_id, - mock.sentinel.updated_values) + def test_get_action_not_found(self): + self.assertRaises( + exception.NotFound, api.get_action, self.context, + self.outer_scope_transfer.id) - mock_get_endpoint.assert_called_once_with(mock.sentinel.context, - mock.sentinel.endpoint_id) + def test_get_action_include_task_info(self): + result = api.get_action( + self.context, self.user_transfer.id, include_task_info=True) + self.assertEqual(result.info, self.user_transfer.info) diff --git a/requirements.txt b/requirements.txt index 0efdd3b9..bb2b6949 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,6 +8,7 @@ jsonschema # so we limit its version here. kombu==4.6.10 PyMySQL +netifaces oslo.cache oslo.concurrency oslo.config diff --git a/tox.ini b/tox.ini index e4c89e17..432a134f 100644 --- a/tox.ini +++ b/tox.ini @@ -36,5 +36,5 @@ omit = coriolis/tests/* # E125 is deliberately excluded. 
See https://github.com/jcrocholl/pep8/issues/126 # E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301 -ignore = E125,E251,W503,W504,E305,E731,E117,W605,F632,H401,H403,H404,H405 +ignore = E125,E251,W503,W504,E305,E731,E117,W605,F632,H401,H403,H404,H405,H202 exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,tools From 37570a24924779fb7c4ca32eacfb54d4f1a7fb4d Mon Sep 17 00:00:00 2001 From: Daniel Vincze Date: Fri, 15 Nov 2024 17:27:38 +0200 Subject: [PATCH 23/24] Refactor conductor layer --- coriolis/api/v1/replica_schedules.py | 2 +- coriolis/api/v1/replicas.py | 6 +- coriolis/cmd/replica_cron.py | 4 +- coriolis/conductor/rpc/client.py | 155 +-- coriolis/conductor/rpc/server.py | 1150 ++++++++--------- coriolis/constants.py | 115 +- coriolis/db/sqlalchemy/models.py | 2 +- coriolis/deployments/api.py | 2 +- coriolis/diagnostics/api.py | 4 +- coriolis/exception.py | 8 +- coriolis/minion_manager/rpc/client.py | 4 +- coriolis/minion_manager/rpc/server.py | 15 +- coriolis/minion_manager/rpc/tasks.py | 12 +- coriolis/providers/factory.py | 14 +- coriolis/replica_tasks_executions/api.py | 10 +- coriolis/replicas/api.py | 12 +- coriolis/scheduler/scheduler_utils.py | 6 +- coriolis/tasks/factory.py | 56 +- coriolis/tasks/osmorphing_tasks.py | 8 +- coriolis/tasks/replica_tasks.py | 84 +- .../tests/api/v1/test_replica_schedules.py | 2 +- coriolis/tests/cmd/test_replica_cron.py | 6 +- .../data/deploy_replica_instance_config.yml | 182 --- .../data/deploy_transfer_instance_config.yml | 182 +++ ....yml => execute_transfer_tasks_config.yml} | 130 +- ..._config.yml => update_transfer_config.yml} | 12 +- coriolis/tests/conductor/rpc/test_client.py | 125 +- coriolis/tests/conductor/rpc/test_server.py | 1004 +++++++------- coriolis/tests/db/test_api.py | 20 +- .../tests/minion_manager/rpc/test_client.py | 6 +- .../tests/minion_manager/rpc/test_tasks.py | 26 +- coriolis/tests/replica_cron/test_api.py | 69 - 
.../replica_tasks_executions/test_api.py | 40 +- coriolis/tests/replicas/test_api.py | 46 +- coriolis/tests/tasks/test_osmorphing_tasks.py | 4 +- coriolis/tests/tasks/test_replica_tasks.py | 62 +- .../transfer_cron}/__init__.py | 0 .../transfer_cron}/rpc/__init__.py | 0 .../rpc/test_client.py | 4 +- .../rpc/test_server.py | 26 +- coriolis/tests/transfer_cron/test_api.py | 69 + coriolis/tests/worker/rpc/test_server.py | 6 +- .../__init__.py | 0 .../{replica_cron => transfer_cron}/api.py | 10 +- .../rpc/__init__.py | 0 .../rpc/client.py | 6 +- .../rpc/server.py | 6 +- coriolis/worker/rpc/server.py | 6 +- 48 files changed, 1822 insertions(+), 1896 deletions(-) delete mode 100644 coriolis/tests/conductor/rpc/data/deploy_replica_instance_config.yml create mode 100644 coriolis/tests/conductor/rpc/data/deploy_transfer_instance_config.yml rename coriolis/tests/conductor/rpc/data/{execute_replica_tasks_config.yml => execute_transfer_tasks_config.yml} (57%) rename coriolis/tests/conductor/rpc/data/{update_replica_config.yml => update_transfer_config.yml} (88%) delete mode 100644 coriolis/tests/replica_cron/test_api.py rename coriolis/{replica_cron => tests/transfer_cron}/__init__.py (100%) rename coriolis/{replica_cron => tests/transfer_cron}/rpc/__init__.py (100%) rename coriolis/tests/{replica_cron => transfer_cron}/rpc/test_client.py (91%) rename coriolis/tests/{replica_cron => transfer_cron}/rpc/test_server.py (90%) create mode 100644 coriolis/tests/transfer_cron/test_api.py rename coriolis/{tests/replica_cron => transfer_cron}/__init__.py (100%) rename coriolis/{replica_cron => transfer_cron}/api.py (76%) rename coriolis/{tests/replica_cron => transfer_cron}/rpc/__init__.py (100%) rename coriolis/{replica_cron => transfer_cron}/rpc/client.py (75%) rename coriolis/{replica_cron => transfer_cron}/rpc/server.py (95%) diff --git a/coriolis/api/v1/replica_schedules.py b/coriolis/api/v1/replica_schedules.py index 32a3e1d0..53edf7c3 100644 --- 
a/coriolis/api/v1/replica_schedules.py +++ b/coriolis/api/v1/replica_schedules.py @@ -5,8 +5,8 @@ from coriolis.api import wsgi as api_wsgi from coriolis import exception from coriolis.policies import replica_schedules as schedules_policies -from coriolis.replica_cron import api from coriolis import schemas +from coriolis.transfer_cron import api import jsonschema from oslo_log import log as logging diff --git a/coriolis/api/v1/replicas.py b/coriolis/api/v1/replicas.py index 1d1aac9c..197d80e8 100644 --- a/coriolis/api/v1/replicas.py +++ b/coriolis/api/v1/replicas.py @@ -27,8 +27,8 @@ LOG = logging.getLogger(__name__) SUPPORTED_REPLICA_SCENARIOS = [ - constants.REPLICA_SCENARIO_REPLICA, - constants.REPLICA_SCENARIO_LIVE_MIGRATION] + constants.TRANSFER_SCENARIO_REPLICA, + constants.TRANSFER_SCENARIO_LIVE_MIGRATION] class ReplicaController(api_wsgi.Controller): @@ -79,7 +79,7 @@ def _validate_create_body(self, context, body): f"'{scenario}', must be one of: " f"{SUPPORTED_REPLICA_SCENARIOS}") else: - scenario = constants.REPLICA_SCENARIO_REPLICA + scenario = constants.TRANSFER_SCENARIO_REPLICA LOG.warn( "No Replica 'scenario' field set in Replica body, " f"defaulting to: '{scenario}'") diff --git a/coriolis/cmd/replica_cron.py b/coriolis/cmd/replica_cron.py index 2484ccde..80eafc5b 100644 --- a/coriolis/cmd/replica_cron.py +++ b/coriolis/cmd/replica_cron.py @@ -6,8 +6,8 @@ from oslo_config import cfg from coriolis import constants -from coriolis.replica_cron.rpc import server as rpc_server from coriolis import service +from coriolis.transfer_cron.rpc import server as rpc_server from coriolis import utils CONF = cfg.CONF @@ -19,7 +19,7 @@ def main(): utils.setup_logging() server = service.MessagingService( - constants.REPLICA_CRON_MAIN_MESSAGING_TOPIC, + constants.TRANSFER_CRON_MAIN_MESSAGING_TOPIC, [rpc_server.ReplicaCronServerEndpoint()], rpc_server.VERSION, worker_count=1) launcher = service.service.launch( diff --git a/coriolis/conductor/rpc/client.py 
b/coriolis/conductor/rpc/client.py index c11b5bfa..45148de6 100644 --- a/coriolis/conductor/rpc/client.py +++ b/coriolis/conductor/rpc/client.py @@ -128,49 +128,49 @@ def get_provider_schemas(self, ctxt, platform_name, provider_type): platform_name=platform_name, provider_type=provider_type) - def execute_replica_tasks(self, ctxt, replica_id, - shutdown_instances=False): + def execute_transfer_tasks(self, ctxt, transfer_id, + shutdown_instances=False): return self._call( - ctxt, 'execute_replica_tasks', replica_id=replica_id, + ctxt, 'execute_transfer_tasks', transfer_id=transfer_id, shutdown_instances=shutdown_instances) - def get_replica_tasks_executions(self, ctxt, replica_id, - include_tasks=False): + def get_transfer_tasks_executions(self, ctxt, transfer_id, + include_tasks=False): return self._call( - ctxt, 'get_replica_tasks_executions', - replica_id=replica_id, + ctxt, 'get_transfer_tasks_executions', + transfer_id=transfer_id, include_tasks=include_tasks) - def get_replica_tasks_execution(self, ctxt, replica_id, execution_id, - include_task_info=False): + def get_transfer_tasks_execution(self, ctxt, transfer_id, execution_id, + include_task_info=False): return self._call( - ctxt, 'get_replica_tasks_execution', replica_id=replica_id, + ctxt, 'get_transfer_tasks_execution', transfer_id=transfer_id, execution_id=execution_id, include_task_info=include_task_info) - def delete_replica_tasks_execution(self, ctxt, replica_id, execution_id): + def delete_transfer_tasks_execution(self, ctxt, transfer_id, execution_id): return self._call( - ctxt, 'delete_replica_tasks_execution', replica_id=replica_id, + ctxt, 'delete_transfer_tasks_execution', transfer_id=transfer_id, execution_id=execution_id) - def cancel_replica_tasks_execution(self, ctxt, replica_id, execution_id, - force): + def cancel_transfer_tasks_execution(self, ctxt, transfer_id, execution_id, + force): return self._call( - ctxt, 'cancel_replica_tasks_execution', replica_id=replica_id, + ctxt, 
'cancel_transfer_tasks_execution', transfer_id=transfer_id, execution_id=execution_id, force=force) - def create_instances_replica(self, ctxt, - replica_scenario, - origin_endpoint_id, - destination_endpoint_id, - origin_minion_pool_id, - destination_minion_pool_id, - instance_osmorphing_minion_pool_mappings, - source_environment, destination_environment, - instances, network_map, storage_mappings, - notes=None, user_scripts=None): - return self._call( - ctxt, 'create_instances_replica', - replica_scenario=replica_scenario, + def create_instances_transfer(self, ctxt, + transfer_scenario, + origin_endpoint_id, + destination_endpoint_id, + origin_minion_pool_id, + destination_minion_pool_id, + instance_osmorphing_minion_pool_mappings, + source_environment, destination_environment, + instances, network_map, storage_mappings, + notes=None, user_scripts=None): + return self._call( + ctxt, 'create_instances_transfer', + transfer_scenario=transfer_scenario, origin_endpoint_id=origin_endpoint_id, destination_endpoint_id=destination_endpoint_id, origin_minion_pool_id=origin_minion_pool_id, @@ -185,25 +185,25 @@ def create_instances_replica(self, ctxt, source_environment=source_environment, user_scripts=user_scripts) - def get_replicas(self, ctxt, include_tasks_executions=False, - include_task_info=False): + def get_transfers(self, ctxt, include_tasks_executions=False, + include_task_info=False): return self._call( - ctxt, 'get_replicas', + ctxt, 'get_transfers', include_tasks_executions=include_tasks_executions, include_task_info=include_task_info) - def get_replica(self, ctxt, replica_id, include_task_info=False): + def get_transfer(self, ctxt, transfer_id, include_task_info=False): return self._call( - ctxt, 'get_replica', replica_id=replica_id, + ctxt, 'get_transfer', transfer_id=transfer_id, include_task_info=include_task_info) - def delete_replica(self, ctxt, replica_id): + def delete_transfer(self, ctxt, transfer_id): self._call( - ctxt, 'delete_replica', 
replica_id=replica_id) + ctxt, 'delete_transfer', transfer_id=transfer_id) - def delete_replica_disks(self, ctxt, replica_id): + def delete_transfer_disks(self, ctxt, transfer_id): return self._call( - ctxt, 'delete_replica_disks', replica_id=replica_id) + ctxt, 'delete_transfer_disks', transfer_id=transfer_id) def get_deployments(self, ctxt, include_tasks=False, include_task_info=False): @@ -216,12 +216,12 @@ def get_deployment(self, ctxt, deployment_id, include_task_info=False): ctxt, 'get_deployment', deployment_id=deployment_id, include_task_info=include_task_info) - def deploy_replica_instances( - self, ctxt, replica_id, + def deploy_transfer_instances( + self, ctxt, transfer_id, instance_osmorphing_minion_pool_mappings=None, clone_disks=False, force=False, skip_os_morphing=False, user_scripts=None): return self._call( - ctxt, 'deploy_replica_instances', replica_id=replica_id, + ctxt, 'deploy_transfer_instances', transfer_id=transfer_id, instance_osmorphing_minion_pool_mappings=( instance_osmorphing_minion_pool_mappings), clone_disks=clone_disks, force=force, @@ -283,48 +283,48 @@ def update_task_progress_update( new_current_step=new_current_step, new_total_steps=new_total_steps, new_message=new_message) - def create_replica_schedule(self, ctxt, replica_id, - schedule, enabled, exp_date, - shutdown_instance): + def create_transfer_schedule(self, ctxt, transfer_id, + schedule, enabled, exp_date, + shutdown_instance): return self._call( - ctxt, 'create_replica_schedule', - replica_id=replica_id, + ctxt, 'create_transfer_schedule', + transfer_id=transfer_id, schedule=schedule, enabled=enabled, exp_date=exp_date, shutdown_instance=shutdown_instance) - def update_replica_schedule(self, ctxt, replica_id, schedule_id, - updated_values): + def update_transfer_schedule(self, ctxt, transfer_id, schedule_id, + updated_values): return self._call( - ctxt, 'update_replica_schedule', - replica_id=replica_id, + ctxt, 'update_transfer_schedule', + transfer_id=transfer_id, 
schedule_id=schedule_id, updated_values=updated_values) - def delete_replica_schedule(self, ctxt, replica_id, schedule_id): + def delete_transfer_schedule(self, ctxt, transfer_id, schedule_id): return self._call( - ctxt, 'delete_replica_schedule', - replica_id=replica_id, + ctxt, 'delete_transfer_schedule', + transfer_id=transfer_id, schedule_id=schedule_id) - def get_replica_schedules(self, ctxt, replica_id=None, expired=True): + def get_transfer_schedules(self, ctxt, transfer_id=None, expired=True): return self._call( - ctxt, 'get_replica_schedules', - replica_id=replica_id, expired=expired) + ctxt, 'get_transfer_schedules', + transfer_id=transfer_id, expired=expired) - def get_replica_schedule(self, ctxt, replica_id, - schedule_id, expired=True): + def get_transfer_schedule(self, ctxt, transfer_id, + schedule_id, expired=True): return self._call( - ctxt, 'get_replica_schedule', - replica_id=replica_id, + ctxt, 'get_transfer_schedule', + transfer_id=transfer_id, schedule_id=schedule_id, expired=expired) - def update_replica(self, ctxt, replica_id, updated_properties): + def update_transfer(self, ctxt, transfer_id, updated_properties): return self._call( - ctxt, 'update_replica', - replica_id=replica_id, + ctxt, 'update_transfer', + transfer_id=transfer_id, updated_properties=updated_properties) def get_diagnostics(self, ctxt): @@ -391,31 +391,32 @@ def delete_service(self, ctxt, service_id): return self._call( ctxt, 'delete_service', service_id=service_id) - def confirm_replica_minions_allocation( - self, ctxt, replica_id, minion_machine_allocations): + def confirm_transfer_minions_allocation( + self, ctxt, transfer_id, minion_machine_allocations): self._call( - ctxt, 'confirm_replica_minions_allocation', replica_id=replica_id, + ctxt, 'confirm_transfer_minions_allocation', + transfer_id=transfer_id, minion_machine_allocations=minion_machine_allocations) - def report_replica_minions_allocation_error( - self, ctxt, replica_id, minion_allocation_error_details): + 
def report_transfer_minions_allocation_error( + self, ctxt, transfer_id, minion_allocation_error_details): self._call( - ctxt, 'report_replica_minions_allocation_error', - replica_id=replica_id, + ctxt, 'report_transfer_minions_allocation_error', + transfer_id=transfer_id, minion_allocation_error_details=minion_allocation_error_details) - def confirm_migration_minions_allocation( - self, ctxt, migration_id, minion_machine_allocations): + def confirm_deployment_minions_allocation( + self, ctxt, deployment_id, minion_machine_allocations): self._call( - ctxt, 'confirm_migration_minions_allocation', - migration_id=migration_id, + ctxt, 'confirm_deployment_minions_allocation', + deployment_id=deployment_id, minion_machine_allocations=minion_machine_allocations) - def report_migration_minions_allocation_error( - self, ctxt, migration_id, minion_allocation_error_details): + def report_deployment_minions_allocation_error( + self, ctxt, deployment_id, minion_allocation_error_details): self._call( - ctxt, 'report_migration_minions_allocation_error', - migration_id=migration_id, + ctxt, 'report_deployment_minions_allocation_error', + deployment_id=deployment_id, minion_allocation_error_details=minion_allocation_error_details) diff --git a/coriolis/conductor/rpc/server.py b/coriolis/conductor/rpc/server.py index 1d9bad4e..46787b4d 100644 --- a/coriolis/conductor/rpc/server.py +++ b/coriolis/conductor/rpc/server.py @@ -18,10 +18,10 @@ from coriolis import keystone from coriolis.licensing import client as licensing_client from coriolis.minion_manager.rpc import client as rpc_minion_manager_client -from coriolis.replica_cron.rpc import client as rpc_cron_client from coriolis.scheduler.rpc import client as rpc_scheduler_client from coriolis import schemas from coriolis.tasks import factory as tasks_factory +from coriolis.transfer_cron.rpc import client as rpc_cron_client from coriolis import utils from coriolis.worker.rpc import client as rpc_worker_client @@ -46,9 +46,9 @@ 
"Please review the Conductor logs and contact support for assistance.") SCENARIO_TYPE_TO_LICENSING_RESERVATION_MAP = { - constants.REPLICA_SCENARIO_REPLICA: + constants.TRANSFER_SCENARIO_REPLICA: licensing_client.RESERVATION_TYPE_REPLICA, - constants.REPLICA_SCENARIO_LIVE_MIGRATION: + constants.TRANSFER_SCENARIO_LIVE_MIGRATION: licensing_client.RESERVATION_TYPE_MIGRATION } @@ -65,26 +65,26 @@ def inner(): return wrapper -def replica_synchronized(func): +def transfer_synchronized(func): @functools.wraps(func) - def wrapper(self, ctxt, replica_id, *args, **kwargs): + def wrapper(self, ctxt, transfer_id, *args, **kwargs): @lockutils.synchronized( - constants.REPLICA_LOCK_NAME_FORMAT % replica_id, + constants.TRANSFER_LOCK_NAME_FORMAT % transfer_id, external=True) def inner(): - return func(self, ctxt, replica_id, *args, **kwargs) + return func(self, ctxt, transfer_id, *args, **kwargs) return inner() return wrapper def schedule_synchronized(func): @functools.wraps(func) - def wrapper(self, ctxt, replica_id, schedule_id, *args, **kwargs): + def wrapper(self, ctxt, transfer_id, schedule_id, *args, **kwargs): @lockutils.synchronized( constants.SCHEDULE_LOCK_NAME_FORMAT % schedule_id, external=True) def inner(): - return func(self, ctxt, replica_id, schedule_id, *args, **kwargs) + return func(self, ctxt, transfer_id, schedule_id, *args, **kwargs) return inner() return wrapper @@ -118,18 +118,6 @@ def inner(): return wrapper -def migration_synchronized(func): - @functools.wraps(func) - def wrapper(self, ctxt, migration_id, *args, **kwargs): - @lockutils.synchronized( - constants.MIGRATION_LOCK_NAME_FORMAT % migration_id, - external=True) - def inner(): - return func(self, ctxt, migration_id, *args, **kwargs) - return inner() - return wrapper - - def deployment_synchronized(func): @functools.wraps(func) def wrapper(self, ctxt, deployment_id, *args, **kwargs): @@ -144,12 +132,12 @@ def inner(): def tasks_execution_synchronized(func): @functools.wraps(func) - def wrapper(self, 
ctxt, replica_id, execution_id, *args, **kwargs): + def wrapper(self, ctxt, transfer_id, execution_id, *args, **kwargs): @lockutils.synchronized( constants.EXECUTION_LOCK_NAME_FORMAT % execution_id, external=True) def inner(): - return func(self, ctxt, replica_id, execution_id, *args, **kwargs) + return func(self, ctxt, transfer_id, execution_id, *args, **kwargs) return inner() return wrapper @@ -183,7 +171,7 @@ def __init__(self): self._licensing_client = licensing_client.LicensingClient.from_env() self._worker_client_instance = None self._scheduler_client_instance = None - self._replica_cron_client_instance = None + self._transfer_cron_client_instance = None self._minion_manager_client_instance = None # NOTE(aznashwan): it is unsafe to fork processes with pre-instantiated @@ -206,11 +194,11 @@ def _scheduler_client(self): return self._scheduler_client_instance @property - def _replica_cron_client(self): - if not self._replica_cron_client_instance: - self._replica_cron_client_instance = ( - rpc_cron_client.ReplicaCronClient()) - return self._replica_cron_client_instance + def _transfer_cron_client(self): + if not self._transfer_cron_client_instance: + self._transfer_cron_client_instance = ( + rpc_cron_client.TransferCronClient()) + return self._transfer_cron_client_instance @property def _minion_manager_client(self): @@ -222,7 +210,7 @@ def _minion_manager_client(self): def get_all_diagnostics(self, ctxt): client_objects = { "conductor": self, - "replica_cron": self._replica_cron_client, + "transfer_cron": self._transfer_cron_client, "minion_manager": self._minion_manager_client, "scheduler": self._scheduler_client} @@ -297,22 +285,22 @@ def _check_delete_reservation_for_transfer(self, transfer_action): "action with ID '%s'. Skipping. 
Exception\n%s", reservation_id, action_id, utils.get_exception_details()) - def _create_reservation_for_replica(self, replica): - action_id = replica.base_id - scenario = replica.scenario + def _create_reservation_for_transfer(self, transfer): + action_id = transfer.base_id + scenario = transfer.scenario reservation_type = SCENARIO_TYPE_TO_LICENSING_RESERVATION_MAP.get( scenario, None) if not reservation_type: raise exception.LicensingException( - message="Could not determine reservation type for replica " - f"'{action_id}' with scenario '{replica.scenario}'.") + message="Could not determine reservation type for transfer " + f"'{action_id}' with scenario '{transfer.scenario}'.") if not self._licensing_client: LOG.warn( "Licensing client not instantiated. Skipping creation of " "reservation for transfer action '%s'", action_id) return - ninstances = len(replica.instances) + ninstances = len(transfer.instances) LOG.debug( "Attempting to create '%s' reservation for %d instances for " "transfer action with ID '%s'.", @@ -323,7 +311,7 @@ def _create_reservation_for_replica(self, replica): LOG.info( f"Sucessfully created licensing reservation for transfer " f"with ID '{action_id}' with properties: {reservation}") - replica.reservation_id = reservation['id'] + transfer.reservation_id = reservation['id'] return reservation @@ -374,23 +362,24 @@ def _check_mark_reservation_fulfilled( f"Successfully marked reservation with ID '{reservation_id}' " f"for transfer action '{action_id}' as fulfilled") - def _check_reservation_for_replica(self, replica): - scenario = replica.scenario + def _check_reservation_for_transfer(self, transfer): + scenario = transfer.scenario reservation_type = SCENARIO_TYPE_TO_LICENSING_RESERVATION_MAP.get( scenario, None) if not reservation_type: raise exception.LicensingException( - message="Could not determine reservation type for replica " - f"'{replica.id}' with scenario '{replica.scenario}'.") + message="Could not determine reservation type for 
transfer " + f"'{transfer.id}' with scenario " + f"'{transfer.scenario}'.") - action_id = replica.base_id + action_id = transfer.base_id if not self._licensing_client: LOG.warn( "Licensing client not instantiated. Skipping checking of " "reservation for transfer action '%s'", action_id) return - reservation_id = replica.reservation_id + reservation_id = transfer.reservation_id if reservation_id: LOG.debug( "Attempting to check reservation with ID '%s' for transfer " @@ -400,13 +389,13 @@ def _check_reservation_for_replica(self, replica): reservation_id) fulfilled_at = reservation.get("fulfilled_at", None) - if scenario == constants.REPLICA_SCENARIO_LIVE_MIGRATION and ( + if scenario == constants.TRANSFER_SCENARIO_LIVE_MIGRATION and ( fulfilled_at): raise exception.MigrationLicenceFulfilledException( - action_id=replica.id, reservation_id=reservation_id, + action_id=transfer.id, reservation_id=reservation_id, fulfilled_at=fulfilled_at) - replica.reservation_id = ( + transfer.reservation_id = ( self._licensing_client.check_refresh_reservation( reservation_id)['id']) except Exception as ex: @@ -427,14 +416,14 @@ def _check_reservation_for_replica(self, replica): "reservation. 
Trace was: %s", reservation_id, action_id, utils.get_exception_details()) - self._create_reservation_for_replica(replica) + self._create_reservation_for_transfer(transfer) else: raise ex else: LOG.info( f"Transfer action '{action_id}' has no reservation ID set, " f"attempting to create a new one for it") - self._create_reservation_for_replica(replica) + self._create_reservation_for_transfer(transfer) def create_endpoint(self, ctxt, name, endpoint_type, description, connection_info, mapped_regions=None): @@ -487,11 +476,11 @@ def get_endpoint(self, ctxt, endpoint_id): @endpoint_synchronized def delete_endpoint(self, ctxt, endpoint_id): - q_replicas_count = db_api.get_endpoint_transfers_count( + q_transfers_count = db_api.get_endpoint_transfers_count( ctxt, endpoint_id) - if q_replicas_count != 0: - raise exception.NotAuthorized("%s replicas would be orphaned!" % - q_replicas_count) + if q_transfers_count != 0: + raise exception.NotAuthorized("%s transfers would be orphaned!" % + q_transfers_count) db_api.delete_endpoint(ctxt, endpoint_id) def get_endpoint_instances(self, ctxt, endpoint_id, source_environment, @@ -904,39 +893,40 @@ def _check_task_cls_param_requirements(task, instance_task_info_keys): "for ordering or state conflicts.", execution.id, execution.type) - @replica_synchronized - def execute_replica_tasks(self, ctxt, replica_id, shutdown_instances): - replica = self._get_replica(ctxt, replica_id, include_task_info=True) - self._check_replica_running_executions(ctxt, replica) - self._check_minion_pools_for_action(ctxt, replica) - self._check_reservation_for_replica(replica) + @transfer_synchronized + def execute_transfer_tasks(self, ctxt, transfer_id, shutdown_instances): + transfer = self._get_transfer( + ctxt, transfer_id, include_task_info=True) + self._check_transfer_running_executions(ctxt, transfer) + self._check_minion_pools_for_action(ctxt, transfer) + self._check_reservation_for_transfer(transfer) execution = models.TasksExecution() execution.id = 
str(uuid.uuid4()) - execution.action = replica + execution.action = transfer execution.status = constants.EXECUTION_STATUS_UNEXECUTED - execution.type = constants.EXECUTION_TYPE_REPLICA_EXECUTION + execution.type = constants.EXECUTION_TYPE_TRANSFER_EXECUTION # TODO(aznashwan): have these passed separately to the relevant # provider methods. They're currently passed directly inside # dest-env by the API service when accepting the call, but we - # re-overwrite them here in case of Replica updates. - dest_env = copy.deepcopy(replica.destination_environment) - dest_env['network_map'] = replica.network_map - dest_env['storage_mappings'] = replica.storage_mappings + # re-overwrite them here in case of Transfer updates. + dest_env = copy.deepcopy(transfer.destination_environment) + dest_env['network_map'] = transfer.network_map + dest_env['storage_mappings'] = transfer.storage_mappings for instance in execution.action.instances: # NOTE: we default/convert the volumes info to an empty list # to preserve backwards-compatibility with older versions # of Coriolis dating before the scheduling overhaul (PR##114) - if instance not in replica.info: - replica.info[instance] = {'volumes_info': []} - elif replica.info[instance].get('volumes_info') is None: - replica.info[instance]['volumes_info'] = [] + if instance not in transfer.info: + transfer.info[instance] = {'volumes_info': []} + elif transfer.info[instance].get('volumes_info') is None: + transfer.info[instance]['volumes_info'] = [] # NOTE: we update all of the param values before triggering an # execution to ensure that the latest parameters are used: - replica.info[instance].update({ - "source_environment": replica.source_environment, + transfer.info[instance].update({ + "source_environment": transfer.source_environment, "target_environment": dest_env}) # TODO(aznashwan): have these passed separately to the relevant # provider methods (they're currently passed directly inside @@ -944,9 +934,9 @@ def 
execute_replica_tasks(self, ctxt, replica_id, shutdown_instances): # "network_map": network_map, # "storage_mappings": storage_mappings, - validate_replica_source_inputs_task = self._create_task( + validate_transfer_source_inputs_task = self._create_task( instance, - constants.TASK_TYPE_VALIDATE_REPLICA_SOURCE_INPUTS, + constants.TASK_TYPE_VALIDATE_TRANSFER_SOURCE_INPUTS, execution) get_instance_info_task = self._create_task( @@ -954,20 +944,20 @@ def execute_replica_tasks(self, ctxt, replica_id, shutdown_instances): constants.TASK_TYPE_GET_INSTANCE_INFO, execution) - validate_replica_destination_inputs_task = self._create_task( + validate_transfer_destination_inputs_task = self._create_task( instance, - constants.TASK_TYPE_VALIDATE_REPLICA_DESTINATION_INPUTS, + constants.TASK_TYPE_VALIDATE_TRANSFER_DESTINATION_INPUTS, execution, depends_on=[get_instance_info_task.id]) disk_deployment_depends_on = [] validate_origin_minion_task = None - if replica.origin_minion_pool_id: + if transfer.origin_minion_pool_id: # NOTE: these values are required for the # _check_execution_tasks_sanity call but # will be populated later when the pool # allocations actually happen: - replica.info[instance].update({ + transfer.info[instance].update({ "origin_minion_machine_id": None, "origin_minion_provider_properties": None, "origin_minion_connection_info": None}) @@ -977,20 +967,20 @@ def execute_replica_tasks(self, ctxt, replica_id, shutdown_instances): execution, depends_on=[ get_instance_info_task.id, - validate_replica_source_inputs_task.id]) + validate_transfer_source_inputs_task.id]) disk_deployment_depends_on.append( validate_origin_minion_task.id) else: disk_deployment_depends_on.append( - validate_replica_source_inputs_task.id) + validate_transfer_source_inputs_task.id) validate_destination_minion_task = None - if replica.destination_minion_pool_id: + if transfer.destination_minion_pool_id: # NOTE: these values are required for the # _check_execution_tasks_sanity call but # will 
be populated later when the pool # allocations actually happen: - replica.info[instance].update({ + transfer.info[instance].update({ "destination_minion_machine_id": None, "destination_minion_provider_properties": None, "destination_minion_connection_info": None, @@ -1000,42 +990,42 @@ def execute_replica_tasks(self, ctxt, replica_id, shutdown_instances): constants.TASK_TYPE_VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY, # noqa: E501 execution, depends_on=[ - validate_replica_destination_inputs_task.id]) + validate_transfer_destination_inputs_task.id]) disk_deployment_depends_on.append( validate_destination_minion_task.id) else: disk_deployment_depends_on.append( - validate_replica_destination_inputs_task.id) + validate_transfer_destination_inputs_task.id) - deploy_replica_disks_task = self._create_task( - instance, constants.TASK_TYPE_DEPLOY_REPLICA_DISKS, + deploy_transfer_disks_task = self._create_task( + instance, constants.TASK_TYPE_DEPLOY_TRANSFER_DISKS, execution, depends_on=disk_deployment_depends_on) shutdown_deps = [] - deploy_replica_source_resources_task = None - if not replica.origin_minion_pool_id: - deploy_replica_source_resources_task = self._create_task( + deploy_transfer_source_resources_task = None + if not transfer.origin_minion_pool_id: + deploy_transfer_source_resources_task = self._create_task( instance, - constants.TASK_TYPE_DEPLOY_REPLICA_SOURCE_RESOURCES, + constants.TASK_TYPE_DEPLOY_TRANSFER_SOURCE_RESOURCES, execution, depends_on=[ - deploy_replica_disks_task.id]) - shutdown_deps.append(deploy_replica_source_resources_task) + deploy_transfer_disks_task.id]) + shutdown_deps.append(deploy_transfer_source_resources_task) attach_destination_minion_disks_task = None - deploy_replica_target_resources_task = None - if replica.destination_minion_pool_id: + deploy_transfer_target_resources_task = None + if transfer.destination_minion_pool_id: ttyp = constants.TASK_TYPE_ATTACH_VOLUMES_TO_DESTINATION_MINION attach_destination_minion_disks_task = 
self._create_task( instance, ttyp, execution, depends_on=[ - deploy_replica_disks_task.id]) + deploy_transfer_disks_task.id]) shutdown_deps.append(attach_destination_minion_disks_task) else: - deploy_replica_target_resources_task = self._create_task( + deploy_transfer_target_resources_task = self._create_task( instance, - constants.TASK_TYPE_DEPLOY_REPLICA_TARGET_RESOURCES, + constants.TASK_TYPE_DEPLOY_TRANSFER_TARGET_RESOURCES, execution, depends_on=[ - deploy_replica_disks_task.id]) - shutdown_deps.append(deploy_replica_target_resources_task) + deploy_transfer_disks_task.id]) + shutdown_deps.append(deploy_transfer_target_resources_task) depends_on = [t.id for t in shutdown_deps] if shutdown_instances: @@ -1048,7 +1038,7 @@ def execute_replica_tasks(self, ctxt, replica_id, shutdown_instances): instance, constants.TASK_TYPE_REPLICATE_DISKS, execution, depends_on=depends_on) - if replica.origin_minion_pool_id: + if transfer.origin_minion_pool_id: self._create_task( instance, constants.TASK_TYPE_RELEASE_SOURCE_MINION, @@ -1060,14 +1050,14 @@ def execute_replica_tasks(self, ctxt, replica_id, shutdown_instances): else: self._create_task( instance, - constants.TASK_TYPE_DELETE_REPLICA_SOURCE_RESOURCES, + constants.TASK_TYPE_DELETE_TRANSFER_SOURCE_RESOURCES, execution, depends_on=[ - deploy_replica_source_resources_task.id, + deploy_transfer_source_resources_task.id, replicate_disks_task.id], on_error=True) - if replica.destination_minion_pool_id: + if transfer.destination_minion_pool_id: detach_volumes_from_minion_task = self._create_task( instance, constants.TASK_TYPE_DETACH_VOLUMES_FROM_DESTINATION_MINION, @@ -1088,162 +1078,165 @@ def execute_replica_tasks(self, ctxt, replica_id, shutdown_instances): else: self._create_task( instance, - constants.TASK_TYPE_DELETE_REPLICA_TARGET_RESOURCES, + constants.TASK_TYPE_DELETE_TRANSFER_TARGET_RESOURCES, execution, depends_on=[ - deploy_replica_target_resources_task.id, + deploy_transfer_target_resources_task.id, 
replicate_disks_task.id], on_error=True) - self._check_execution_tasks_sanity(execution, replica.info) + self._check_execution_tasks_sanity(execution, transfer.info) - # update the action info for all of the Replicas: + # update the action info for all of the Transfers: for instance in execution.action.instances: db_api.update_transfer_action_info_for_instance( - ctxt, replica.id, instance, replica.info[instance]) + ctxt, transfer.id, instance, transfer.info[instance]) # add new execution to DB: db_api.add_transfer_tasks_execution(ctxt, execution) - LOG.info("Replica tasks execution added to DB: %s", execution.id) + LOG.info("Transfer tasks execution added to DB: %s", execution.id) uses_minion_pools = any([ - replica.origin_minion_pool_id, - replica.destination_minion_pool_id]) + transfer.origin_minion_pool_id, + transfer.destination_minion_pool_id]) if uses_minion_pools: - self._minion_manager_client.allocate_minion_machines_for_replica( - ctxt, replica) + self._minion_manager_client.allocate_minion_machines_for_transfer( + ctxt, transfer) self._set_tasks_execution_status( ctxt, execution, constants.EXECUTION_STATUS_AWAITING_MINION_ALLOCATIONS) else: - self._begin_tasks(ctxt, replica, execution) + self._begin_tasks(ctxt, transfer, execution) - return self.get_replica_tasks_execution( - ctxt, replica_id, execution.id) + return self.get_transfer_tasks_execution( + ctxt, transfer_id, execution.id) - @replica_synchronized - def get_replica_tasks_executions(self, ctxt, replica_id, - include_tasks=False, - include_task_info=False): + @transfer_synchronized + def get_transfer_tasks_executions(self, ctxt, transfer_id, + include_tasks=False, + include_task_info=False): return db_api.get_transfer_tasks_executions( - ctxt, replica_id, include_tasks, + ctxt, transfer_id, include_tasks, include_task_info=include_task_info, to_dict=True) @tasks_execution_synchronized - def get_replica_tasks_execution(self, ctxt, replica_id, execution_id, - include_task_info=False): - return 
self._get_replica_tasks_execution( - ctxt, replica_id, execution_id, + def get_transfer_tasks_execution(self, ctxt, transfer_id, execution_id, + include_task_info=False): + return self._get_transfer_tasks_execution( + ctxt, transfer_id, execution_id, include_task_info=include_task_info, to_dict=True) @tasks_execution_synchronized - def delete_replica_tasks_execution(self, ctxt, replica_id, execution_id): - execution = self._get_replica_tasks_execution( - ctxt, replica_id, execution_id) + def delete_transfer_tasks_execution(self, ctxt, transfer_id, execution_id): + execution = self._get_transfer_tasks_execution( + ctxt, transfer_id, execution_id) if execution.status in constants.ACTIVE_EXECUTION_STATUSES: - raise exception.InvalidMigrationState( - "Cannot delete execution '%s' for Replica '%s' as it is " + raise exception.InvalidActionTasksExecutionState( + "Cannot delete execution '%s' for Transfer '%s' as it is " "currently in '%s' state." % ( - execution_id, replica_id, execution.status)) + execution_id, transfer_id, execution.status)) db_api.delete_transfer_tasks_execution(ctxt, execution_id) @tasks_execution_synchronized - def cancel_replica_tasks_execution(self, ctxt, replica_id, execution_id, - force): - execution = self._get_replica_tasks_execution( - ctxt, replica_id, execution_id) + def cancel_transfer_tasks_execution(self, ctxt, transfer_id, execution_id, + force): + execution = self._get_transfer_tasks_execution( + ctxt, transfer_id, execution_id) if execution.status not in constants.ACTIVE_EXECUTION_STATUSES: - raise exception.InvalidReplicaState( - "Replica '%s' has no running execution to cancel." % ( - replica_id)) + raise exception.InvalidTransferState( + "Transfer '%s' has no running execution to cancel." % ( + transfer_id)) if execution.status == constants.EXECUTION_STATUS_CANCELLING and ( not force): - raise exception.InvalidReplicaState( - "Replica '%s' is already being cancelled. 
Please use the " + raise exception.InvalidTransferState( + "Transfer '%s' is already being cancelled. Please use the " "force option if you'd like to force-cancel it." % ( - replica_id)) + transfer_id)) self._cancel_tasks_execution(ctxt, execution, force=force) - def _get_replica_tasks_execution(self, ctxt, replica_id, execution_id, - include_task_info=False, to_dict=False): + @staticmethod + def _get_transfer_tasks_execution(ctxt, transfer_id, execution_id, + include_task_info=False, to_dict=False): execution = db_api.get_transfer_tasks_execution( - ctxt, replica_id, execution_id, + ctxt, transfer_id, execution_id, include_task_info=include_task_info, to_dict=to_dict) if not execution: raise exception.NotFound( - "Execution with ID '%s' for Replica '%s' not found." % ( - execution_id, replica_id)) + "Execution with ID '%s' for Transfer '%s' not found." % ( + execution_id, transfer_id)) return execution - def get_replicas(self, ctxt, include_tasks_executions=False, - include_task_info=False): + @staticmethod + def get_transfers(ctxt, include_tasks_executions=False, + include_task_info=False): return db_api.get_transfers( ctxt, include_tasks_executions, include_task_info=include_task_info, to_dict=True) - @replica_synchronized - def get_replica(self, ctxt, replica_id, include_task_info=False): - return self._get_replica( - ctxt, replica_id, + @transfer_synchronized + def get_transfer(self, ctxt, transfer_id, include_task_info=False): + return self._get_transfer( + ctxt, transfer_id, include_task_info=include_task_info, to_dict=True) - @replica_synchronized - def delete_replica(self, ctxt, replica_id): - replica = self._get_replica(ctxt, replica_id) - self._check_replica_running_executions(ctxt, replica) - self._check_delete_reservation_for_transfer(replica) - db_api.delete_transfer(ctxt, replica_id) + @transfer_synchronized + def delete_transfer(self, ctxt, transfer_id): + transfer = self._get_transfer(ctxt, transfer_id) + 
self._check_transfer_running_executions(ctxt, transfer) + self._check_delete_reservation_for_transfer(transfer) + db_api.delete_transfer(ctxt, transfer_id) - @replica_synchronized - def delete_replica_disks(self, ctxt, replica_id): - replica = self._get_replica(ctxt, replica_id, include_task_info=True) - self._check_replica_running_executions(ctxt, replica) + @transfer_synchronized + def delete_transfer_disks(self, ctxt, transfer_id): + transfer = self._get_transfer( + ctxt, transfer_id, include_task_info=True) + self._check_transfer_running_executions(ctxt, transfer) execution = models.TasksExecution() execution.id = str(uuid.uuid4()) execution.status = constants.EXECUTION_STATUS_UNEXECUTED - execution.action = replica - execution.type = constants.EXECUTION_TYPE_REPLICA_DISKS_DELETE + execution.action = transfer + execution.type = constants.EXECUTION_TYPE_TRANSFER_DISKS_DELETE has_tasks = False - for instance in replica.instances: - if (instance in replica.info and ( - replica.info[instance].get('volumes_info'))): + for instance in transfer.instances: + if (instance in transfer.info and ( + transfer.info[instance].get('volumes_info'))): source_del_task = self._create_task( instance, - constants.TASK_TYPE_DELETE_REPLICA_SOURCE_DISK_SNAPSHOTS, + constants.TASK_TYPE_DELETE_TRANSFER_SOURCE_DISK_SNAPSHOTS, execution) self._create_task( - instance, constants.TASK_TYPE_DELETE_REPLICA_DISKS, + instance, constants.TASK_TYPE_DELETE_TRANSFER_DISKS, execution, depends_on=[source_del_task.id]) has_tasks = True if not has_tasks: - raise exception.InvalidReplicaState( - "Replica '%s' does not have volumes information for any " - "instances. Ensure that the replica has been executed " - "successfully priorly" % replica_id) + raise exception.InvalidTransferState( + "Transfer '%s' does not have volumes information for any " + "instances. 
Ensure that the transfer has been executed " + "successfully priorly" % transfer_id) # ensure we're passing the updated target-env options on the - # parent Replica itself in case of a Replica update: - dest_env = copy.deepcopy(replica.destination_environment) - dest_env['network_map'] = replica.network_map - dest_env['storage_mappings'] = replica.storage_mappings - for instance in replica.instances: - replica.info[instance].update({ + # parent Transfer itself in case of a Transfer update: + dest_env = copy.deepcopy(transfer.destination_environment) + dest_env['network_map'] = transfer.network_map + dest_env['storage_mappings'] = transfer.storage_mappings + for instance in transfer.instances: + transfer.info[instance].update({ "target_environment": dest_env}) - self._check_execution_tasks_sanity(execution, replica.info) + self._check_execution_tasks_sanity(execution, transfer.info) - # update the action info for all of the Replicas' instances: - for instance in replica.instances: + # update the action info for all of the Transfers' instances: + for instance in transfer.instances: db_api.update_transfer_action_info_for_instance( - ctxt, replica.id, instance, replica.info[instance]) + ctxt, transfer.id, instance, transfer.info[instance]) db_api.add_transfer_tasks_execution(ctxt, execution) - LOG.info("Replica tasks execution created: %s", execution.id) + LOG.info("Transfer tasks execution created: %s", execution.id) - self._begin_tasks(ctxt, replica, execution) - return self.get_replica_tasks_execution( - ctxt, replica_id, execution.id) + self._begin_tasks(ctxt, transfer, execution) + return self.get_transfer_tasks_execution( + ctxt, transfer_id, execution.id) @staticmethod def _check_endpoints(ctxt, origin_endpoint, destination_endpoint): @@ -1258,77 +1251,71 @@ def _check_endpoints(ctxt, origin_endpoint, destination_endpoint): destination_endpoint.connection_info)): raise exception.SameDestination() - def create_instances_replica(self, ctxt, replica_scenario, - 
origin_endpoint_id, - destination_endpoint_id, - origin_minion_pool_id, - destination_minion_pool_id, - instance_osmorphing_minion_pool_mappings, - source_environment, - destination_environment, instances, - network_map, storage_mappings, notes=None, - user_scripts=None): + def create_instances_transfer(self, ctxt, transfer_scenario, + origin_endpoint_id, + destination_endpoint_id, + origin_minion_pool_id, + destination_minion_pool_id, + instance_osmorphing_minion_pool_mappings, + source_environment, + destination_environment, instances, + network_map, storage_mappings, notes=None, + user_scripts=None): supported_scenarios = [ - constants.REPLICA_SCENARIO_REPLICA, - constants.REPLICA_SCENARIO_LIVE_MIGRATION] - if replica_scenario not in supported_scenarios: + constants.TRANSFER_SCENARIO_REPLICA, + constants.TRANSFER_SCENARIO_LIVE_MIGRATION] + if transfer_scenario not in supported_scenarios: raise exception.InvalidInput( - message=f"Unsupported Replica scenario '{replica_scenario}'. " - f"Must be one of: {supported_scenarios}") + message=f"Unsupported Transfer scenario '{transfer_scenario}'." 
+ f" Must be one of: {supported_scenarios}") origin_endpoint = self.get_endpoint(ctxt, origin_endpoint_id) destination_endpoint = self.get_endpoint( ctxt, destination_endpoint_id) self._check_endpoints(ctxt, origin_endpoint, destination_endpoint) - replica = models.Transfer() - replica.id = str(uuid.uuid4()) - replica.base_id = replica.id - replica.scenario = replica_scenario - replica.origin_endpoint_id = origin_endpoint_id - replica.origin_minion_pool_id = origin_minion_pool_id - replica.destination_endpoint_id = destination_endpoint_id - replica.destination_minion_pool_id = destination_minion_pool_id - replica.destination_environment = destination_environment - replica.source_environment = source_environment - replica.last_execution_status = constants.EXECUTION_STATUS_UNEXECUTED - replica.instances = instances - replica.executions = [] - replica.info = {instance: { + transfer = models.Transfer() + transfer.id = str(uuid.uuid4()) + transfer.base_id = transfer.id + transfer.scenario = transfer_scenario + transfer.origin_endpoint_id = origin_endpoint_id + transfer.origin_minion_pool_id = origin_minion_pool_id + transfer.destination_endpoint_id = destination_endpoint_id + transfer.destination_minion_pool_id = destination_minion_pool_id + transfer.destination_environment = destination_environment + transfer.source_environment = source_environment + transfer.last_execution_status = constants.EXECUTION_STATUS_UNEXECUTED + transfer.instances = instances + transfer.executions = [] + transfer.info = {instance: { 'volumes_info': []} for instance in instances} - replica.notes = notes - replica.network_map = network_map - replica.storage_mappings = storage_mappings - replica.instance_osmorphing_minion_pool_mappings = ( + transfer.notes = notes + transfer.network_map = network_map + transfer.storage_mappings = storage_mappings + transfer.instance_osmorphing_minion_pool_mappings = ( instance_osmorphing_minion_pool_mappings) - replica.user_scripts = user_scripts or {} + 
transfer.user_scripts = user_scripts or {} - self._check_minion_pools_for_action(ctxt, replica) + self._check_minion_pools_for_action(ctxt, transfer) - self._create_reservation_for_replica(replica) + self._create_reservation_for_transfer(transfer) - db_api.add_transfer(ctxt, replica) - LOG.info("Replica created: %s", replica.id) - return self.get_replica(ctxt, replica.id) + db_api.add_transfer(ctxt, transfer) + LOG.info("Transfer created: %s", transfer.id) + return self.get_transfer(ctxt, transfer.id) - def _get_replica(self, ctxt, replica_id, include_task_info=False, - to_dict=False): - replica = db_api.get_transfer( - ctxt, replica_id, include_task_info=include_task_info, + def _get_transfer(self, ctxt, transfer_id, include_task_info=False, + to_dict=False): + transfer = db_api.get_transfer( + ctxt, transfer_id, include_task_info=include_task_info, to_dict=to_dict) - if not replica: + if not transfer: raise exception.NotFound( - "Replica with ID '%s' not found." % replica_id) - return replica + "Transfer with ID '%s' not found." 
% transfer_id) + return transfer - @migration_synchronized - def get_migration(self, ctxt, migration_id, include_task_info=False): - return self._get_migration( - ctxt, migration_id, include_task_info=include_task_info, - to_dict=True) - - def get_deployments(self, ctxt, include_tasks, - include_task_info=False): + @staticmethod + def get_deployments(ctxt, include_tasks, include_task_info=False): return db_api.get_deployments( ctxt, include_tasks, include_task_info=include_task_info, @@ -1336,17 +1323,17 @@ def get_deployments(self, ctxt, include_tasks, @deployment_synchronized def get_deployment(self, ctxt, deployment_id, include_task_info=False): - return self._get_migration( + return self._get_deployment( ctxt, deployment_id, include_task_info=include_task_info, to_dict=True) @staticmethod - def _check_running_replica_migrations(ctxt, replica_id): - migrations = db_api.get_transfer_deployments(ctxt, replica_id) - if [m.id for m in migrations if m.executions[0].status in ( + def _check_running_transfer_deployments(ctxt, transfer_id): + deployments = db_api.get_transfer_deployments(ctxt, transfer_id) + if [m.id for m in deployments if m.executions[0].status in ( constants.ACTIVE_EXECUTION_STATUSES)]: - raise exception.InvalidReplicaState( - "Transfer '%s' is currently being deployed" % replica_id) + raise exception.InvalidTransferState( + "Transfer '%s' is currently being deployed" % transfer_id) @staticmethod def _check_running_executions(action): @@ -1358,25 +1345,25 @@ def _check_running_executions(action): "Another tasks execution is in progress: %s" % ( running_executions)) - def _check_replica_running_executions(self, ctxt, replica): - self._check_running_executions(replica) - self._check_running_replica_migrations(ctxt, replica.id) + def _check_transfer_running_executions(self, ctxt, transfer): + self._check_running_executions(transfer) + self._check_running_transfer_deployments(ctxt, transfer.id) @staticmethod - def 
_check_valid_replica_tasks_execution(replica, force=False): + def _check_valid_transfer_tasks_execution(transfer, force=False): sorted_executions = sorted( - replica.executions, key=lambda e: e.number, reverse=True) + transfer.executions, key=lambda e: e.number, reverse=True) if not sorted_executions: - raise exception.InvalidReplicaState( - "The Replica has never been executed.") + raise exception.InvalidTransferState( + "The Transfer has never been executed.") if not [e for e in sorted_executions - if e.type == constants.EXECUTION_TYPE_REPLICA_EXECUTION and ( + if e.type == constants.EXECUTION_TYPE_TRANSFER_EXECUTION and ( e.status == constants.EXECUTION_STATUS_COMPLETED)]: if not force: - raise exception.InvalidReplicaState( - "A replica must have been executed successfully at least " - "once in order to be migrated") + raise exception.InvalidTransferState( + "A transfer must have been executed successfully at least " + "once in order to be deployed") def _get_provider_types(self, ctxt, endpoint): provider_types = self.get_available_providers(ctxt).get(endpoint.type) @@ -1385,84 +1372,85 @@ def _get_provider_types(self, ctxt, endpoint): "No provider found for: %s" % endpoint.type) return provider_types["types"] - @replica_synchronized - def deploy_replica_instances( - self, ctxt, replica_id, clone_disks, force, + @transfer_synchronized + def deploy_transfer_instances( + self, ctxt, transfer_id, clone_disks, force, instance_osmorphing_minion_pool_mappings=None, skip_os_morphing=False, user_scripts=None): - replica = self._get_replica(ctxt, replica_id, include_task_info=True) - self._check_replica_running_executions(ctxt, replica) - self._check_valid_replica_tasks_execution(replica, force) - user_scripts = user_scripts or replica.user_scripts + transfer = self._get_transfer( + ctxt, transfer_id, include_task_info=True) + self._check_transfer_running_executions(ctxt, transfer) + self._check_valid_transfer_tasks_execution(transfer, force) + user_scripts = 
user_scripts or transfer.user_scripts destination_endpoint = self.get_endpoint( - ctxt, replica.destination_endpoint_id) + ctxt, transfer.destination_endpoint_id) destination_provider_types = self._get_provider_types( ctxt, destination_endpoint) - for instance, info in replica.info.items(): + for instance, info in transfer.info.items(): if not info.get("volumes_info"): - raise exception.InvalidReplicaState( - "The replica doesn't contain volumes information for " - "instance: %s. If replicated disks are deleted, the " - "replica needs to be executed anew before a migration can " - "occur" % instance) - - instances = replica.instances - - migration = models.Deployment() - migration.id = str(uuid.uuid4()) - migration.base_id = migration.id - migration.origin_endpoint_id = replica.origin_endpoint_id - migration.destination_endpoint_id = replica.destination_endpoint_id + raise exception.InvalidTransferState( + "The transfer doesn't contain volumes information for " + "instance: %s. If transferred disks are deleted, the " + "transfer needs to be executed anew before a deployment" + " can occur" % instance) + + instances = transfer.instances + + deployment = models.Deployment() + deployment.id = str(uuid.uuid4()) + deployment.base_id = deployment.id + deployment.origin_endpoint_id = transfer.origin_endpoint_id + deployment.destination_endpoint_id = transfer.destination_endpoint_id # TODO(aznashwan): have these passed separately to the relevant # provider methods instead of through the dest-env: - dest_env = copy.deepcopy(replica.destination_environment) - dest_env['network_map'] = replica.network_map - dest_env['storage_mappings'] = replica.storage_mappings - migration.destination_environment = dest_env - migration.source_environment = replica.source_environment - migration.network_map = replica.network_map - migration.storage_mappings = replica.storage_mappings - migration.instances = instances - migration.replica = replica - migration.info = replica.info - 
migration.notes = replica.notes - migration.user_scripts = user_scripts - # NOTE: Migrations-from-Replica have no use for the source/target - # pools of the parent Replica so these can be omitted: - migration.origin_minion_pool_id = None - migration.destination_minion_pool_id = None - migration.instance_osmorphing_minion_pool_mappings = ( - replica.instance_osmorphing_minion_pool_mappings) + dest_env = copy.deepcopy(transfer.destination_environment) + dest_env['network_map'] = transfer.network_map + dest_env['storage_mappings'] = transfer.storage_mappings + deployment.destination_environment = dest_env + deployment.source_environment = transfer.source_environment + deployment.network_map = transfer.network_map + deployment.storage_mappings = transfer.storage_mappings + deployment.instances = instances + deployment.transfer = transfer + deployment.info = transfer.info + deployment.notes = transfer.notes + deployment.user_scripts = user_scripts + # NOTE: Deployments have no use for the source/target + # pools of the parent Transfer so these can be omitted: + deployment.origin_minion_pool_id = None + deployment.destination_minion_pool_id = None + deployment.instance_osmorphing_minion_pool_mappings = ( + transfer.instance_osmorphing_minion_pool_mappings) if instance_osmorphing_minion_pool_mappings: - migration.instance_osmorphing_minion_pool_mappings.update( + deployment.instance_osmorphing_minion_pool_mappings.update( instance_osmorphing_minion_pool_mappings) - self._check_minion_pools_for_action(ctxt, migration) - self._check_reservation_for_replica(replica) + self._check_minion_pools_for_action(ctxt, deployment) + self._check_reservation_for_transfer(transfer) execution = models.TasksExecution() - migration.executions = [execution] + deployment.executions = [execution] execution.status = constants.EXECUTION_STATUS_UNEXECUTED execution.number = 1 - execution.type = constants.EXECUTION_TYPE_REPLICA_DEPLOY + execution.type = constants.EXECUTION_TYPE_DEPLOYMENT for 
instance in instances: - migration.info[instance]["clone_disks"] = clone_disks + deployment.info[instance]["clone_disks"] = clone_disks scripts = self._get_instance_scripts(user_scripts, instance) - migration.info[instance]["user_scripts"] = scripts + deployment.info[instance]["user_scripts"] = scripts # NOTE: we default/convert the volumes info to an empty list # to preserve backwards-compatibility with older versions # of Coriolis dating before the scheduling overhaul (PR##114) - if instance not in migration.info: - migration.info[instance] = {'volumes_info': []} + if instance not in deployment.info: + deployment.info[instance] = {'volumes_info': []} # NOTE: we update all of the param values before triggering an - # execution to ensure that the params on the Replica are used - # in case there was a failed Replica update (where the new values + # execution to ensure that the params on the Transfer are used + # in case there was a failed Transfer update (where the new values # could be in the `.info` field instead of the old ones) - migration.info[instance].update({ - "source_environment": migration.source_environment, + deployment.info[instance].update({ + "source_environment": deployment.source_environment, "target_environment": dest_env}) # TODO(aznashwan): have these passed separately to the relevant # provider methods (they're currently passed directly inside @@ -1470,20 +1458,20 @@ def deploy_replica_instances( # "network_map": network_map, # "storage_mappings": storage_mappings, - validate_replica_deployment_inputs_task = self._create_task( + validate_transfer_deployment_inputs_task = self._create_task( instance, - constants.TASK_TYPE_VALIDATE_REPLICA_DEPLOYMENT_INPUTS, + constants.TASK_TYPE_VALIDATE_DEPLOYMENT_INPUTS, execution) validate_osmorphing_minion_task = None - last_validation_task = validate_replica_deployment_inputs_task + last_validation_task = validate_transfer_deployment_inputs_task if not skip_os_morphing and instance in ( - 
migration.instance_osmorphing_minion_pool_mappings): + deployment.instance_osmorphing_minion_pool_mappings): # NOTE: these values are required for the # _check_execution_tasks_sanity call but # will be populated later when the pool # allocations actually happen: - migration.info[instance].update({ + deployment.info[instance].update({ "osmorphing_minion_machine_id": None, "osmorphing_minion_provider_properties": None, "osmorphing_minion_connection_info": None}) @@ -1491,27 +1479,27 @@ def deploy_replica_instances( instance, constants.TASK_TYPE_VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY, # noqa: E501 execution, depends_on=[ - validate_replica_deployment_inputs_task.id]) + validate_transfer_deployment_inputs_task.id]) last_validation_task = validate_osmorphing_minion_task create_snapshot_task = self._create_task( - instance, constants.TASK_TYPE_CREATE_REPLICA_DISK_SNAPSHOTS, + instance, constants.TASK_TYPE_CREATE_TRANSFER_DISK_SNAPSHOTS, execution, depends_on=[ last_validation_task.id]) - deploy_replica_task = self._create_task( + deploy_transfer_task = self._create_task( instance, - constants.TASK_TYPE_DEPLOY_REPLICA_INSTANCE_RESOURCES, + constants.TASK_TYPE_DEPLOY_INSTANCE_RESOURCES, execution, depends_on=[create_snapshot_task.id]) - depends_on = [deploy_replica_task.id] + depends_on = [deploy_transfer_task.id] if not skip_os_morphing: task_deploy_os_morphing_resources = None attach_osmorphing_minion_volumes_task = None last_osmorphing_resources_deployment_task = None if instance in ( - migration.instance_osmorphing_minion_pool_mappings): + deployment.instance_osmorphing_minion_pool_mappings): osmorphing_vol_attachment_deps = [ validate_osmorphing_minion_task.id] osmorphing_vol_attachment_deps.extend(depends_on) @@ -1545,7 +1533,7 @@ def deploy_replica_instances( depends_on = [task_osmorphing.id] if instance in ( - migration.instance_osmorphing_minion_pool_mappings): + deployment.instance_osmorphing_minion_pool_mappings): detach_osmorphing_minion_volumes_task = 
self._create_task( instance, constants.TASK_TYPE_DETACH_VOLUMES_FROM_OSMORPHING_MINION, # noqa: E501 @@ -1581,13 +1569,13 @@ def deploy_replica_instances( finalize_deployment_task = self._create_task( instance, - constants.TASK_TYPE_FINALIZE_REPLICA_INSTANCE_DEPLOYMENT, + constants.TASK_TYPE_FINALIZE_INSTANCE_DEPLOYMENT, execution, depends_on=depends_on) self._create_task( instance, - constants.TASK_TYPE_DELETE_REPLICA_TARGET_DISK_SNAPSHOTS, + constants.TASK_TYPE_DELETE_TRANSFER_TARGET_DISK_SNAPSHOTS, execution, depends_on=[ create_snapshot_task.id, finalize_deployment_task.id], @@ -1595,43 +1583,43 @@ def deploy_replica_instances( cleanup_deployment_task = self._create_task( instance, - constants.TASK_TYPE_CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT, + constants.TASK_TYPE_CLEANUP_FAILED_INSTANCE_DEPLOYMENT, execution, depends_on=[ - deploy_replica_task.id, + deploy_transfer_task.id, finalize_deployment_task.id], on_error_only=True) if not clone_disks: self._create_task( instance, - constants.TASK_TYPE_RESTORE_REPLICA_DISK_SNAPSHOTS, + constants.TASK_TYPE_RESTORE_TRANSFER_DISK_SNAPSHOTS, execution, depends_on=[cleanup_deployment_task.id], on_error=True) - self._check_execution_tasks_sanity(execution, migration.info) - db_api.add_deployment(ctxt, migration) - LOG.info("Migration created: %s", migration.id) + self._check_execution_tasks_sanity(execution, deployment.info) + db_api.add_deployment(ctxt, deployment) + LOG.info("Deployment created: %s", deployment.id) if not skip_os_morphing and ( - migration.instance_osmorphing_minion_pool_mappings): - # NOTE: we lock on the migration ID to ensure the minion + deployment.instance_osmorphing_minion_pool_mappings): + # NOTE: we lock on the deployment ID to ensure the minion # allocation confirmations don't come in too early: with lockutils.lock( - constants.MIGRATION_LOCK_NAME_FORMAT % migration.id, + constants.DEPLOYMENT_LOCK_NAME_FORMAT % deployment.id, external=True): (self._minion_manager_client - 
.allocate_minion_machines_for_migration( - ctxt, migration, include_transfer_minions=False, + .allocate_minion_machines_for_deployment( + ctxt, deployment, include_transfer_minions=False, include_osmorphing_minions=True)) self._set_tasks_execution_status( ctxt, execution, constants.EXECUTION_STATUS_AWAITING_MINION_ALLOCATIONS) else: - self._begin_tasks(ctxt, migration, execution) + self._begin_tasks(ctxt, deployment, execution) - return self.get_migration(ctxt, migration.id) + return self.get_deployment(ctxt, deployment.id) def _get_instance_scripts(self, user_scripts, instance): user_scripts = user_scripts or {} @@ -1710,162 +1698,164 @@ def _update_task_info_for_minion_allocations( db_api.update_transfer_action_info_for_instance( ctxt, action.id, instance, action.info[instance]) - def _get_last_execution_for_replica(self, ctxt, replica, requery=False): + def _get_last_execution_for_transfer(self, ctxt, transfer, requery=False): if requery: - replica = self._get_replica(ctxt, replica.id) - last_replica_execution = None - if not replica.executions: - raise exception.InvalidReplicaState( - "Replica with ID '%s' has no existing Replica " - "executions." % (replica.id)) - last_replica_execution = sorted( - replica.executions, key=lambda e: e.number)[-1] - return last_replica_execution - - def _get_execution_for_migration(self, ctxt, migration, requery=False): + transfer = self._get_transfer(ctxt, transfer.id) + last_transfer_execution = None + if not transfer.executions: + raise exception.InvalidTransferState( + "Transfer with ID '%s' has no existing Transfer " + "executions." 
% transfer.id) + last_transfer_execution = sorted( + transfer.executions, key=lambda e: e.number)[-1] + return last_transfer_execution + + def _get_execution_for_deployment(self, ctxt, deployment, requery=False): if requery: - migration = self._get_migration(ctxt, migration.id) - - if not migration.executions: - raise exception.InvalidMigrationState( - "Migration with ID '%s' has no existing executions." % ( - migration.id)) - if len(migration.executions) > 1: - raise exception.InvalidMigrationState( - "Migration with ID '%s' has more than one execution:" - " %s" % (migration.id, [e.id for e in migration.executions])) - return migration.executions[0] - - @replica_synchronized - def confirm_replica_minions_allocation( - self, ctxt, replica_id, minion_machine_allocations): - replica = self._get_replica(ctxt, replica_id, include_task_info=True) + deployment = self._get_deployment(ctxt, deployment.id) + + if not deployment.executions: + raise exception.InvalidDeploymentState( + "Deployment with ID '%s' has no existing executions." % ( + deployment.id)) + if len(deployment.executions) > 1: + raise exception.InvalidDeploymentState( + "Deployment with ID '%s' has more than one execution:" + " %s" % (deployment.id, [e.id for e in deployment.executions])) + return deployment.executions[0] + + @transfer_synchronized + def confirm_transfer_minions_allocation( + self, ctxt, transfer_id, minion_machine_allocations): + transfer = self._get_transfer( + ctxt, transfer_id, include_task_info=True) awaiting_minions_status = ( constants.EXECUTION_STATUS_AWAITING_MINION_ALLOCATIONS) - if replica.last_execution_status != awaiting_minions_status: - raise exception.InvalidReplicaState( - "Replica is in '%s' status instead of the expected '%s' to " + if transfer.last_execution_status != awaiting_minions_status: + raise exception.InvalidTransferState( + "Transfer is in '%s' status instead of the expected '%s' to " "have minion machines allocated for it." 
% ( - replica.last_execution_status, awaiting_minions_status)) + transfer.last_execution_status, awaiting_minions_status)) - last_replica_execution = self._get_last_execution_for_replica( - ctxt, replica, requery=False) + last_transfer_execution = self._get_last_execution_for_transfer( + ctxt, transfer, requery=False) self._update_task_info_for_minion_allocations( - ctxt, replica, minion_machine_allocations) + ctxt, transfer, minion_machine_allocations) - last_replica_execution = db_api.get_transfer_tasks_execution( - ctxt, replica.id, last_replica_execution.id) + last_transfer_execution = db_api.get_transfer_tasks_execution( + ctxt, transfer.id, last_transfer_execution.id) self._begin_tasks( - ctxt, replica, last_replica_execution) + ctxt, transfer, last_transfer_execution) - @replica_synchronized - def report_replica_minions_allocation_error( - self, ctxt, replica_id, minion_allocation_error_details): - replica = self._get_replica(ctxt, replica_id) + @transfer_synchronized + def report_transfer_minions_allocation_error( + self, ctxt, transfer_id, minion_allocation_error_details): + transfer = self._get_transfer(ctxt, transfer_id) awaiting_minions_status = ( constants.EXECUTION_STATUS_AWAITING_MINION_ALLOCATIONS) - if replica.last_execution_status != awaiting_minions_status: - raise exception.InvalidReplicaState( - "Replica is in '%s' status instead of the expected '%s' to " + if transfer.last_execution_status != awaiting_minions_status: + raise exception.InvalidTransferState( + "Transfer is in '%s' status instead of the expected '%s' to " "have minion machines allocations fail for it." 
% ( - replica.last_execution_status, awaiting_minions_status)) + transfer.last_execution_status, awaiting_minions_status)) - last_replica_execution = self._get_last_execution_for_replica( - ctxt, replica, requery=False) + last_transfer_execution = self._get_last_execution_for_transfer( + ctxt, transfer, requery=False) LOG.warn( - "Error occured while allocating minion machines for Replica '%s'. " - "Cancelling the current Replica Execution ('%s'). Error was: %s", - replica_id, last_replica_execution.id, + "Error occurred while allocating minion machines for Transfer " + "'%s'. Cancelling the current Transfer Execution ('%s'). " + "Error was: %s", + transfer_id, last_transfer_execution.id, minion_allocation_error_details) self._cancel_tasks_execution( - ctxt, last_replica_execution, requery=True) + ctxt, last_transfer_execution, requery=True) self._set_tasks_execution_status( - ctxt, last_replica_execution, + ctxt, last_transfer_execution, constants.EXECUTION_STATUS_ERROR_ALLOCATING_MINIONS) - @migration_synchronized - def confirm_migration_minions_allocation( - self, ctxt, migration_id, minion_machine_allocations): - migration = self._get_migration( - ctxt, migration_id, include_task_info=True) + @deployment_synchronized + def confirm_deployment_minions_allocation( + self, ctxt, deployment_id, minion_machine_allocations): + deployment = self._get_deployment( + ctxt, deployment_id, include_task_info=True) awaiting_minions_status = ( constants.EXECUTION_STATUS_AWAITING_MINION_ALLOCATIONS) - if migration.last_execution_status != awaiting_minions_status: - raise exception.InvalidMigrationState( - "Migration is in '%s' status instead of the expected '%s' to " + if deployment.last_execution_status != awaiting_minions_status: + raise exception.InvalidDeploymentState( + "Deployment is in '%s' status instead of the expected '%s' to " "have minion machines allocated for it." 
% ( - migration.last_execution_status, awaiting_minions_status)) + deployment.last_execution_status, awaiting_minions_status)) - execution = self._get_execution_for_migration( - ctxt, migration, requery=False) + execution = self._get_execution_for_deployment( + ctxt, deployment, requery=False) self._update_task_info_for_minion_allocations( - ctxt, migration, minion_machine_allocations) - self._begin_tasks(ctxt, migration, execution) + ctxt, deployment, minion_machine_allocations) + self._begin_tasks(ctxt, deployment, execution) - @migration_synchronized - def report_migration_minions_allocation_error( - self, ctxt, migration_id, minion_allocation_error_details): - migration = self._get_migration(ctxt, migration_id) + @deployment_synchronized + def report_deployment_minions_allocation_error( + self, ctxt, deployment_id, minion_allocation_error_details): + deployment = self._get_deployment(ctxt, deployment_id) awaiting_minions_status = ( constants.EXECUTION_STATUS_AWAITING_MINION_ALLOCATIONS) - if migration.last_execution_status != awaiting_minions_status: - raise exception.InvalidMigrationState( - "Migration is in '%s' status instead of the expected '%s' to " + if deployment.last_execution_status != awaiting_minions_status: + raise exception.InvalidDeploymentState( + "Deployment is in '%s' status instead of the expected '%s' to " "have minion machines allocations fail for it." % ( - migration.last_execution_status, awaiting_minions_status)) + deployment.last_execution_status, awaiting_minions_status)) - execution = self._get_execution_for_migration( - ctxt, migration, requery=False) + execution = self._get_execution_for_deployment( + ctxt, deployment, requery=False) LOG.warn( "Error occured while allocating minion machines for " - "Migration '%s'. Cancelling the current Execution ('%s'). " + "Deployment '%s'. Cancelling the current Execution ('%s'). 
" "Error was: %s", - migration_id, execution.id, minion_allocation_error_details) + deployment_id, execution.id, minion_allocation_error_details) self._cancel_tasks_execution( ctxt, execution, requery=True) self._set_tasks_execution_status( ctxt, execution, constants.EXECUTION_STATUS_ERROR_ALLOCATING_MINIONS) - def _get_migration(self, ctxt, migration_id, include_task_info=False, - to_dict=False): - migration = db_api.get_deployment( - ctxt, migration_id, include_task_info=include_task_info, + def _get_deployment(self, ctxt, deployment_id, include_task_info=False, + to_dict=False): + deployment = db_api.get_deployment( + ctxt, deployment_id, include_task_info=include_task_info, to_dict=to_dict) - if not migration: + if not deployment: raise exception.NotFound( - "Migration with ID '%s' not found." % migration_id) - return migration + "Deployment with ID '%s' not found." % deployment_id) + return deployment - def _delete_migration(self, ctxt, migration_id): - migration = self._get_migration(ctxt, migration_id) - execution = migration.executions[0] + def _delete_deployment(self, ctxt, deployment_id): + deployment = self._get_deployment(ctxt, deployment_id) + execution = deployment.executions[0] if execution.status in constants.ACTIVE_EXECUTION_STATUSES: - raise exception.InvalidMigrationState( - "Cannot delete Migration '%s' as it is currently in " - "'%s' state." % (migration_id, execution.status)) - db_api.delete_deployment(ctxt, migration_id) + raise exception.InvalidDeploymentState( + "Cannot delete Deployment '%s' as it is currently in " + "'%s' state." 
% (deployment_id, execution.status)) + db_api.delete_deployment(ctxt, deployment_id) @deployment_synchronized def delete_deployment(self, ctxt, deployment_id): - self._delete_migration(ctxt, deployment_id) - - def _cancel_migration(self, ctxt, migration_id, force): - migration = self._get_migration(ctxt, migration_id) - if len(migration.executions) != 1: - raise exception.InvalidMigrationState( - "Migration '%s' has in improper number of tasks " - "executions: %d" % (migration_id, len(migration.executions))) - execution = migration.executions[0] + self._delete_deployment(ctxt, deployment_id) + + def _cancel_deployment(self, ctxt, deployment_id, force): + deployment = self._get_deployment(ctxt, deployment_id) + if len(deployment.executions) != 1: + raise exception.InvalidDeploymentState( + "Deployment '%s' has an improper number of tasks " + "executions: %d" % (deployment_id, len(deployment.executions))) + execution = deployment.executions[0] if execution.status not in constants.ACTIVE_EXECUTION_STATUSES: - raise exception.InvalidMigrationState( - "Migration '%s' is not currently running" % migration_id) + raise exception.InvalidDeploymentState( + "Deployment '%s' is not currently running" % deployment_id) if execution.status == constants.EXECUTION_STATUS_CANCELLING and ( not force): - raise exception.InvalidMigrationState( - "Migration '%s' is already being cancelled. Please use the " + raise exception.InvalidDeploymentState( + "Deployment '%s' is already being cancelled. 
Please use the " "force option if you'd like to force-cancel it.") with lockutils.lock( @@ -1875,7 +1865,7 @@ def _cancel_migration(self, ctxt, migration_id, force): @deployment_synchronized def cancel_deployment(self, ctxt, deployment_id, force): - self._cancel_migration(ctxt, deployment_id, force) + self._cancel_deployment(ctxt, deployment_id, force) def _cancel_tasks_execution( self, ctxt, execution, requery=True, force=False): @@ -2052,50 +2042,50 @@ def _update_reservation_fulfillment_for_execution(self, ctxt, execution): """ Updates the reservation fulfillment status for the parent transfer action of the given execution based on its type. - Replica transfers are marked as fulfilled as soon as a Replica + Replica transfers are marked as fulfilled as soon as a Transfer Execution is successfully completed. Live migration transfers are marked as fulfilled as soon as they are deployed for the first (and only) time. """ if execution.type not in ( - constants.EXECUTION_TYPE_REPLICA_EXECUTION, - constants.EXECUTION_TYPE_REPLICA_DEPLOY): + constants.EXECUTION_TYPE_TRANSFER_EXECUTION, + constants.EXECUTION_TYPE_DEPLOYMENT): LOG.debug( f"Skipping setting reservation fulfillment for execution " f"'{execution.id}' of type '{execution.type}'.") return if execution.type not in ( - constants.EXECUTION_TYPE_REPLICA_EXECUTION, - constants.EXECUTION_TYPE_REPLICA_DEPLOY): + constants.EXECUTION_TYPE_TRANSFER_EXECUTION, + constants.EXECUTION_TYPE_DEPLOYMENT): LOG.debug( - f"Skipping setting replica fulfillment for execution " + f"Skipping setting transfer fulfillment for execution " f"'{execution.id}' of type '{execution.type}'.") return transfer_action = execution.action transfer_id = transfer_action.base_id - if transfer_action.type == constants.TRANSFER_ACTION_TYPE_MIGRATION: - deployment = self._get_migration(ctxt, transfer_id) + if transfer_action.type == constants.TRANSFER_ACTION_TYPE_DEPLOYMENT: + deployment = self._get_deployment(ctxt, transfer_id) transfer_id = 
deployment.transfer_id - transfer_action = self._get_replica( + transfer_action = self._get_transfer( ctxt, transfer_id, include_task_info=False) else: - transfer_action = self._get_replica( + transfer_action = self._get_transfer( ctxt, execution.action_id, include_task_info=False) scenario = transfer_action.scenario - if scenario == constants.REPLICA_SCENARIO_REPLICA and ( - execution.type == constants.EXECUTION_TYPE_REPLICA_EXECUTION): + if scenario == constants.TRANSFER_SCENARIO_REPLICA and ( + execution.type == constants.EXECUTION_TYPE_TRANSFER_EXECUTION): self._check_mark_reservation_fulfilled( transfer_action, must_unfulfilled=False) - elif scenario == constants.REPLICA_SCENARIO_LIVE_MIGRATION and ( - execution.type == constants.EXECUTION_TYPE_REPLICA_DEPLOY): + elif scenario == constants.TRANSFER_SCENARIO_LIVE_MIGRATION and ( + execution.type == constants.EXECUTION_TYPE_DEPLOYMENT): self._check_mark_reservation_fulfilled( transfer_action, must_unfulfilled=False) else: LOG.debug( - f"Skipping setting replica fulfillment for execution " + f"Skipping setting transfer fulfillment for execution " f"'{execution.id}' of type '{execution.type}' on parent" f"action {transfer_id} of scenario type " f"{transfer_action.scenario}.") @@ -2368,7 +2358,7 @@ def _advance_execution_state( requery=not requery) == ( constants.EXECUTION_STATUS_DEADLOCKED): LOG.error( - "Execution '%s' deadlocked even before Replica state " + "Execution '%s' deadlocked even before Transfer state " "advancement . Cleanup has been perfomed. Returning.", execution.id) return [] @@ -2597,9 +2587,9 @@ def _start_task(task): ctxt, execution, task_statuses=task_statuses) == ( constants.EXECUTION_STATUS_DEADLOCKED): LOG.error( - "Execution '%s' deadlocked after Replica state advancement" - ". Cleanup has been perfomed. Returning early.", - execution.id) + "Execution '%s' deadlocked after Transfer state " + "advancement. Cleanup has been performed. 
" + "Returning early.", execution.id) return [] LOG.debug( "No new tasks were started for execution '%s'", execution.id) @@ -2623,26 +2613,27 @@ def _start_task(task): return started_tasks - def _update_replica_volumes_info(self, ctxt, replica_id, instance, - updated_task_info): - """ WARN: the lock for the Replica must be pre-acquired. """ + @staticmethod + def _update_transfer_volumes_info(ctxt, transfer_id, instance, + updated_task_info): + """ WARN: the lock for the Transfer must be pre-acquired. """ db_api.update_transfer_action_info_for_instance( - ctxt, replica_id, instance, + ctxt, transfer_id, instance, updated_task_info) - def _update_volumes_info_for_migration_parent_replica( - self, ctxt, migration_id, instance, updated_task_info): - migration = db_api.get_deployment(ctxt, migration_id) - replica_id = migration.transfer_id + def _update_volumes_info_for_deployment_parent_transfer( + self, ctxt, deployment_id, instance, updated_task_info): + deployment = db_api.get_deployment(ctxt, deployment_id) + transfer_id = deployment.transfer_id with lockutils.lock( - constants.REPLICA_LOCK_NAME_FORMAT % replica_id, + constants.TRANSFER_LOCK_NAME_FORMAT % transfer_id, external=True): LOG.debug( - "Updating volume_info in replica due to snapshot " - "restore during migration. replica id: %s", replica_id) - self._update_replica_volumes_info( - ctxt, replica_id, instance, updated_task_info) + "Updating volume_info in transfer due to snapshot " + "restore during deployment. 
transfer id: %s", transfer_id) + self._update_transfer_volumes_info( + ctxt, transfer_id, instance, updated_task_info) def _handle_post_task_actions(self, ctxt, task, execution, task_info): task_type = task.task_type @@ -2657,11 +2648,11 @@ def _check_other_tasks_running(execution, current_task): break return still_running - if task_type == constants.TASK_TYPE_RESTORE_REPLICA_DISK_SNAPSHOTS: + if task_type == constants.TASK_TYPE_RESTORE_TRANSFER_DISK_SNAPSHOTS: # When restoring a snapshot in some import providers (OpenStack), # a new volume_id is generated. This needs to be updated in the - # Replica instance as well. + # Transfer instance as well. volumes_info = task_info.get('volumes_info') if not volumes_info: LOG.warn( @@ -2675,30 +2666,28 @@ def _check_other_tasks_running(execution, current_task): task.instance, execution.action_id, task.id, task_type, utils.sanitize_task_info( {'volumes_info': volumes_info})) - self._update_volumes_info_for_migration_parent_replica( + self._update_volumes_info_for_deployment_parent_transfer( ctxt, execution.action_id, task.instance, {"volumes_info": volumes_info}) elif task_type == ( - constants.TASK_TYPE_DELETE_REPLICA_TARGET_DISK_SNAPSHOTS): + constants.TASK_TYPE_DELETE_TRANSFER_TARGET_DISK_SNAPSHOTS): if not task_info.get("clone_disks"): - # The migration completed. If the replica is executed again, - # new volumes need to be deployed in place of the migrated + # The deployment completed. If the transfer is executed again, + # new volumes need to be created in place of the deployed # ones. 
LOG.info( - "Unsetting 'volumes_info' for instance '%s' in Replica " - "'%s' after completion of Replica task '%s' (type '%s') " - "with clone_disks=False.", + "Unsetting 'volumes_info' for instance '%s' in Transfer " + "'%s' after completion of Transfer task '%s' " + "(type '%s') with clone_disks=False.", task.instance, execution.action_id, task.id, task_type) - self._update_volumes_info_for_migration_parent_replica( + self._update_volumes_info_for_deployment_parent_transfer( ctxt, execution.action_id, task.instance, {"volumes_info": []}) - elif task_type in ( - constants.TASK_TYPE_FINALIZE_REPLICA_INSTANCE_DEPLOYMENT, - constants.TASK_TYPE_FINALIZE_INSTANCE_DEPLOYMENT): + elif task_type == constants.TASK_TYPE_FINALIZE_INSTANCE_DEPLOYMENT: # set 'transfer_result' in the 'base_transfer_action' # table if the task returned a result. if "transfer_result" in task_info: @@ -2724,30 +2713,30 @@ def _check_other_tasks_running(execution, current_task): "No 'transfer_result' was returned for task type '%s' " "for transfer action '%s'", task_type, execution.action_id) elif task_type in ( - constants.TASK_TYPE_UPDATE_SOURCE_REPLICA, - constants.TASK_TYPE_UPDATE_DESTINATION_REPLICA): + constants.TASK_TYPE_UPDATE_SOURCE_TRANSFER, + constants.TASK_TYPE_UPDATE_DESTINATION_TRANSFER): # NOTE: remember to update the `volumes_info`: # NOTE: considering this method is only called with a lock on the - # `execution.action_id` (in a Replica update tasks' case that's the - # ID of the Replica itself) we can safely call - # `_update_replica_volumes_info` below: - self._update_replica_volumes_info( + # `execution.action_id` (in a Transfer update tasks' case that's + # the ID of the Transfer itself) we can safely call + # `_update_transfer_volumes_info` below: + self._update_transfer_volumes_info( ctxt, execution.action_id, task.instance, {"volumes_info": task_info.get("volumes_info", [])}) - if task_type == constants.TASK_TYPE_UPDATE_DESTINATION_REPLICA: + if task_type == 
constants.TASK_TYPE_UPDATE_DESTINATION_TRANSFER: # check if this was the last task in the update execution: still_running = _check_other_tasks_running(execution, task) if not still_running: # it means this was the last update task in the Execution - # and we may safely update the params of the Replica + # and we may safely update the params of the Transfer # as they are in the DB: LOG.info( - "All tasks of the '%s' Replica update procedure have " + "All tasks of the '%s' Transfer update procedure have " "completed successfully. Setting the updated " - "parameter values on the parent Replica itself.", + "parameter values on the parent Transfer itself.", execution.action_id) - # NOTE: considering all the instances of the Replica get + # NOTE: considering all the instances of the Transfer get # the same params, it doesn't matter which instance's # update task finishes last: db_api.update_transfer( @@ -3077,10 +3066,6 @@ def confirm_task_cancellation(self, ctxt, task_id, cancellation_details): "confirmation of its cancellation.", task.id, task.status, final_status) execution = db_api.get_tasks_execution(ctxt, task.execution_id) - if execution.type == constants.EXECUTION_TYPE_MIGRATION: - action = db_api.get_action( - ctxt, execution.action_id, include_task_info=False) - self._check_delete_reservation_for_transfer(action) self._advance_execution_state(ctxt, execution, requery=False) @parent_tasks_execution_synchronized @@ -3183,7 +3168,7 @@ def set_task_error(self, ctxt, task_id, exception_details): "connection info. Original error was: %s" % ( exception_details))) LOG.warn( - "All subtasks for Migration '%s' have been cancelled " + "All subtasks for Deployment '%s' have been cancelled " "to allow for OSMorphing debugging. 
The connection " "info for the worker VM is: %s", action_id, action.info.get(task.instance, {}).get( @@ -3239,97 +3224,98 @@ def update_task_progress_update( ctxt, task_id, progress_update_index, new_current_step, new_total_steps=new_total_steps, new_message=new_message) - def _get_replica_schedule(self, ctxt, replica_id, - schedule_id, expired=True): + @staticmethod + def _get_transfer_schedule(ctxt, transfer_id, schedule_id, expired=True): schedule = db_api.get_transfer_schedule( - ctxt, replica_id, schedule_id, expired=expired) + ctxt, transfer_id, schedule_id, expired=expired) if not schedule: raise exception.NotFound( - "Schedule with ID '%s' for Replica '%s' not found." % ( - schedule_id, replica_id)) + "Schedule with ID '%s' for Transfer '%s' not found." % ( + schedule_id, transfer_id)) return schedule - def create_replica_schedule(self, ctxt, replica_id, - schedule, enabled, exp_date, - shutdown_instance): + def create_transfer_schedule(self, ctxt, transfer_id, + schedule, enabled, exp_date, + shutdown_instance): keystone.create_trust(ctxt) - replica = self._get_replica(ctxt, replica_id) - replica_schedule = models.TransferSchedule() - replica_schedule.id = str(uuid.uuid4()) - replica_schedule.transfer = replica - replica_schedule.transfer_id = replica_id - replica_schedule.schedule = schedule - replica_schedule.expiration_date = exp_date - replica_schedule.enabled = enabled - replica_schedule.shutdown_instance = shutdown_instance - replica_schedule.trust_id = ctxt.trust_id + transfer = self._get_transfer(ctxt, transfer_id) + transfer_schedule = models.TransferSchedule() + transfer_schedule.id = str(uuid.uuid4()) + transfer_schedule.transfer = transfer + transfer_schedule.transfer_id = transfer_id + transfer_schedule.schedule = schedule + transfer_schedule.expiration_date = exp_date + transfer_schedule.enabled = enabled + transfer_schedule.shutdown_instance = shutdown_instance + transfer_schedule.trust_id = ctxt.trust_id db_api.add_transfer_schedule( - 
ctxt, replica_schedule, - lambda ctxt, sched: self._replica_cron_client.register( + ctxt, transfer_schedule, + lambda ctxt, sched: self._transfer_cron_client.register( ctxt, sched)) - return self.get_replica_schedule( - ctxt, replica_id, replica_schedule.id) + return self.get_transfer_schedule( + ctxt, transfer_id, transfer_schedule.id) @schedule_synchronized - def update_replica_schedule(self, ctxt, replica_id, schedule_id, - updated_values): + def update_transfer_schedule(self, ctxt, transfer_id, schedule_id, + updated_values): db_api.update_transfer_schedule( - ctxt, replica_id, schedule_id, updated_values, None, - lambda ctxt, sched: self._replica_cron_client.register( + ctxt, transfer_id, schedule_id, updated_values, None, + lambda ctxt, sched: self._transfer_cron_client.register( ctxt, sched)) - return self._get_replica_schedule(ctxt, replica_id, schedule_id) + return self._get_transfer_schedule(ctxt, transfer_id, schedule_id) def _cleanup_schedule_resources(self, ctxt, schedule): - self._replica_cron_client.unregister(ctxt, schedule) + self._transfer_cron_client.unregister(ctxt, schedule) if schedule.trust_id: tmp_trust = context.get_admin_context( trust_id=schedule.trust_id) keystone.delete_trust(tmp_trust) @schedule_synchronized - def delete_replica_schedule(self, ctxt, replica_id, schedule_id): - replica = self._get_replica(ctxt, replica_id) - replica_status = replica.last_execution_status + def delete_transfer_schedule(self, ctxt, transfer_id, schedule_id): + transfer = self._get_transfer(ctxt, transfer_id) + transfer_status = transfer.last_execution_status valid_statuses = list(itertools.chain( constants.FINALIZED_EXECUTION_STATUSES, [constants.EXECUTION_STATUS_UNEXECUTED])) - if replica_status not in valid_statuses: - raise exception.InvalidReplicaState( - 'Replica Schedule cannot be deleted while the Replica is in ' - '%s state. 
Please wait for the Replica execution to finish' % - (replica_status)) + if transfer_status not in valid_statuses: + raise exception.InvalidTransferState( + 'Transfer Schedule cannot be deleted while the Transfer is in ' + '%s state. Please wait for the Transfer execution to finish' % + (transfer_status)) db_api.delete_transfer_schedule( - ctxt, replica_id, schedule_id, None, + ctxt, transfer_id, schedule_id, None, lambda ctxt, sched: self._cleanup_schedule_resources( ctxt, sched)) - @replica_synchronized - def get_replica_schedules(self, ctxt, replica_id=None, expired=True): + @transfer_synchronized + def get_transfer_schedules(self, ctxt, transfer_id=None, expired=True): return db_api.get_transfer_schedules( - ctxt, transfer_id=replica_id, expired=expired) + ctxt, transfer_id=transfer_id, expired=expired) @schedule_synchronized - def get_replica_schedule(self, ctxt, replica_id, - schedule_id, expired=True): - return self._get_replica_schedule( - ctxt, replica_id, schedule_id, expired=expired) + def get_transfer_schedule(self, ctxt, transfer_id, + schedule_id, expired=True): + return self._get_transfer_schedule( + ctxt, transfer_id, schedule_id, expired=expired) - @replica_synchronized - def update_replica( - self, ctxt, replica_id, updated_properties): - replica = self._get_replica(ctxt, replica_id, include_task_info=True) + @transfer_synchronized + def update_transfer( + self, ctxt, transfer_id, updated_properties): + transfer = self._get_transfer( + ctxt, transfer_id, include_task_info=True) minion_pool_fields = [ "origin_minion_pool_id", "destination_minion_pool_id", "instance_osmorphing_minion_pool_mappings"] if any([mpf in updated_properties for mpf in minion_pool_fields]): - # NOTE: this is just a dummy Replica model to use for validation: + # NOTE: this is just a dummy Transfer model to use for validation: dummy = models.Transfer() - dummy.id = replica.id - dummy.instances = replica.instances - dummy.origin_endpoint_id = replica.origin_endpoint_id - 
dummy.destination_endpoint_id = replica.destination_endpoint_id + dummy.id = transfer.id + dummy.instances = transfer.instances + dummy.origin_endpoint_id = transfer.origin_endpoint_id + dummy.destination_endpoint_id = transfer.destination_endpoint_id dummy.origin_minion_pool_id = updated_properties.get( 'origin_minion_pool_id') dummy.destination_minion_pool_id = updated_properties.get( @@ -3339,33 +3325,33 @@ def update_replica( 'instance_osmorphing_minion_pool_mappings')) self._check_minion_pools_for_action(ctxt, dummy) - self._check_replica_running_executions(ctxt, replica) - self._check_valid_replica_tasks_execution(replica, force=True) + self._check_transfer_running_executions(ctxt, transfer) + self._check_valid_transfer_tasks_execution(transfer, force=True) if updated_properties.get('user_scripts'): - replica.user_scripts = updated_properties['user_scripts'] + transfer.user_scripts = updated_properties['user_scripts'] execution = models.TasksExecution() execution.id = str(uuid.uuid4()) execution.status = constants.EXECUTION_STATUS_UNEXECUTED - execution.action = replica - execution.type = constants.EXECUTION_TYPE_REPLICA_UPDATE + execution.action = transfer + execution.type = constants.EXECUTION_TYPE_TRANSFER_UPDATE - for instance in replica.instances: + for instance in transfer.instances: LOG.debug( - "Pre-replica-update task_info for instance '%s' of Replica " - "'%s': %s", instance, replica_id, + "Pre-transfer-update task_info for instance '%s' of Transfer " + "'%s': %s", instance, transfer_id, utils.sanitize_task_info( - replica.info[instance])) + transfer.info[instance])) # NOTE: "circular assignment" would lead to a `None` value # so we must operate on a copy: - inst_info_copy = copy.deepcopy(replica.info[instance]) + inst_info_copy = copy.deepcopy(transfer.info[instance]) # NOTE: we update the various values in the task info itself # As a result, the values within the task_info will be the updated # values which will be checked. 
The old values will be sent to the # tasks through the origin/destination parameters for them to be # compared to the new ones. - # The actual values on the Replica object itself will be set + # The actual values on the Transfer object itself will be set # during _handle_post_task_actions once the final destination-side # update task will be completed. inst_info_copy.update({ @@ -3377,45 +3363,45 @@ def update_replica( if "destination_environment" in updated_properties: inst_info_copy["target_environment"] = updated_properties[ "destination_environment"] - replica.info[instance] = inst_info_copy + transfer.info[instance] = inst_info_copy LOG.debug( - "Updated task_info for instance '%s' of Replica " + "Updated task_info for instance '%s' of Transfer " "'%s' which will be verified during update procedure: %s", - instance, replica_id, utils.sanitize_task_info( - replica.info[instance])) + instance, transfer_id, utils.sanitize_task_info( + transfer.info[instance])) get_instance_info_task = self._create_task( instance, constants.TASK_TYPE_GET_INSTANCE_INFO, execution) - update_source_replica_task = self._create_task( - instance, constants.TASK_TYPE_UPDATE_SOURCE_REPLICA, + update_source_transfer_task = self._create_task( + instance, constants.TASK_TYPE_UPDATE_SOURCE_TRANSFER, execution) self._create_task( - instance, constants.TASK_TYPE_UPDATE_DESTINATION_REPLICA, + instance, constants.TASK_TYPE_UPDATE_DESTINATION_TRANSFER, execution, depends_on=[ get_instance_info_task.id, # NOTE: the dest-side update task must be done after # the source-side one as both can potentially modify # the 'volumes_info' together: - update_source_replica_task.id]) + update_source_transfer_task.id]) - self._check_execution_tasks_sanity(execution, replica.info) + self._check_execution_tasks_sanity(execution, transfer.info) - # update the action info for all of the instances in the Replica: + # update the action info for all of the instances in the Transfer: for instance in 
execution.action.instances: db_api.update_transfer_action_info_for_instance( - ctxt, replica.id, instance, replica.info[instance]) + ctxt, transfer.id, instance, transfer.info[instance]) db_api.add_transfer_tasks_execution(ctxt, execution) - LOG.debug("Execution for Replica update tasks created: %s", + LOG.debug("Execution for Transfer update tasks created: %s", execution.id) - self._begin_tasks(ctxt, replica, execution) + self._begin_tasks(ctxt, transfer, execution) - return self.get_replica_tasks_execution( - ctxt, replica_id, execution.id) + return self.get_transfer_tasks_execution( + ctxt, transfer_id, execution.id) def get_diagnostics(self, ctxt): diagnostics = utils.get_diagnostics_info() diff --git a/coriolis/constants.py b/coriolis/constants.py index 0d8068b9..2bfbf6e3 100644 --- a/coriolis/constants.py +++ b/coriolis/constants.py @@ -3,8 +3,8 @@ DEFAULT_CORIOLIS_REGION_NAME = "Default Region" -REPLICA_SCENARIO_REPLICA = "replica" -REPLICA_SCENARIO_LIVE_MIGRATION = "live_migration" +TRANSFER_SCENARIO_REPLICA = "replica" +TRANSFER_SCENARIO_LIVE_MIGRATION = "live_migration" EXECUTION_STATUS_UNEXECUTED = "UNEXECUTED" EXECUTION_STATUS_RUNNING = "RUNNING" @@ -82,62 +82,37 @@ TASK_STATUS_FAILED_TO_CANCEL ] -TASK_TYPE_DEPLOY_MIGRATION_SOURCE_RESOURCES = ( - "DEPLOY_MIGRATION_SOURCE_RESOURCES") -TASK_TYPE_DEPLOY_MIGRATION_TARGET_RESOURCES = ( - "DEPLOY_MIGRATION_TARGET_RESOURCES") -TASK_TYPE_DELETE_MIGRATION_SOURCE_RESOURCES = ( - "DELETE_MIGRATION_SOURCE_RESOURCES") -TASK_TYPE_DELETE_MIGRATION_TARGET_RESOURCES = ( - "DELETE_MIGRATION_TARGET_RESOURCES") -TASK_TYPE_DEPLOY_INSTANCE_RESOURCES = "DEPLOY_INSTANCE_RESOURCES" TASK_TYPE_FINALIZE_INSTANCE_DEPLOYMENT = "FINALIZE_INSTANCE_DEPLOYMENT" TASK_TYPE_CLEANUP_FAILED_INSTANCE_DEPLOYMENT = ( "CLEANUP_FAILED_INSTANCE_DEPLOYMENT") -TASK_TYPE_CLEANUP_INSTANCE_SOURCE_STORAGE = ( - "CLEANUP_INSTANCE_SOURCE_STORAGE") -TASK_TYPE_CLEANUP_INSTANCE_TARGET_STORAGE = ( - "CLEANUP_INSTANCE_TARGET_STORAGE") - 
-TASK_TYPE_CREATE_INSTANCE_DISKS = "CREATE_INSTANCE_DISKS" TASK_TYPE_DEPLOY_OS_MORPHING_RESOURCES = "DEPLOY_OS_MORPHING_RESOURCES" TASK_TYPE_OS_MORPHING = "OS_MORPHING" TASK_TYPE_DELETE_OS_MORPHING_RESOURCES = "DELETE_OS_MORPHING_RESOURCES" TASK_TYPE_GET_INSTANCE_INFO = "GET_INSTANCE_INFO" -TASK_TYPE_DEPLOY_REPLICA_DISKS = "DEPLOY_REPLICA_DISKS" -TASK_TYPE_DELETE_REPLICA_SOURCE_DISK_SNAPSHOTS = ( - "DELETE_REPLICA_SOURCE_DISK_SNAPSHOTS") -TASK_TYPE_DELETE_REPLICA_DISKS = "DELETE_REPLICA_DISKS" +TASK_TYPE_DEPLOY_TRANSFER_DISKS = "DEPLOY_TRANSFER_DISKS" +TASK_TYPE_DELETE_TRANSFER_SOURCE_DISK_SNAPSHOTS = ( + "DELETE_TRANSFER_SOURCE_DISK_SNAPSHOTS") +TASK_TYPE_DELETE_TRANSFER_DISKS = "DELETE_TRANSFER_DISKS" TASK_TYPE_REPLICATE_DISKS = "REPLICATE_DISKS" -TASK_TYPE_DEPLOY_REPLICA_SOURCE_RESOURCES = "DEPLOY_REPLICA_SOURCE_RESOURCES" -TASK_TYPE_DELETE_REPLICA_SOURCE_RESOURCES = "DELETE_REPLICA_SOURCE_RESOURCES" -TASK_TYPE_DEPLOY_REPLICA_TARGET_RESOURCES = "DEPLOY_REPLICA_TARGET_RESOURCES" -TASK_TYPE_DELETE_REPLICA_TARGET_RESOURCES = "DELETE_REPLICA_TARGET_RESOURCES" +TASK_TYPE_DEPLOY_TRANSFER_SOURCE_RESOURCES = "DEPLOY_TRANSFER_SOURCE_RESOURCES" +TASK_TYPE_DELETE_TRANSFER_SOURCE_RESOURCES = "DELETE_TRANSFER_SOURCE_RESOURCES" +TASK_TYPE_DEPLOY_TRANSFER_TARGET_RESOURCES = "DEPLOY_TRANSFER_TARGET_RESOURCES" +TASK_TYPE_DELETE_TRANSFER_TARGET_RESOURCES = "DELETE_TRANSFER_TARGET_RESOURCES" TASK_TYPE_SHUTDOWN_INSTANCE = "SHUTDOWN_INSTANCE" -TASK_TYPE_DEPLOY_REPLICA_INSTANCE_RESOURCES = ( - "DEPLOY_REPLICA_INSTANCE_RESOURCES") -TASK_TYPE_FINALIZE_REPLICA_INSTANCE_DEPLOYMENT = ( - "FINALIZE_REPLICA_INSTANCE_DEPLOYMENT") -TASK_TYPE_CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT = ( - "CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT") -TASK_TYPE_CREATE_REPLICA_DISK_SNAPSHOTS = "CREATE_REPLICA_DISK_SNAPSHOTS" -TASK_TYPE_DELETE_REPLICA_TARGET_DISK_SNAPSHOTS = ( - "DELETE_REPLICA_TARGET_DISK_SNAPSHOTS") -TASK_TYPE_RESTORE_REPLICA_DISK_SNAPSHOTS = "RESTORE_REPLICA_DISK_SNAPSHOTS" 
+TASK_TYPE_DEPLOY_INSTANCE_RESOURCES = "DEPLOY_INSTANCE_RESOURCES" +TASK_TYPE_CREATE_TRANSFER_DISK_SNAPSHOTS = "CREATE_TRANSFER_DISK_SNAPSHOTS" +TASK_TYPE_DELETE_TRANSFER_TARGET_DISK_SNAPSHOTS = ( + "DELETE_TRANSFER_TARGET_DISK_SNAPSHOTS") +TASK_TYPE_RESTORE_TRANSFER_DISK_SNAPSHOTS = "RESTORE_TRANSFER_DISK_SNAPSHOTS" TASK_TYPE_GET_OPTIMAL_FLAVOR = "GET_OPTIMAL_FLAVOR" -TASK_TYPE_VALIDATE_MIGRATION_SOURCE_INPUTS = ( - "VALIDATE_MIGRATION_SOURCE_INPUTS") -TASK_TYPE_VALIDATE_MIGRATION_DESTINATION_INPUTS = ( - "VALIDATE_MIGRATION_DESTINATION_INPUTS") -TASK_TYPE_VALIDATE_REPLICA_SOURCE_INPUTS = "VALIDATE_REPLICA_SOURCE_INPUTS" -TASK_TYPE_VALIDATE_REPLICA_DESTINATION_INPUTS = ( - "VALIDATE_REPLICA_DESTINATION_INPUTS") -TASK_TYPE_VALIDATE_REPLICA_DEPLOYMENT_INPUTS = ( - "VALIDATE_REPLICA_DEPLOYMENT_INPUTS") -TASK_TYPE_UPDATE_SOURCE_REPLICA = "UPDATE_SOURCE_REPLICA" -TASK_TYPE_UPDATE_DESTINATION_REPLICA = "UPDATE_DESTINATION_REPLICA" +TASK_TYPE_VALIDATE_TRANSFER_SOURCE_INPUTS = "VALIDATE_TRANSFER_SOURCE_INPUTS" +TASK_TYPE_VALIDATE_TRANSFER_DESTINATION_INPUTS = ( + "VALIDATE_TRANSFER_DESTINATION_INPUTS") +TASK_TYPE_VALIDATE_DEPLOYMENT_INPUTS = "VALIDATE_DEPLOYMENT_INPUTS" +TASK_TYPE_UPDATE_SOURCE_TRANSFER = "UPDATE_SOURCE_TRANSFER" +TASK_TYPE_UPDATE_DESTINATION_TRANSFER = "UPDATE_DESTINATION_TRANSFER" TASK_TYPE_VALIDATE_SOURCE_MINION_POOL_OPTIONS = ( "VALIDATE_SOURCE_MINION_POOL_ENVIRONMENT_OPTIONS") @@ -214,8 +189,8 @@ PROVIDER_TYPE_IMPORT = 1 PROVIDER_TYPE_EXPORT = 2 -PROVIDER_TYPE_REPLICA_IMPORT = 4 -PROVIDER_TYPE_REPLICA_EXPORT = 8 +PROVIDER_TYPE_TRANSFER_IMPORT = 4 +PROVIDER_TYPE_TRANSFER_EXPORT = 8 PROVIDER_TYPE_ENDPOINT = 16 PROVIDER_TYPE_ENDPOINT_INSTANCES = 32 PROVIDER_TYPE_OS_MORPHING = 64 @@ -223,16 +198,18 @@ PROVIDER_TYPE_INSTANCE_FLAVOR = 256 PROVIDER_TYPE_DESTINATION_ENDPOINT_OPTIONS = 512 PROVIDER_TYPE_SETUP_LIBS = 1024 -PROVIDER_TYPE_VALIDATE_MIGRATION_EXPORT = 2048 -PROVIDER_TYPE_VALIDATE_REPLICA_EXPORT = 4096 -PROVIDER_TYPE_VALIDATE_MIGRATION_IMPORT = 
8192 -PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT = 16384 +PROVIDER_TYPE_VALIDATE_TRANSFER_EXPORT = 4096 +PROVIDER_TYPE_VALIDATE_TRANSFER_IMPORT = 16384 PROVIDER_TYPE_ENDPOINT_STORAGE = 32768 -PROVIDER_TYPE_SOURCE_REPLICA_UPDATE = 65536 +PROVIDER_TYPE_SOURCE_TRANSFER_UPDATE = 65536 PROVIDER_TYPE_SOURCE_ENDPOINT_OPTIONS = 131072 -PROVIDER_TYPE_DESTINATION_REPLICA_UPDATE = 262144 +PROVIDER_TYPE_DESTINATION_TRANSFER_UPDATE = 262144 PROVIDER_TYPE_SOURCE_MINION_POOL = 524288 PROVIDER_TYPE_DESTINATION_MINION_POOL = 1048576 +# NOTE(dvincze): These are deprecated, we should remove them, +# and de-increment the rest +PROVIDER_TYPE_VALIDATE_MIGRATION_EXPORT = 2048 +PROVIDER_TYPE_VALIDATE_MIGRATION_IMPORT = 8192 DISK_FORMAT_VMDK = 'vmdk' DISK_FORMAT_RAW = 'raw' @@ -284,14 +261,13 @@ COMPRESSION_FORMAT_ZLIB ] -TRANSFER_ACTION_TYPE_MIGRATION = "migration" -TRANSFER_ACTION_TYPE_REPLICA = "replica" +TRANSFER_ACTION_TYPE_DEPLOYMENT = "deployment" +TRANSFER_ACTION_TYPE_TRANSFER = "transfer" -EXECUTION_TYPE_REPLICA_EXECUTION = "replica_execution" -EXECUTION_TYPE_REPLICA_DISKS_DELETE = "replica_disks_delete" -EXECUTION_TYPE_REPLICA_DEPLOY = "replica_deploy" -EXECUTION_TYPE_MIGRATION = "migration" -EXECUTION_TYPE_REPLICA_UPDATE = "replica_update" +EXECUTION_TYPE_TRANSFER_EXECUTION = "transfer_execution" +EXECUTION_TYPE_TRANSFER_DISKS_DELETE = "transfer_disks_delete" +EXECUTION_TYPE_DEPLOYMENT = "deployment" +EXECUTION_TYPE_TRANSFER_UPDATE = "transfer_update" EXECUTION_TYPE_MINION_POOL_MAINTENANCE = "minion_pool_maintenance" EXECUTION_TYPE_MINION_POOL_UPDATE = "minion_pool_update" EXECUTION_TYPE_MINION_POOL_SET_UP_SHARED_RESOURCES = ( @@ -306,10 +282,8 @@ TASKFLOW_LOCK_NAME_FORMAT = "taskflow-%s" EXECUTION_LOCK_NAME_FORMAT = "execution-%s" ENDPOINT_LOCK_NAME_FORMAT = "endpoint-%s" -MIGRATION_LOCK_NAME_FORMAT = "migration-%s" -# NOTE(aznashwan): intentionately left identical to Migration locks. 
-DEPLOYMENT_LOCK_NAME_FORMAT = "migration-%s" -REPLICA_LOCK_NAME_FORMAT = "replica-%s" +DEPLOYMENT_LOCK_NAME_FORMAT = "deployment-%s" +TRANSFER_LOCK_NAME_FORMAT = "transfer-%s" SCHEDULE_LOCK_NAME_FORMAT = "schedule-%s" REGION_LOCK_NAME_FORMAT = "region-%s" SERVICE_LOCK_NAME_FORMAT = "service-%s" @@ -317,11 +291,10 @@ MINION_MACHINE_LOCK_NAME_FORMAT = "minion-pool-%s-machine-%s" EXECUTION_TYPE_TO_ACTION_LOCK_NAME_FORMAT_MAP = { - EXECUTION_TYPE_MIGRATION: MIGRATION_LOCK_NAME_FORMAT, - EXECUTION_TYPE_REPLICA_EXECUTION: REPLICA_LOCK_NAME_FORMAT, - EXECUTION_TYPE_REPLICA_DEPLOY: REPLICA_LOCK_NAME_FORMAT, - EXECUTION_TYPE_REPLICA_UPDATE: REPLICA_LOCK_NAME_FORMAT, - EXECUTION_TYPE_REPLICA_DISKS_DELETE: REPLICA_LOCK_NAME_FORMAT, + EXECUTION_TYPE_TRANSFER_EXECUTION: TRANSFER_LOCK_NAME_FORMAT, + EXECUTION_TYPE_DEPLOYMENT: TRANSFER_LOCK_NAME_FORMAT, + EXECUTION_TYPE_TRANSFER_UPDATE: TRANSFER_LOCK_NAME_FORMAT, + EXECUTION_TYPE_TRANSFER_DISKS_DELETE: TRANSFER_LOCK_NAME_FORMAT, EXECUTION_TYPE_MINION_POOL_MAINTENANCE: MINION_POOL_LOCK_NAME_FORMAT, EXECUTION_TYPE_MINION_POOL_UPDATE: MINION_POOL_LOCK_NAME_FORMAT, EXECUTION_TYPE_MINION_POOL_SET_UP_SHARED_RESOURCES: ( @@ -340,7 +313,7 @@ CONDUCTOR_MAIN_MESSAGING_TOPIC = "coriolis_conductor" WORKER_MAIN_MESSAGING_TOPIC = "coriolis_worker" SCHEDULER_MAIN_MESSAGING_TOPIC = "coriolis_scheduler" -REPLICA_CRON_MAIN_MESSAGING_TOPIC = "coriolis_replica_cron_worker" +TRANSFER_CRON_MAIN_MESSAGING_TOPIC = "coriolis_transfer_cron_worker" MINION_MANAGER_MAIN_MESSAGING_TOPIC = "coriolis_minion_manager" MINION_POOL_MACHINE_RETENTION_STRATEGY_DELETE = "delete" diff --git a/coriolis/db/sqlalchemy/models.py b/coriolis/db/sqlalchemy/models.py index 2f101be7..638d13d9 100644 --- a/coriolis/db/sqlalchemy/models.py +++ b/coriolis/db/sqlalchemy/models.py @@ -332,7 +332,7 @@ class Transfer(BaseTransferAction): 'base_transfer_action.base_id'), primary_key=True) scenario = sqlalchemy.Column( sqlalchemy.String(255), nullable=False, - 
default=constants.REPLICA_SCENARIO_REPLICA) + default=constants.TRANSFER_SCENARIO_REPLICA) __mapper_args__ = { 'polymorphic_identity': 'transfer', diff --git a/coriolis/deployments/api.py b/coriolis/deployments/api.py index fa25b6e2..dbb4fe95 100644 --- a/coriolis/deployments/api.py +++ b/coriolis/deployments/api.py @@ -12,7 +12,7 @@ def deploy_replica_instances(self, ctxt, replica_id, instance_osmorphing_minion_pool_mappings, clone_disks=False, force=False, skip_os_morphing=False, user_scripts=None): - return self._rpc_client.deploy_replica_instances( + return self._rpc_client.deploy_transfer_instances( ctxt, replica_id, instance_osmorphing_minion_pool_mappings=( instance_osmorphing_minion_pool_mappings), clone_disks=clone_disks, force=force, diff --git a/coriolis/diagnostics/api.py b/coriolis/diagnostics/api.py index 36425930..aaa865a8 100644 --- a/coriolis/diagnostics/api.py +++ b/coriolis/diagnostics/api.py @@ -2,7 +2,7 @@ # All Rights Reserved. from coriolis.conductor.rpc import client as conductor_rpc -from coriolis.replica_cron.rpc import client as cron_rpc +from coriolis.transfer_cron.rpc import client as cron_rpc from coriolis import utils from coriolis.worker.rpc import client as worker_rpc @@ -10,7 +10,7 @@ class API(object): def __init__(self): self._conductor_cli = conductor_rpc.ConductorClient() - self._cron_cli = cron_rpc.ReplicaCronClient() + self._cron_cli = cron_rpc.TransferCronClient() self._worker_cli = worker_rpc.WorkerClient() def get(self, ctxt): diff --git a/coriolis/exception.py b/coriolis/exception.py index 8e0a54d6..603b0b9c 100644 --- a/coriolis/exception.py +++ b/coriolis/exception.py @@ -235,12 +235,12 @@ class InvalidActionTasksExecutionState(Invalid): message = _("Invalid tasks execution state: %(reason)s") -class InvalidMigrationState(Invalid): - message = _("Invalid migration state: %(reason)s") +class InvalidDeploymentState(Invalid): + message = _("Invalid deployment state: %(reason)s") -class InvalidReplicaState(Invalid): - 
message = _("Invalid replica state: %(reason)s") +class InvalidTransferState(Invalid): + message = _("Invalid transfer state: %(reason)s") class InvalidInstanceState(Invalid): diff --git a/coriolis/minion_manager/rpc/client.py b/coriolis/minion_manager/rpc/client.py index 4803047d..1de89e2d 100644 --- a/coriolis/minion_manager/rpc/client.py +++ b/coriolis/minion_manager/rpc/client.py @@ -67,12 +67,12 @@ def validate_minion_pool_selections_for_action(self, ctxt, action): ctxt, 'validate_minion_pool_selections_for_action', action=action) - def allocate_minion_machines_for_replica( + def allocate_minion_machines_for_transfer( self, ctxt, replica): return self._cast( ctxt, 'allocate_minion_machines_for_replica', replica=replica) - def allocate_minion_machines_for_migration( + def allocate_minion_machines_for_deployment( self, ctxt, migration, include_transfer_minions=True, include_osmorphing_minions=True): return self._cast( diff --git a/coriolis/minion_manager/rpc/server.py b/coriolis/minion_manager/rpc/server.py index d639cd7f..761a10a1 100644 --- a/coriolis/minion_manager/rpc/server.py +++ b/coriolis/minion_manager/rpc/server.py @@ -515,7 +515,7 @@ def allocate_minion_machines_for_replica( try: self._run_machine_allocation_subflow_for_action( ctxt, replica, - constants.TRANSFER_ACTION_TYPE_REPLICA, + constants.TRANSFER_ACTION_TYPE_TRANSFER, include_transfer_minions=True, include_osmorphing_minions=False) except Exception as ex: @@ -529,8 +529,9 @@ def allocate_minion_machines_for_replica( [constants.MINION_MACHINE_STATUS_UNINITIALIZED]) self.deallocate_minion_machines_for_action( ctxt, replica['id']) - self._rpc_conductor_client.report_replica_minions_allocation_error( - ctxt, replica['id'], str(ex)) + (self._rpc_conductor_client + .report_transfer_minions_allocation_error( + ctxt, replica['id'], str(ex))) raise def allocate_minion_machines_for_migration( @@ -539,7 +540,7 @@ def allocate_minion_machines_for_migration( try: 
self._run_machine_allocation_subflow_for_action( ctxt, migration, - constants.TRANSFER_ACTION_TYPE_MIGRATION, + constants.TRANSFER_ACTION_TYPE_DEPLOYMENT, include_transfer_minions=include_transfer_minions, include_osmorphing_minions=include_osmorphing_minions) except Exception as ex: @@ -554,7 +555,7 @@ def allocate_minion_machines_for_migration( self.deallocate_minion_machines_for_action( ctxt, migration['id']) (self._rpc_conductor_client - .report_migration_minions_allocation_error( + .report_deployment_minions_allocation_error( ctxt, migration['id'], str(ex))) raise @@ -779,7 +780,7 @@ def _run_machine_allocation_subflow_for_action( machine_action_allocation_subflow_name_format = None allocation_failure_reporting_task_class = None allocation_confirmation_reporting_task_class = None - if action_type == constants.TRANSFER_ACTION_TYPE_MIGRATION: + if action_type == constants.TRANSFER_ACTION_TYPE_DEPLOYMENT: allocation_flow_name_format = ( (minion_mgr_tasks. MINION_POOL_MIGRATION_ALLOCATION_FLOW_NAME_FORMAT)) @@ -793,7 +794,7 @@ def _run_machine_allocation_subflow_for_action( machine_action_allocation_subflow_name_format = ( (minion_mgr_tasks. MINION_POOL_ALLOCATE_MACHINES_FOR_MIGRATION_SUBFLOW_NAME_FORMAT)) # noqa: E501 - elif action_type == constants.TRANSFER_ACTION_TYPE_REPLICA: + elif action_type == constants.TRANSFER_ACTION_TYPE_TRANSFER: allocation_flow_name_format = ( (minion_mgr_tasks. 
MINION_POOL_REPLICA_ALLOCATION_FLOW_NAME_FORMAT)) diff --git a/coriolis/minion_manager/rpc/tasks.py b/coriolis/minion_manager/rpc/tasks.py index a3daef2a..5e8c6acf 100644 --- a/coriolis/minion_manager/rpc/tasks.py +++ b/coriolis/minion_manager/rpc/tasks.py @@ -191,7 +191,7 @@ def _get_task_name(self, action_id): def _report_machine_allocation_failure( self, context, action_id, failure_str): - self._conductor_client.report_migration_minions_allocation_error( + self._conductor_client.report_deployment_minions_allocation_error( context, action_id, failure_str) @@ -205,7 +205,7 @@ def _get_task_name(self, action_id): def _report_machine_allocation_failure( self, context, action_id, failure_str): - self._conductor_client.report_replica_minions_allocation_error( + self._conductor_client.report_transfer_minions_allocation_error( context, action_id, failure_str) @@ -379,8 +379,8 @@ def _check_minion_properties( raise exception.MinionMachineAllocationFailure( msg) from ex except ( - exception.InvalidMigrationState, - exception.InvalidReplicaState) as ex: + exception.InvalidDeploymentState, + exception.InvalidTransferState) as ex: msg = ( "The Conductor has refused minion machine allocations for " "%s with ID '%s' as it is purportedly in an invalid state " @@ -410,7 +410,7 @@ def _get_task_name(self, action_id): def _confirm_machine_allocation_for_action( self, context, action_id, machine_allocations): - self._conductor_client.confirm_migration_minions_allocation( + self._conductor_client.confirm_deployment_minions_allocation( context, action_id, machine_allocations) @@ -427,7 +427,7 @@ def _get_task_name(self, action_id): def _confirm_machine_allocation_for_action( self, context, action_id, machine_allocations): - self._conductor_client.confirm_replica_minions_allocation( + self._conductor_client.confirm_transfer_minions_allocation( context, action_id, machine_allocations) diff --git a/coriolis/providers/factory.py b/coriolis/providers/factory.py index 7a11652f..ff3a5cdb 
100644 --- a/coriolis/providers/factory.py +++ b/coriolis/providers/factory.py @@ -22,8 +22,8 @@ # classical disk-export-based migrations to Replica-based ones: # constants.PROVIDER_TYPE_EXPORT: base.BaseExportProvider, # constants.PROVIDER_TYPE_IMPORT: base.BaseImportProvider, - constants.PROVIDER_TYPE_REPLICA_EXPORT: base.BaseReplicaExportProvider, - constants.PROVIDER_TYPE_REPLICA_IMPORT: base.BaseReplicaImportProvider, + constants.PROVIDER_TYPE_TRANSFER_EXPORT: base.BaseReplicaExportProvider, + constants.PROVIDER_TYPE_TRANSFER_IMPORT: base.BaseReplicaImportProvider, constants.PROVIDER_TYPE_ENDPOINT: base.BaseEndpointProvider, constants.PROVIDER_TYPE_DESTINATION_ENDPOINT_OPTIONS: base.BaseEndpointDestinationOptionsProvider, @@ -38,15 +38,15 @@ constants.PROVIDER_TYPE_SETUP_LIBS: base.BaseProviderSetupExtraLibsMixin, constants.PROVIDER_TYPE_VALIDATE_MIGRATION_EXPORT: ( base.BaseMigrationExportValidationProvider), - constants.PROVIDER_TYPE_VALIDATE_REPLICA_EXPORT: ( + constants.PROVIDER_TYPE_VALIDATE_TRANSFER_EXPORT: ( base.BaseReplicaExportValidationProvider), constants.PROVIDER_TYPE_VALIDATE_MIGRATION_IMPORT: ( base.BaseMigrationImportValidationProvider), - constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT: ( + constants.PROVIDER_TYPE_VALIDATE_TRANSFER_IMPORT: ( base.BaseReplicaImportValidationProvider), - constants.PROVIDER_TYPE_SOURCE_REPLICA_UPDATE: ( + constants.PROVIDER_TYPE_SOURCE_TRANSFER_UPDATE: ( base.BaseUpdateSourceReplicaProvider), - constants.PROVIDER_TYPE_DESTINATION_REPLICA_UPDATE: ( + constants.PROVIDER_TYPE_DESTINATION_TRANSFER_UPDATE: ( base.BaseUpdateDestinationReplicaProvider), constants.PROVIDER_TYPE_SOURCE_ENDPOINT_OPTIONS: ( base.BaseEndpointSourceOptionsProvider), @@ -81,7 +81,7 @@ def get_provider( parent = PROVIDER_TYPE_MAP.get(provider_type) if not parent: continue - if (cls.platform == platform_name and issubclass(cls, parent)): + if cls.platform == platform_name and issubclass(cls, parent): return cls(event_handler) if 
raise_if_not_found: diff --git a/coriolis/replica_tasks_executions/api.py b/coriolis/replica_tasks_executions/api.py index f78b5e9e..234e785a 100644 --- a/coriolis/replica_tasks_executions/api.py +++ b/coriolis/replica_tasks_executions/api.py @@ -9,21 +9,21 @@ def __init__(self): self._rpc_client = rpc_client.ConductorClient() def create(self, ctxt, replica_id, shutdown_instances): - return self._rpc_client.execute_replica_tasks( + return self._rpc_client.execute_transfer_tasks( ctxt, replica_id, shutdown_instances) def delete(self, ctxt, replica_id, execution_id): - self._rpc_client.delete_replica_tasks_execution( + self._rpc_client.delete_transfer_tasks_execution( ctxt, replica_id, execution_id) def cancel(self, ctxt, replica_id, execution_id, force): - self._rpc_client.cancel_replica_tasks_execution( + self._rpc_client.cancel_transfer_tasks_execution( ctxt, replica_id, execution_id, force) def get_executions(self, ctxt, replica_id, include_tasks=False): - return self._rpc_client.get_replica_tasks_executions( + return self._rpc_client.get_transfer_tasks_executions( ctxt, replica_id, include_tasks) def get_execution(self, ctxt, replica_id, execution_id): - return self._rpc_client.get_replica_tasks_execution( + return self._rpc_client.get_transfer_tasks_execution( ctxt, replica_id, execution_id) diff --git a/coriolis/replicas/api.py b/coriolis/replicas/api.py index 73353ea0..f6642202 100644 --- a/coriolis/replicas/api.py +++ b/coriolis/replicas/api.py @@ -14,7 +14,7 @@ def create(self, ctxt, replica_scenario, instance_osmorphing_minion_pool_mappings, source_environment, destination_environment, instances, network_map, storage_mappings, notes=None, user_scripts=None): - return self._rpc_client.create_instances_replica( + return self._rpc_client.create_instances_transfer( ctxt, replica_scenario, origin_endpoint_id, destination_endpoint_id, origin_minion_pool_id, destination_minion_pool_id, @@ -23,21 +23,21 @@ def create(self, ctxt, replica_scenario, network_map, 
storage_mappings, notes, user_scripts) def update(self, ctxt, replica_id, updated_properties): - return self._rpc_client.update_replica( + return self._rpc_client.update_transfer( ctxt, replica_id, updated_properties) def delete(self, ctxt, replica_id): - self._rpc_client.delete_replica(ctxt, replica_id) + self._rpc_client.delete_transfer(ctxt, replica_id) def get_replicas(self, ctxt, include_tasks_executions=False, include_task_info=False): - return self._rpc_client.get_replicas( + return self._rpc_client.get_transfers( ctxt, include_tasks_executions, include_task_info=include_task_info) def get_replica(self, ctxt, replica_id, include_task_info=False): - return self._rpc_client.get_replica( + return self._rpc_client.get_transfer( ctxt, replica_id, include_task_info=include_task_info) def delete_disks(self, ctxt, replica_id): - return self._rpc_client.delete_replica_disks(ctxt, replica_id) + return self._rpc_client.delete_transfer_disks(ctxt, replica_id) diff --git a/coriolis/scheduler/scheduler_utils.py b/coriolis/scheduler/scheduler_utils.py index 00b30a79..defd6dca 100644 --- a/coriolis/scheduler/scheduler_utils.py +++ b/coriolis/scheduler/scheduler_utils.py @@ -8,8 +8,8 @@ from coriolis import constants from coriolis.db import api as db_api from coriolis import exception -from coriolis.replica_cron.rpc import client as rpc_cron_client from coriolis.scheduler.rpc import client as rpc_scheduler_client +from coriolis.transfer_cron.rpc import client as rpc_cron_client from coriolis.worker.rpc import client as rpc_worker_client @@ -21,8 +21,8 @@ constants.WORKER_MAIN_MESSAGING_TOPIC: rpc_worker_client.WorkerClient, constants.SCHEDULER_MAIN_MESSAGING_TOPIC: ( rpc_scheduler_client.SchedulerClient), - constants.REPLICA_CRON_MAIN_MESSAGING_TOPIC: ( - rpc_cron_client.ReplicaCronClient) + constants.TRANSFER_CRON_MAIN_MESSAGING_TOPIC: ( + rpc_cron_client.TransferCronClient) } diff --git a/coriolis/tasks/factory.py b/coriolis/tasks/factory.py index a3df2e9d..e638ea45 100644 
--- a/coriolis/tasks/factory.py +++ b/coriolis/tasks/factory.py @@ -9,32 +9,12 @@ from coriolis.tasks import replica_tasks _TASKS_MAP = { - constants.TASK_TYPE_DEPLOY_MIGRATION_SOURCE_RESOURCES: - migration_tasks.DeployMigrationSourceResourcesTask, - constants.TASK_TYPE_DEPLOY_MIGRATION_TARGET_RESOURCES: - migration_tasks.DeployMigrationTargetResourcesTask, - constants.TASK_TYPE_DELETE_MIGRATION_SOURCE_RESOURCES: - migration_tasks.DeleteMigrationSourceResourcesTask, - constants.TASK_TYPE_DELETE_MIGRATION_TARGET_RESOURCES: - migration_tasks.DeleteMigrationTargetResourcesTask, - constants.TASK_TYPE_DEPLOY_INSTANCE_RESOURCES: - migration_tasks.DeployInstanceResourcesTask, constants.TASK_TYPE_FINALIZE_INSTANCE_DEPLOYMENT: migration_tasks.FinalizeInstanceDeploymentTask, - constants.TASK_TYPE_CREATE_INSTANCE_DISKS: - migration_tasks.CreateInstanceDisksTask, constants.TASK_TYPE_CLEANUP_FAILED_INSTANCE_DEPLOYMENT: migration_tasks.CleanupFailedInstanceDeploymentTask, - constants.TASK_TYPE_CLEANUP_INSTANCE_TARGET_STORAGE: - migration_tasks.CleanupInstanceTargetStorageTask, - constants.TASK_TYPE_CLEANUP_INSTANCE_SOURCE_STORAGE: - migration_tasks.CleanupInstanceSourceStorageTask, constants.TASK_TYPE_GET_OPTIMAL_FLAVOR: migration_tasks.GetOptimalFlavorTask, - constants.TASK_TYPE_VALIDATE_MIGRATION_SOURCE_INPUTS: - migration_tasks.ValidateMigrationSourceInputsTask, - constants.TASK_TYPE_VALIDATE_MIGRATION_DESTINATION_INPUTS: - migration_tasks.ValidateMigrationDestinationInputsTask, constants.TASK_TYPE_DEPLOY_OS_MORPHING_RESOURCES: osmorphing_tasks.DeployOSMorphingResourcesTask, constants.TASK_TYPE_OS_MORPHING: @@ -47,41 +27,37 @@ replica_tasks.ReplicateDisksTask, constants.TASK_TYPE_SHUTDOWN_INSTANCE: replica_tasks.ShutdownInstanceTask, - constants.TASK_TYPE_DEPLOY_REPLICA_DISKS: + constants.TASK_TYPE_DEPLOY_TRANSFER_DISKS: replica_tasks.DeployReplicaDisksTask, - constants.TASK_TYPE_DELETE_REPLICA_SOURCE_DISK_SNAPSHOTS: + 
constants.TASK_TYPE_DELETE_TRANSFER_SOURCE_DISK_SNAPSHOTS: replica_tasks.DeleteReplicaSourceDiskSnapshotsTask, - constants.TASK_TYPE_DELETE_REPLICA_DISKS: + constants.TASK_TYPE_DELETE_TRANSFER_DISKS: replica_tasks.DeleteReplicaDisksTask, - constants.TASK_TYPE_DEPLOY_REPLICA_TARGET_RESOURCES: + constants.TASK_TYPE_DEPLOY_TRANSFER_TARGET_RESOURCES: replica_tasks.DeployReplicaTargetResourcesTask, - constants.TASK_TYPE_DELETE_REPLICA_TARGET_RESOURCES: + constants.TASK_TYPE_DELETE_TRANSFER_TARGET_RESOURCES: replica_tasks.DeleteReplicaTargetResourcesTask, - constants.TASK_TYPE_DEPLOY_REPLICA_SOURCE_RESOURCES: + constants.TASK_TYPE_DEPLOY_TRANSFER_SOURCE_RESOURCES: replica_tasks.DeployReplicaSourceResourcesTask, - constants.TASK_TYPE_DELETE_REPLICA_SOURCE_RESOURCES: + constants.TASK_TYPE_DELETE_TRANSFER_SOURCE_RESOURCES: replica_tasks.DeleteReplicaSourceResourcesTask, - constants.TASK_TYPE_DEPLOY_REPLICA_INSTANCE_RESOURCES: + constants.TASK_TYPE_DEPLOY_INSTANCE_RESOURCES: replica_tasks.DeployReplicaInstanceResourcesTask, - constants.TASK_TYPE_FINALIZE_REPLICA_INSTANCE_DEPLOYMENT: - replica_tasks.FinalizeReplicaInstanceDeploymentTask, - constants.TASK_TYPE_CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT: - replica_tasks.CleanupFailedReplicaInstanceDeploymentTask, - constants.TASK_TYPE_CREATE_REPLICA_DISK_SNAPSHOTS: + constants.TASK_TYPE_CREATE_TRANSFER_DISK_SNAPSHOTS: replica_tasks.CreateReplicaDiskSnapshotsTask, - constants.TASK_TYPE_DELETE_REPLICA_TARGET_DISK_SNAPSHOTS: + constants.TASK_TYPE_DELETE_TRANSFER_TARGET_DISK_SNAPSHOTS: replica_tasks.DeleteReplicaTargetDiskSnapshotsTask, - constants.TASK_TYPE_RESTORE_REPLICA_DISK_SNAPSHOTS: + constants.TASK_TYPE_RESTORE_TRANSFER_DISK_SNAPSHOTS: replica_tasks.RestoreReplicaDiskSnapshotsTask, - constants.TASK_TYPE_VALIDATE_REPLICA_SOURCE_INPUTS: + constants.TASK_TYPE_VALIDATE_TRANSFER_SOURCE_INPUTS: replica_tasks.ValidateReplicaExecutionSourceInputsTask, - constants.TASK_TYPE_VALIDATE_REPLICA_DESTINATION_INPUTS: + 
constants.TASK_TYPE_VALIDATE_TRANSFER_DESTINATION_INPUTS: replica_tasks.ValidateReplicaExecutionDestinationInputsTask, - constants.TASK_TYPE_VALIDATE_REPLICA_DEPLOYMENT_INPUTS: + constants.TASK_TYPE_VALIDATE_DEPLOYMENT_INPUTS: replica_tasks.ValidateReplicaDeploymentParametersTask, - constants.TASK_TYPE_UPDATE_SOURCE_REPLICA: + constants.TASK_TYPE_UPDATE_SOURCE_TRANSFER: replica_tasks.UpdateSourceReplicaTask, - constants.TASK_TYPE_UPDATE_DESTINATION_REPLICA: + constants.TASK_TYPE_UPDATE_DESTINATION_TRANSFER: replica_tasks.UpdateDestinationReplicaTask, constants.TASK_TYPE_VALIDATE_SOURCE_MINION_POOL_OPTIONS: minion_pool_tasks.ValidateSourceMinionPoolOptionsTask, diff --git a/coriolis/tasks/osmorphing_tasks.py b/coriolis/tasks/osmorphing_tasks.py index 05969810..3005a525 100644 --- a/coriolis/tasks/osmorphing_tasks.py +++ b/coriolis/tasks/osmorphing_tasks.py @@ -34,20 +34,20 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_SOURCE: [ - constants.PROVIDER_TYPE_REPLICA_EXPORT], + constants.PROVIDER_TYPE_TRANSFER_EXPORT], constants.PROVIDER_PLATFORM_DESTINATION: [ - constants.PROVIDER_TYPE_REPLICA_IMPORT], + constants.PROVIDER_TYPE_TRANSFER_IMPORT], } def _run(self, ctxt, instance, origin, destination, task_info, event_handler): origin_provider = providers_factory.get_provider( - origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin["type"], constants.PROVIDER_TYPE_TRANSFER_EXPORT, event_handler) destination_provider = providers_factory.get_provider( - destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination["type"], constants.PROVIDER_TYPE_TRANSFER_IMPORT, event_handler) osmorphing_connection_info = base.unmarshal_migr_conn_info( diff --git a/coriolis/tasks/replica_tasks.py b/coriolis/tasks/replica_tasks.py index f2c195dc..bdc2382e 100644 --- a/coriolis/tasks/replica_tasks.py +++ b/coriolis/tasks/replica_tasks.py @@ -93,13 +93,13 @@ def 
get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_SOURCE: [ - constants.PROVIDER_TYPE_REPLICA_EXPORT] + constants.PROVIDER_TYPE_TRANSFER_EXPORT] } def _run(self, ctxt, instance, origin, destination, task_info, event_handler): provider = providers_factory.get_provider( - origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin["type"], constants.PROVIDER_TYPE_TRANSFER_EXPORT, event_handler) connection_info = base.get_connection_info(ctxt, origin) @@ -134,13 +134,13 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_SOURCE: [ - constants.PROVIDER_TYPE_REPLICA_EXPORT] + constants.PROVIDER_TYPE_TRANSFER_EXPORT] } def _run(self, ctxt, instance, origin, destination, task_info, event_handler): provider = providers_factory.get_provider( - origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin["type"], constants.PROVIDER_TYPE_TRANSFER_EXPORT, event_handler) connection_info = base.get_connection_info(ctxt, origin) @@ -175,13 +175,13 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_SOURCE: [ - constants.PROVIDER_TYPE_REPLICA_EXPORT] + constants.PROVIDER_TYPE_TRANSFER_EXPORT] } def _run(self, ctxt, instance, origin, destination, task_info, event_handler): provider = providers_factory.get_provider( - origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin["type"], constants.PROVIDER_TYPE_TRANSFER_EXPORT, event_handler) connection_info = base.get_connection_info(ctxt, origin) export_info = task_info["export_info"] @@ -245,7 +245,7 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_DESTINATION: [ - constants.PROVIDER_TYPE_REPLICA_IMPORT] + constants.PROVIDER_TYPE_TRANSFER_IMPORT] } def _run(self, ctxt, instance, origin, destination, task_info, @@ -254,7 +254,7 @@ def _run(self, 
ctxt, instance, origin, destination, task_info, export_info = task_info["export_info"] provider = providers_factory.get_provider( - destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination["type"], constants.PROVIDER_TYPE_TRANSFER_IMPORT, event_handler) connection_info = base.get_connection_info(ctxt, destination) @@ -291,7 +291,7 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_SOURCE: [ - constants.PROVIDER_TYPE_REPLICA_EXPORT] + constants.PROVIDER_TYPE_TRANSFER_EXPORT] } def _run(self, ctxt, instance, origin, destination, task_info, @@ -305,7 +305,7 @@ def _run(self, ctxt, instance, origin, destination, task_info, return {'volumes_info': []} provider = providers_factory.get_provider( - origin['type'], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin['type'], constants.PROVIDER_TYPE_TRANSFER_EXPORT, event_handler) connection_info = base.get_connection_info(ctxt, origin) source_environment = task_info['source_environment'] @@ -337,7 +337,7 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_DESTINATION: [ - constants.PROVIDER_TYPE_REPLICA_IMPORT] + constants.PROVIDER_TYPE_TRANSFER_IMPORT] } def _run(self, ctxt, instance, origin, destination, task_info, @@ -351,7 +351,7 @@ def _run(self, ctxt, instance, origin, destination, task_info, return {'volumes_info': []} provider = providers_factory.get_provider( - destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination["type"], constants.PROVIDER_TYPE_TRANSFER_IMPORT, event_handler) connection_info = base.get_connection_info(ctxt, destination) @@ -389,13 +389,13 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_SOURCE: [ - constants.PROVIDER_TYPE_REPLICA_EXPORT] + constants.PROVIDER_TYPE_TRANSFER_EXPORT] } def _run(self, ctxt, instance, origin, destination, task_info, 
event_handler): provider = providers_factory.get_provider( - origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin["type"], constants.PROVIDER_TYPE_TRANSFER_EXPORT, event_handler) connection_info = base.get_connection_info(ctxt, origin) @@ -460,13 +460,13 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_SOURCE: [ - constants.PROVIDER_TYPE_REPLICA_EXPORT] + constants.PROVIDER_TYPE_TRANSFER_EXPORT] } def _run(self, ctxt, instance, origin, destination, task_info, event_handler): provider = providers_factory.get_provider( - origin["type"], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin["type"], constants.PROVIDER_TYPE_TRANSFER_EXPORT, event_handler) connection_info = base.get_connection_info(ctxt, origin) @@ -504,7 +504,7 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_DESTINATION: [ - constants.PROVIDER_TYPE_REPLICA_IMPORT] + constants.PROVIDER_TYPE_TRANSFER_IMPORT] } def _validate_connection_info(self, migr_connection_info): @@ -535,7 +535,7 @@ def _run(self, ctxt, instance, origin, destination, task_info, export_info = task_info['export_info'] provider = providers_factory.get_provider( - destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination["type"], constants.PROVIDER_TYPE_TRANSFER_IMPORT, event_handler) connection_info = base.get_connection_info(ctxt, destination) @@ -603,13 +603,13 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_DESTINATION: [ - constants.PROVIDER_TYPE_REPLICA_IMPORT] + constants.PROVIDER_TYPE_TRANSFER_IMPORT] } def _run(self, ctxt, instance, origin, destination, task_info, event_handler): provider = providers_factory.get_provider( - destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination["type"], constants.PROVIDER_TYPE_TRANSFER_IMPORT, event_handler) connection_info = 
base.get_connection_info(ctxt, destination) @@ -644,7 +644,7 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_DESTINATION: [ - constants.PROVIDER_TYPE_REPLICA_IMPORT] + constants.PROVIDER_TYPE_TRANSFER_IMPORT] } def _run(self, ctxt, instance, origin, destination, task_info, @@ -653,7 +653,7 @@ def _run(self, ctxt, instance, origin, destination, task_info, export_info = task_info["export_info"] provider = providers_factory.get_provider( - destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination["type"], constants.PROVIDER_TYPE_TRANSFER_IMPORT, event_handler) connection_info = base.get_connection_info(ctxt, destination) @@ -688,13 +688,13 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_DESTINATION: [ - constants.PROVIDER_TYPE_REPLICA_IMPORT] + constants.PROVIDER_TYPE_TRANSFER_IMPORT] } def _run(self, ctxt, instance, origin, destination, task_info, event_handler): provider = providers_factory.get_provider( - destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination["type"], constants.PROVIDER_TYPE_TRANSFER_IMPORT, event_handler) connection_info = base.get_connection_info(ctxt, destination) target_environment = task_info["target_environment"] @@ -730,13 +730,13 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_DESTINATION: [ - constants.PROVIDER_TYPE_REPLICA_IMPORT] + constants.PROVIDER_TYPE_TRANSFER_IMPORT] } def _run(self, ctxt, instance, origin, destination, task_info, event_handler): provider = providers_factory.get_provider( - destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination["type"], constants.PROVIDER_TYPE_TRANSFER_IMPORT, event_handler) connection_info = base.get_connection_info(ctxt, destination) target_environment = task_info["target_environment"] @@ -768,13 +768,13 @@ def 
get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_DESTINATION: [ - constants.PROVIDER_TYPE_REPLICA_IMPORT] + constants.PROVIDER_TYPE_TRANSFER_IMPORT] } def _run(self, ctxt, instance, origin, destination, task_info, event_handler): provider = providers_factory.get_provider( - destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination["type"], constants.PROVIDER_TYPE_TRANSFER_IMPORT, event_handler) connection_info = base.get_connection_info(ctxt, destination) export_info = task_info['export_info'] @@ -812,14 +812,14 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_DESTINATION: [ - constants.PROVIDER_TYPE_REPLICA_IMPORT] + constants.PROVIDER_TYPE_TRANSFER_IMPORT] } def _run(self, ctxt, instance, origin, destination, task_info, event_handler): export_info = task_info['export_info'] provider = providers_factory.get_provider( - destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination["type"], constants.PROVIDER_TYPE_TRANSFER_IMPORT, event_handler) connection_info = base.get_connection_info(ctxt, destination) @@ -856,13 +856,13 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_DESTINATION: [ - constants.PROVIDER_TYPE_REPLICA_IMPORT] + constants.PROVIDER_TYPE_TRANSFER_IMPORT] } def _run(self, ctxt, instance, origin, destination, task_info, event_handler): provider = providers_factory.get_provider( - destination["type"], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination["type"], constants.PROVIDER_TYPE_TRANSFER_IMPORT, event_handler) connection_info = base.get_connection_info(ctxt, destination) export_info = task_info['export_info'] @@ -900,7 +900,7 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_SOURCE: [ - constants.PROVIDER_TYPE_VALIDATE_REPLICA_EXPORT] + 
constants.PROVIDER_TYPE_VALIDATE_TRANSFER_EXPORT] } def _run(self, ctxt, instance, origin, destination, task_info, @@ -908,7 +908,7 @@ def _run(self, ctxt, instance, origin, destination, task_info, event_manager = events.EventManager(event_handler) origin_type = origin["type"] source_provider = providers_factory.get_provider( - origin_type, constants.PROVIDER_TYPE_VALIDATE_REPLICA_EXPORT, + origin_type, constants.PROVIDER_TYPE_VALIDATE_TRANSFER_EXPORT, event_handler, raise_if_not_found=False) origin_connection_info = base.get_connection_info(ctxt, origin) if not source_provider: @@ -941,7 +941,7 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_DESTINATION: [ - constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT] + constants.PROVIDER_TYPE_VALIDATE_TRANSFER_IMPORT] } def _validate_provider_replica_import_input( @@ -960,7 +960,7 @@ def _run(self, ctxt, instance, origin, destination, task_info, ctxt, destination) destination_provider = providers_factory.get_provider( destination_type, - constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT, event_handler, + constants.PROVIDER_TYPE_VALIDATE_TRANSFER_IMPORT, event_handler, raise_if_not_found=False) if not destination_provider: event_manager.progress_update( @@ -1001,7 +1001,7 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_DESTINATION: [ - constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT] + constants.PROVIDER_TYPE_VALIDATE_TRANSFER_IMPORT] } def _run(self, ctxt, instance, origin, destination, task_info, @@ -1018,7 +1018,7 @@ def _run(self, ctxt, instance, origin, destination, task_info, # validate destination params: destination_provider = providers_factory.get_provider( destination_type, - constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT, event_handler, + constants.PROVIDER_TYPE_VALIDATE_TRANSFER_IMPORT, event_handler, raise_if_not_found=False) if not destination_provider: 
event_manager.progress_update( @@ -1054,7 +1054,7 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_SOURCE: [ - constants.PROVIDER_TYPE_SOURCE_REPLICA_UPDATE] + constants.PROVIDER_TYPE_SOURCE_TRANSFER_UPDATE] } def _run(self, ctxt, instance, origin, destination, task_info, @@ -1075,7 +1075,7 @@ def _run(self, ctxt, instance, origin, destination, task_info, 'source_environment': old_source_env} source_provider = providers_factory.get_provider( - origin["type"], constants.PROVIDER_TYPE_SOURCE_REPLICA_UPDATE, + origin["type"], constants.PROVIDER_TYPE_SOURCE_TRANSFER_UPDATE, event_handler, raise_if_not_found=False) if not source_provider: raise exception.InvalidActionTasksExecutionState( @@ -1122,7 +1122,7 @@ def get_returned_task_info_properties(cls): def get_required_provider_types(cls): return { constants.PROVIDER_PLATFORM_DESTINATION: [ - constants.PROVIDER_TYPE_DESTINATION_REPLICA_UPDATE] + constants.PROVIDER_TYPE_DESTINATION_TRANSFER_UPDATE] } def _run(self, ctxt, instance, origin, destination, task_info, @@ -1144,7 +1144,7 @@ def _run(self, ctxt, instance, origin, destination, task_info, destination_provider = providers_factory.get_provider( destination["type"], - constants.PROVIDER_TYPE_DESTINATION_REPLICA_UPDATE, + constants.PROVIDER_TYPE_DESTINATION_TRANSFER_UPDATE, event_handler, raise_if_not_found=False) if not destination_provider: raise exception.InvalidActionTasksExecutionState( diff --git a/coriolis/tests/api/v1/test_replica_schedules.py b/coriolis/tests/api/v1/test_replica_schedules.py index c32cc792..0fcaec7e 100644 --- a/coriolis/tests/api/v1/test_replica_schedules.py +++ b/coriolis/tests/api/v1/test_replica_schedules.py @@ -10,9 +10,9 @@ from coriolis.api.v1 import replica_schedules from coriolis.api.v1.views import replica_schedule_view from coriolis import exception -from coriolis.replica_cron import api from coriolis import schemas from coriolis.tests import test_base +from 
coriolis.transfer_cron import api class ReplicaScheduleControllerTestCase(test_base.CoriolisBaseTestCase): diff --git a/coriolis/tests/cmd/test_replica_cron.py b/coriolis/tests/cmd/test_replica_cron.py index 7b5e4996..131ee075 100644 --- a/coriolis/tests/cmd/test_replica_cron.py +++ b/coriolis/tests/cmd/test_replica_cron.py @@ -6,14 +6,14 @@ from coriolis.cmd import replica_cron from coriolis import constants -from coriolis.replica_cron.rpc import server as rpc_server from coriolis import service from coriolis.tests import test_base +from coriolis.transfer_cron.rpc import server as rpc_server from coriolis import utils class ReplicaCronTestCase(test_base.CoriolisBaseTestCase): - """Test suite for the Coriolis replica_cron CMD""" + """Test suite for the Coriolis transfer_cron CMD""" @mock.patch.object(service, 'service') @mock.patch.object(service, 'MessagingService') @@ -37,7 +37,7 @@ def test_main( mock_setup_logging.assert_called_once() mock_ReplicaCronServerEndpoint.assert_called_once() mock_MessagingService.assert_called_once_with( - constants.REPLICA_CRON_MAIN_MESSAGING_TOPIC, + constants.TRANSFER_CRON_MAIN_MESSAGING_TOPIC, [mock_ReplicaCronServerEndpoint.return_value], rpc_server.VERSION, worker_count=1) diff --git a/coriolis/tests/conductor/rpc/data/deploy_replica_instance_config.yml b/coriolis/tests/conductor/rpc/data/deploy_replica_instance_config.yml deleted file mode 100644 index ba0fc064..00000000 --- a/coriolis/tests/conductor/rpc/data/deploy_replica_instance_config.yml +++ /dev/null @@ -1,182 +0,0 @@ -- config: - skip_os_morphing: False - has_os_morphing_minion: True - expected_tasks: - - type: 'VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY' - depends_on: ['VALIDATE_REPLICA_DEPLOYMENT_INPUTS'] - - type: 'CREATE_REPLICA_DISK_SNAPSHOTS' - depends_on: ['VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY'] - - type: 'DEPLOY_REPLICA_INSTANCE_RESOURCES' - depends_on: ['CREATE_REPLICA_DISK_SNAPSHOTS'] - - type: 'ATTACH_VOLUMES_TO_OSMORPHING_MINION' - depends_on: 
['VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY', 'DEPLOY_REPLICA_INSTANCE_RESOURCES'] - - type: 'COLLECT_OS_MORPHING_INFO' - depends_on: ['ATTACH_VOLUMES_TO_OSMORPHING_MINION'] - - type: 'OS_MORPHING' - depends_on: ['COLLECT_OS_MORPHING_INFO'] - - type: 'DETACH_VOLUMES_FROM_OSMORPHING_MINION' - depends_on: ['ATTACH_VOLUMES_TO_OSMORPHING_MINION', 'OS_MORPHING'] - on_error: True - - type: 'RELEASE_OSMORPHING_MINION' - depends_on: ['VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY', 'DETACH_VOLUMES_FROM_OSMORPHING_MINION'] - on_error: True - - type: 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT' - depends_on: ['OS_MORPHING', 'RELEASE_OSMORPHING_MINION'] - - type: 'DELETE_REPLICA_TARGET_DISK_SNAPSHOTS' - depends_on: ['CREATE_REPLICA_DISK_SNAPSHOTS', 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT'] - on_error: False - - type: 'CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT' - depends_on: ['DEPLOY_REPLICA_INSTANCE_RESOURCES', 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT'] - on_error_only: True - - type: 'RESTORE_REPLICA_DISK_SNAPSHOTS' - depends_on: ['CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT'] - on_error: True - -- config: - skip_os_morphing: False - has_os_morphing_minion: False - expected_tasks: - - type: 'CREATE_REPLICA_DISK_SNAPSHOTS' - depends_on: ['VALIDATE_REPLICA_DEPLOYMENT_INPUTS'] - - type: 'DEPLOY_REPLICA_INSTANCE_RESOURCES' - depends_on: ['CREATE_REPLICA_DISK_SNAPSHOTS'] - - type: 'DEPLOY_OS_MORPHING_RESOURCES' - depends_on: ['DEPLOY_REPLICA_INSTANCE_RESOURCES'] - - type: 'OS_MORPHING' - depends_on: ['DEPLOY_OS_MORPHING_RESOURCES'] - - type: 'DELETE_OS_MORPHING_RESOURCES' - depends_on: ['DEPLOY_OS_MORPHING_RESOURCES', 'OS_MORPHING'] - on_error: True - - type: 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT' - depends_on: ['OS_MORPHING', 'DELETE_OS_MORPHING_RESOURCES'] - - type: 'DELETE_REPLICA_TARGET_DISK_SNAPSHOTS' - depends_on: ['CREATE_REPLICA_DISK_SNAPSHOTS', 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT'] - on_error: False - - type: 'CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT' - depends_on: 
['DEPLOY_REPLICA_INSTANCE_RESOURCES', 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT'] - on_error_only: True - - type: 'RESTORE_REPLICA_DISK_SNAPSHOTS' - depends_on: ['CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT'] - on_error: True - -- config: - skip_os_morphing: True - expected_tasks: - - type: 'CREATE_REPLICA_DISK_SNAPSHOTS' - depends_on: ['VALIDATE_REPLICA_DEPLOYMENT_INPUTS'] - - type: 'DEPLOY_REPLICA_INSTANCE_RESOURCES' - depends_on: ['CREATE_REPLICA_DISK_SNAPSHOTS'] - - type: 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT' - depends_on: ['DEPLOY_REPLICA_INSTANCE_RESOURCES'] - - type: 'DELETE_REPLICA_TARGET_DISK_SNAPSHOTS' - depends_on: ['CREATE_REPLICA_DISK_SNAPSHOTS', 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT'] - on_error: False - - type: 'CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT' - depends_on: ['DEPLOY_REPLICA_INSTANCE_RESOURCES', 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT'] - on_error_only: True - - type: 'RESTORE_REPLICA_DISK_SNAPSHOTS' - depends_on: ['CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT'] - on_error: True - -- config: - skip_os_morphing: True - get_optimal_flavor: True - expected_tasks: - - type: 'CREATE_REPLICA_DISK_SNAPSHOTS' - depends_on: ['VALIDATE_REPLICA_DEPLOYMENT_INPUTS'] - - type: 'DEPLOY_REPLICA_INSTANCE_RESOURCES' - depends_on: ['CREATE_REPLICA_DISK_SNAPSHOTS'] - - type: 'GET_OPTIMAL_FLAVOR' - depends_on: ['DEPLOY_REPLICA_INSTANCE_RESOURCES'] - - type: 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT' - depends_on: ['GET_OPTIMAL_FLAVOR'] - - type: 'DELETE_REPLICA_TARGET_DISK_SNAPSHOTS' - depends_on: ['CREATE_REPLICA_DISK_SNAPSHOTS', 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT'] - on_error: False - - type: 'CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT' - depends_on: ['DEPLOY_REPLICA_INSTANCE_RESOURCES', 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT'] - on_error_only: True - - type: 'RESTORE_REPLICA_DISK_SNAPSHOTS' - depends_on: ['CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT'] - on_error: True - -- config: - skip_os_morphing: True - clone_disks: True - expected_tasks: - - type: 
'CREATE_REPLICA_DISK_SNAPSHOTS' - depends_on: ['VALIDATE_REPLICA_DEPLOYMENT_INPUTS'] - - type: 'DEPLOY_REPLICA_INSTANCE_RESOURCES' - depends_on: ['CREATE_REPLICA_DISK_SNAPSHOTS'] - - type: 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT' - depends_on: ['DEPLOY_REPLICA_INSTANCE_RESOURCES'] - - type: 'DELETE_REPLICA_TARGET_DISK_SNAPSHOTS' - depends_on: ['CREATE_REPLICA_DISK_SNAPSHOTS', 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT'] - on_error: True - - type: 'CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT' - depends_on: ['DEPLOY_REPLICA_INSTANCE_RESOURCES', 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT'] - on_error_only: True - -- config: - get_optimal_flavor: True - skip_os_morphing: False - expected_tasks: - - type: CREATE_REPLICA_DISK_SNAPSHOTS - depends_on: ['VALIDATE_REPLICA_DEPLOYMENT_INPUTS'] - - type: DEPLOY_REPLICA_INSTANCE_RESOURCES - depends_on: ['CREATE_REPLICA_DISK_SNAPSHOTS'] - - type: DEPLOY_OS_MORPHING_RESOURCES - depends_on: ['DEPLOY_REPLICA_INSTANCE_RESOURCES'] - - type: OS_MORPHING - depends_on: ['DEPLOY_OS_MORPHING_RESOURCES'] - - type: DELETE_OS_MORPHING_RESOURCES - depends_on: ['DEPLOY_OS_MORPHING_RESOURCES', 'OS_MORPHING'] - on_error: True - - type: GET_OPTIMAL_FLAVOR - depends_on: ['OS_MORPHING', 'DELETE_OS_MORPHING_RESOURCES'] - - type: FINALIZE_REPLICA_INSTANCE_DEPLOYMENT - depends_on: ['GET_OPTIMAL_FLAVOR'] - - type: DELETE_REPLICA_TARGET_DISK_SNAPSHOTS - depends_on: ['CREATE_REPLICA_DISK_SNAPSHOTS', 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT'] - on_error: False - - type: CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT - depends_on: ['DEPLOY_REPLICA_INSTANCE_RESOURCES', 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT'] - on_error_only: True - - type: RESTORE_REPLICA_DISK_SNAPSHOTS - depends_on: ['CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT'] - on_error: True - -- config: - get_optimal_flavor: True - skip_os_morphing: False - has_os_morphing_minion: True - expected_tasks: - - type: VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY - depends_on: ['VALIDATE_REPLICA_DEPLOYMENT_INPUTS'] - - type: 
DEPLOY_REPLICA_INSTANCE_RESOURCES - depends_on: ['CREATE_REPLICA_DISK_SNAPSHOTS'] - - type: ATTACH_VOLUMES_TO_OSMORPHING_MINION - depends_on: ['VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY', 'DEPLOY_REPLICA_INSTANCE_RESOURCES'] - - type: COLLECT_OS_MORPHING_INFO - depends_on: ['ATTACH_VOLUMES_TO_OSMORPHING_MINION'] - - type: OS_MORPHING - depends_on: ['COLLECT_OS_MORPHING_INFO'] - - type: DETACH_VOLUMES_FROM_OSMORPHING_MINION - depends_on: ['ATTACH_VOLUMES_TO_OSMORPHING_MINION', 'OS_MORPHING'] - on_error: True - - type: RELEASE_OSMORPHING_MINION - depends_on: ['VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY', 'DETACH_VOLUMES_FROM_OSMORPHING_MINION'] - on_error: True - - type: GET_OPTIMAL_FLAVOR - depends_on: ['OS_MORPHING', 'RELEASE_OSMORPHING_MINION'] - - type: FINALIZE_REPLICA_INSTANCE_DEPLOYMENT - depends_on: ['GET_OPTIMAL_FLAVOR'] - - type: DELETE_REPLICA_TARGET_DISK_SNAPSHOTS - depends_on: ['CREATE_REPLICA_DISK_SNAPSHOTS', 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT'] - on_error: False - - type: CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT - depends_on: ['DEPLOY_REPLICA_INSTANCE_RESOURCES', 'FINALIZE_REPLICA_INSTANCE_DEPLOYMENT'] - on_error_only: True - - type: RESTORE_REPLICA_DISK_SNAPSHOTS - depends_on: ['CLEANUP_FAILED_REPLICA_INSTANCE_DEPLOYMENT'] - on_error: True diff --git a/coriolis/tests/conductor/rpc/data/deploy_transfer_instance_config.yml b/coriolis/tests/conductor/rpc/data/deploy_transfer_instance_config.yml new file mode 100644 index 00000000..b3baa5cf --- /dev/null +++ b/coriolis/tests/conductor/rpc/data/deploy_transfer_instance_config.yml @@ -0,0 +1,182 @@ +- config: + skip_os_morphing: False + has_os_morphing_minion: True + expected_tasks: + - type: 'VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY' + depends_on: ['VALIDATE_DEPLOYMENT_INPUTS'] + - type: 'CREATE_TRANSFER_DISK_SNAPSHOTS' + depends_on: ['VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY'] + - type: 'DEPLOY_INSTANCE_RESOURCES' + depends_on: ['CREATE_TRANSFER_DISK_SNAPSHOTS'] + - type: 
'ATTACH_VOLUMES_TO_OSMORPHING_MINION' + depends_on: ['VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY', 'DEPLOY_INSTANCE_RESOURCES'] + - type: 'COLLECT_OS_MORPHING_INFO' + depends_on: ['ATTACH_VOLUMES_TO_OSMORPHING_MINION'] + - type: 'OS_MORPHING' + depends_on: ['COLLECT_OS_MORPHING_INFO'] + - type: 'DETACH_VOLUMES_FROM_OSMORPHING_MINION' + depends_on: ['ATTACH_VOLUMES_TO_OSMORPHING_MINION', 'OS_MORPHING'] + on_error: True + - type: 'RELEASE_OSMORPHING_MINION' + depends_on: ['VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY', 'DETACH_VOLUMES_FROM_OSMORPHING_MINION'] + on_error: True + - type: 'FINALIZE_INSTANCE_DEPLOYMENT' + depends_on: ['OS_MORPHING', 'RELEASE_OSMORPHING_MINION'] + - type: 'DELETE_TRANSFER_TARGET_DISK_SNAPSHOTS' + depends_on: ['CREATE_TRANSFER_DISK_SNAPSHOTS', 'FINALIZE_INSTANCE_DEPLOYMENT'] + on_error: False + - type: 'CLEANUP_FAILED_INSTANCE_DEPLOYMENT' + depends_on: ['DEPLOY_INSTANCE_RESOURCES', 'FINALIZE_INSTANCE_DEPLOYMENT'] + on_error_only: True + - type: 'RESTORE_TRANSFER_DISK_SNAPSHOTS' + depends_on: ['CLEANUP_FAILED_INSTANCE_DEPLOYMENT'] + on_error: True + +- config: + skip_os_morphing: False + has_os_morphing_minion: False + expected_tasks: + - type: 'CREATE_TRANSFER_DISK_SNAPSHOTS' + depends_on: ['VALIDATE_DEPLOYMENT_INPUTS'] + - type: 'DEPLOY_INSTANCE_RESOURCES' + depends_on: ['CREATE_TRANSFER_DISK_SNAPSHOTS'] + - type: 'DEPLOY_OS_MORPHING_RESOURCES' + depends_on: ['DEPLOY_INSTANCE_RESOURCES'] + - type: 'OS_MORPHING' + depends_on: ['DEPLOY_OS_MORPHING_RESOURCES'] + - type: 'DELETE_OS_MORPHING_RESOURCES' + depends_on: ['DEPLOY_OS_MORPHING_RESOURCES', 'OS_MORPHING'] + on_error: True + - type: 'FINALIZE_INSTANCE_DEPLOYMENT' + depends_on: ['OS_MORPHING', 'DELETE_OS_MORPHING_RESOURCES'] + - type: 'DELETE_TRANSFER_TARGET_DISK_SNAPSHOTS' + depends_on: ['CREATE_TRANSFER_DISK_SNAPSHOTS', 'FINALIZE_INSTANCE_DEPLOYMENT'] + on_error: False + - type: 'CLEANUP_FAILED_INSTANCE_DEPLOYMENT' + depends_on: ['DEPLOY_INSTANCE_RESOURCES', 
'FINALIZE_INSTANCE_DEPLOYMENT'] + on_error_only: True + - type: 'RESTORE_TRANSFER_DISK_SNAPSHOTS' + depends_on: ['CLEANUP_FAILED_INSTANCE_DEPLOYMENT'] + on_error: True + +- config: + skip_os_morphing: True + expected_tasks: + - type: 'CREATE_TRANSFER_DISK_SNAPSHOTS' + depends_on: ['VALIDATE_DEPLOYMENT_INPUTS'] + - type: 'DEPLOY_INSTANCE_RESOURCES' + depends_on: ['CREATE_TRANSFER_DISK_SNAPSHOTS'] + - type: 'FINALIZE_INSTANCE_DEPLOYMENT' + depends_on: ['DEPLOY_INSTANCE_RESOURCES'] + - type: 'DELETE_TRANSFER_TARGET_DISK_SNAPSHOTS' + depends_on: ['CREATE_TRANSFER_DISK_SNAPSHOTS', 'FINALIZE_INSTANCE_DEPLOYMENT'] + on_error: False + - type: 'CLEANUP_FAILED_INSTANCE_DEPLOYMENT' + depends_on: ['DEPLOY_INSTANCE_RESOURCES', 'FINALIZE_INSTANCE_DEPLOYMENT'] + on_error_only: True + - type: 'RESTORE_TRANSFER_DISK_SNAPSHOTS' + depends_on: ['CLEANUP_FAILED_INSTANCE_DEPLOYMENT'] + on_error: True + +- config: + skip_os_morphing: True + get_optimal_flavor: True + expected_tasks: + - type: 'CREATE_TRANSFER_DISK_SNAPSHOTS' + depends_on: ['VALIDATE_DEPLOYMENT_INPUTS'] + - type: 'DEPLOY_INSTANCE_RESOURCES' + depends_on: ['CREATE_TRANSFER_DISK_SNAPSHOTS'] + - type: 'GET_OPTIMAL_FLAVOR' + depends_on: ['DEPLOY_INSTANCE_RESOURCES'] + - type: 'FINALIZE_INSTANCE_DEPLOYMENT' + depends_on: ['GET_OPTIMAL_FLAVOR'] + - type: 'DELETE_TRANSFER_TARGET_DISK_SNAPSHOTS' + depends_on: ['CREATE_TRANSFER_DISK_SNAPSHOTS', 'FINALIZE_INSTANCE_DEPLOYMENT'] + on_error: False + - type: 'CLEANUP_FAILED_INSTANCE_DEPLOYMENT' + depends_on: ['DEPLOY_INSTANCE_RESOURCES', 'FINALIZE_INSTANCE_DEPLOYMENT'] + on_error_only: True + - type: 'RESTORE_TRANSFER_DISK_SNAPSHOTS' + depends_on: ['CLEANUP_FAILED_INSTANCE_DEPLOYMENT'] + on_error: True + +- config: + skip_os_morphing: True + clone_disks: True + expected_tasks: + - type: 'CREATE_TRANSFER_DISK_SNAPSHOTS' + depends_on: ['VALIDATE_DEPLOYMENT_INPUTS'] + - type: 'DEPLOY_INSTANCE_RESOURCES' + depends_on: ['CREATE_TRANSFER_DISK_SNAPSHOTS'] + - type: 
'FINALIZE_INSTANCE_DEPLOYMENT' + depends_on: ['DEPLOY_INSTANCE_RESOURCES'] + - type: 'DELETE_TRANSFER_TARGET_DISK_SNAPSHOTS' + depends_on: ['CREATE_TRANSFER_DISK_SNAPSHOTS', 'FINALIZE_INSTANCE_DEPLOYMENT'] + on_error: True + - type: 'CLEANUP_FAILED_INSTANCE_DEPLOYMENT' + depends_on: ['DEPLOY_INSTANCE_RESOURCES', 'FINALIZE_INSTANCE_DEPLOYMENT'] + on_error_only: True + +- config: + get_optimal_flavor: True + skip_os_morphing: False + expected_tasks: + - type: CREATE_TRANSFER_DISK_SNAPSHOTS + depends_on: ['VALIDATE_DEPLOYMENT_INPUTS'] + - type: DEPLOY_INSTANCE_RESOURCES + depends_on: ['CREATE_TRANSFER_DISK_SNAPSHOTS'] + - type: DEPLOY_OS_MORPHING_RESOURCES + depends_on: ['DEPLOY_INSTANCE_RESOURCES'] + - type: OS_MORPHING + depends_on: ['DEPLOY_OS_MORPHING_RESOURCES'] + - type: DELETE_OS_MORPHING_RESOURCES + depends_on: ['DEPLOY_OS_MORPHING_RESOURCES', 'OS_MORPHING'] + on_error: True + - type: GET_OPTIMAL_FLAVOR + depends_on: ['OS_MORPHING', 'DELETE_OS_MORPHING_RESOURCES'] + - type: FINALIZE_INSTANCE_DEPLOYMENT + depends_on: ['GET_OPTIMAL_FLAVOR'] + - type: DELETE_TRANSFER_TARGET_DISK_SNAPSHOTS + depends_on: ['CREATE_TRANSFER_DISK_SNAPSHOTS', 'FINALIZE_INSTANCE_DEPLOYMENT'] + on_error: False + - type: CLEANUP_FAILED_INSTANCE_DEPLOYMENT + depends_on: ['DEPLOY_INSTANCE_RESOURCES', 'FINALIZE_INSTANCE_DEPLOYMENT'] + on_error_only: True + - type: RESTORE_TRANSFER_DISK_SNAPSHOTS + depends_on: ['CLEANUP_FAILED_INSTANCE_DEPLOYMENT'] + on_error: True + +- config: + get_optimal_flavor: True + skip_os_morphing: False + has_os_morphing_minion: True + expected_tasks: + - type: VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY + depends_on: ['VALIDATE_DEPLOYMENT_INPUTS'] + - type: DEPLOY_INSTANCE_RESOURCES + depends_on: ['CREATE_TRANSFER_DISK_SNAPSHOTS'] + - type: ATTACH_VOLUMES_TO_OSMORPHING_MINION + depends_on: ['VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY', 'DEPLOY_INSTANCE_RESOURCES'] + - type: COLLECT_OS_MORPHING_INFO + depends_on: ['ATTACH_VOLUMES_TO_OSMORPHING_MINION'] + - 
type: OS_MORPHING + depends_on: ['COLLECT_OS_MORPHING_INFO'] + - type: DETACH_VOLUMES_FROM_OSMORPHING_MINION + depends_on: ['ATTACH_VOLUMES_TO_OSMORPHING_MINION', 'OS_MORPHING'] + on_error: True + - type: RELEASE_OSMORPHING_MINION + depends_on: ['VALIDATE_OSMORPHING_MINION_POOL_COMPATIBILITY', 'DETACH_VOLUMES_FROM_OSMORPHING_MINION'] + on_error: True + - type: GET_OPTIMAL_FLAVOR + depends_on: ['OS_MORPHING', 'RELEASE_OSMORPHING_MINION'] + - type: FINALIZE_INSTANCE_DEPLOYMENT + depends_on: ['GET_OPTIMAL_FLAVOR'] + - type: DELETE_TRANSFER_TARGET_DISK_SNAPSHOTS + depends_on: ['CREATE_TRANSFER_DISK_SNAPSHOTS', 'FINALIZE_INSTANCE_DEPLOYMENT'] + on_error: False + - type: CLEANUP_FAILED_INSTANCE_DEPLOYMENT + depends_on: ['DEPLOY_INSTANCE_RESOURCES', 'FINALIZE_INSTANCE_DEPLOYMENT'] + on_error_only: True + - type: RESTORE_TRANSFER_DISK_SNAPSHOTS + depends_on: ['CLEANUP_FAILED_INSTANCE_DEPLOYMENT'] + on_error: True diff --git a/coriolis/tests/conductor/rpc/data/execute_replica_tasks_config.yml b/coriolis/tests/conductor/rpc/data/execute_transfer_tasks_config.yml similarity index 57% rename from coriolis/tests/conductor/rpc/data/execute_replica_tasks_config.yml rename to coriolis/tests/conductor/rpc/data/execute_transfer_tasks_config.yml index 5d55b175..e4093ea3 100644 --- a/coriolis/tests/conductor/rpc/data/execute_replica_tasks_config.yml +++ b/coriolis/tests/conductor/rpc/data/execute_transfer_tasks_config.yml @@ -5,24 +5,24 @@ shutdown_instances: False expected_tasks: - - type: 'DEPLOY_REPLICA_DISKS' - depends_on: ['VALIDATE_REPLICA_SOURCE_INPUTS', 'VALIDATE_REPLICA_DESTINATION_INPUTS'] + type: 'DEPLOY_TRANSFER_DISKS' + depends_on: ['VALIDATE_TRANSFER_SOURCE_INPUTS', 'VALIDATE_TRANSFER_DESTINATION_INPUTS'] - - type: 'DEPLOY_REPLICA_SOURCE_RESOURCES' - depends_on: ['DEPLOY_REPLICA_DISKS'] + type: 'DEPLOY_TRANSFER_SOURCE_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_DISKS'] - - type: 'DEPLOY_REPLICA_TARGET_RESOURCES' - depends_on: ['DEPLOY_REPLICA_DISKS'] + type: 
'DEPLOY_TRANSFER_TARGET_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_DISKS'] - type: 'REPLICATE_DISKS' - depends_on: ['DEPLOY_REPLICA_SOURCE_RESOURCES', 'DEPLOY_REPLICA_TARGET_RESOURCES'] + depends_on: ['DEPLOY_TRANSFER_SOURCE_RESOURCES', 'DEPLOY_TRANSFER_TARGET_RESOURCES'] - - type: 'DELETE_REPLICA_SOURCE_RESOURCES' - depends_on: ['DEPLOY_REPLICA_SOURCE_RESOURCES', 'REPLICATE_DISKS'] + type: 'DELETE_TRANSFER_SOURCE_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_SOURCE_RESOURCES', 'REPLICATE_DISKS'] on_error: True - - type: 'DELETE_REPLICA_TARGET_RESOURCES' - depends_on: ['DEPLOY_REPLICA_TARGET_RESOURCES', 'REPLICATE_DISKS'] + type: 'DELETE_TRANSFER_TARGET_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_TARGET_RESOURCES', 'REPLICATE_DISKS'] on_error: True - config: @@ -32,23 +32,23 @@ expected_tasks: - type: 'VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY' - depends_on: ['VALIDATE_REPLICA_DESTINATION_INPUTS'] + depends_on: ['VALIDATE_TRANSFER_DESTINATION_INPUTS'] - - type: 'DEPLOY_REPLICA_DISKS' - depends_on: ['VALIDATE_REPLICA_SOURCE_INPUTS', 'VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY'] + type: 'DEPLOY_TRANSFER_DISKS' + depends_on: ['VALIDATE_TRANSFER_SOURCE_INPUTS', 'VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY'] - - type: 'DELETE_REPLICA_SOURCE_RESOURCES' - depends_on: ['DEPLOY_REPLICA_SOURCE_RESOURCES', 'REPLICATE_DISKS'] + type: 'DELETE_TRANSFER_SOURCE_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_SOURCE_RESOURCES', 'REPLICATE_DISKS'] on_error: True - type: 'ATTACH_VOLUMES_TO_DESTINATION_MINION' - depends_on: ['DEPLOY_REPLICA_DISKS'] + depends_on: ['DEPLOY_TRANSFER_DISKS'] - type: 'REPLICATE_DISKS' - depends_on: ['DEPLOY_REPLICA_SOURCE_RESOURCES', 'ATTACH_VOLUMES_TO_DESTINATION_MINION'] + depends_on: ['DEPLOY_TRANSFER_SOURCE_RESOURCES', 'ATTACH_VOLUMES_TO_DESTINATION_MINION'] - - type: 'DEPLOY_REPLICA_SOURCE_RESOURCES' - depends_on: ['DEPLOY_REPLICA_DISKS'] + type: 'DEPLOY_TRANSFER_SOURCE_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_DISKS'] - type: 
'DETACH_VOLUMES_FROM_DESTINATION_MINION' depends_on: ['ATTACH_VOLUMES_TO_DESTINATION_MINION', 'REPLICATE_DISKS'] @@ -57,7 +57,7 @@ type: 'RELEASE_DESTINATION_MINION' depends_on: ['VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY', 'DETACH_VOLUMES_FROM_DESTINATION_MINION'] on_error: True - + - config: origin_minion_pool: True @@ -66,23 +66,23 @@ expected_tasks: - type: 'VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY' - depends_on: ['GET_INSTANCE_INFO', 'VALIDATE_REPLICA_SOURCE_INPUTS'] + depends_on: ['GET_INSTANCE_INFO', 'VALIDATE_TRANSFER_SOURCE_INPUTS'] - - type: 'DEPLOY_REPLICA_DISKS' - depends_on: ['VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY', 'VALIDATE_REPLICA_DESTINATION_INPUTS'] + type: 'DEPLOY_TRANSFER_DISKS' + depends_on: ['VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY', 'VALIDATE_TRANSFER_DESTINATION_INPUTS'] - - type: 'DEPLOY_REPLICA_TARGET_RESOURCES' - depends_on: ['DEPLOY_REPLICA_DISKS'] + type: 'DEPLOY_TRANSFER_TARGET_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_DISKS'] - type: 'REPLICATE_DISKS' - depends_on: ['DEPLOY_REPLICA_TARGET_RESOURCES'] + depends_on: ['DEPLOY_TRANSFER_TARGET_RESOURCES'] - type: 'RELEASE_SOURCE_MINION' depends_on: ['VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY', 'REPLICATE_DISKS'] on_error: True - - type: 'DELETE_REPLICA_TARGET_RESOURCES' - depends_on: ['DEPLOY_REPLICA_TARGET_RESOURCES', 'REPLICATE_DISKS'] + type: 'DELETE_TRANSFER_TARGET_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_TARGET_RESOURCES', 'REPLICATE_DISKS'] on_error: True - config: @@ -92,16 +92,16 @@ expected_tasks: - type: 'VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY' - depends_on: ['GET_INSTANCE_INFO', 'VALIDATE_REPLICA_SOURCE_INPUTS'] + depends_on: ['GET_INSTANCE_INFO', 'VALIDATE_TRANSFER_SOURCE_INPUTS'] - type: 'VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY' - depends_on: ['VALIDATE_REPLICA_DESTINATION_INPUTS'] + depends_on: ['VALIDATE_TRANSFER_DESTINATION_INPUTS'] - - type: 'DEPLOY_REPLICA_DISKS' + type: 'DEPLOY_TRANSFER_DISKS' depends_on: 
['VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY', 'VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY'] - type: 'ATTACH_VOLUMES_TO_DESTINATION_MINION' - depends_on: ['DEPLOY_REPLICA_DISKS'] + depends_on: ['DEPLOY_TRANSFER_DISKS'] - type: 'REPLICATE_DISKS' depends_on: ['ATTACH_VOLUMES_TO_DESTINATION_MINION'] @@ -124,27 +124,27 @@ shutdown_instances: True expected_tasks: - - type: 'DEPLOY_REPLICA_DISKS' - depends_on: ['VALIDATE_REPLICA_SOURCE_INPUTS', 'VALIDATE_REPLICA_DESTINATION_INPUTS'] + type: 'DEPLOY_TRANSFER_DISKS' + depends_on: ['VALIDATE_TRANSFER_SOURCE_INPUTS', 'VALIDATE_TRANSFER_DESTINATION_INPUTS'] - - type: 'DEPLOY_REPLICA_SOURCE_RESOURCES' - depends_on: ['DEPLOY_REPLICA_DISKS'] + type: 'DEPLOY_TRANSFER_SOURCE_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_DISKS'] - - type: 'DEPLOY_REPLICA_TARGET_RESOURCES' - depends_on: ['DEPLOY_REPLICA_DISKS'] + type: 'DEPLOY_TRANSFER_TARGET_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_DISKS'] - type: 'SHUTDOWN_INSTANCE' - depends_on: ['DEPLOY_REPLICA_SOURCE_RESOURCES', 'DEPLOY_REPLICA_TARGET_RESOURCES'] + depends_on: ['DEPLOY_TRANSFER_SOURCE_RESOURCES', 'DEPLOY_TRANSFER_TARGET_RESOURCES'] - type: 'REPLICATE_DISKS' depends_on: ['SHUTDOWN_INSTANCE'] - - type: 'DELETE_REPLICA_SOURCE_RESOURCES' - depends_on: ['DEPLOY_REPLICA_SOURCE_RESOURCES', 'REPLICATE_DISKS'] + type: 'DELETE_TRANSFER_SOURCE_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_SOURCE_RESOURCES', 'REPLICATE_DISKS'] on_error: True - - type: 'DELETE_REPLICA_TARGET_RESOURCES' - depends_on: ['DEPLOY_REPLICA_TARGET_RESOURCES', 'REPLICATE_DISKS'] + type: 'DELETE_TRANSFER_TARGET_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_TARGET_RESOURCES', 'REPLICATE_DISKS'] on_error: True - config: @@ -154,25 +154,25 @@ expected_tasks: - type: 'VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY' - depends_on: ['VALIDATE_REPLICA_DESTINATION_INPUTS'] + depends_on: ['VALIDATE_TRANSFER_DESTINATION_INPUTS'] - - type: 'DEPLOY_REPLICA_DISKS' - depends_on: ['VALIDATE_REPLICA_SOURCE_INPUTS', 
'VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY'] + type: 'DEPLOY_TRANSFER_DISKS' + depends_on: ['VALIDATE_TRANSFER_SOURCE_INPUTS', 'VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY'] - - type: 'DEPLOY_REPLICA_SOURCE_RESOURCES' - depends_on: ['DEPLOY_REPLICA_DISKS'] + type: 'DEPLOY_TRANSFER_SOURCE_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_DISKS'] - type: 'ATTACH_VOLUMES_TO_DESTINATION_MINION' - depends_on: ['DEPLOY_REPLICA_DISKS'] + depends_on: ['DEPLOY_TRANSFER_DISKS'] - type: 'SHUTDOWN_INSTANCE' - depends_on: ['DEPLOY_REPLICA_SOURCE_RESOURCES', 'ATTACH_VOLUMES_TO_DESTINATION_MINION'] + depends_on: ['DEPLOY_TRANSFER_SOURCE_RESOURCES', 'ATTACH_VOLUMES_TO_DESTINATION_MINION'] - type: 'REPLICATE_DISKS' depends_on: ['SHUTDOWN_INSTANCE'] - - type: 'DELETE_REPLICA_SOURCE_RESOURCES' - depends_on: ['DEPLOY_REPLICA_SOURCE_RESOURCES', 'REPLICATE_DISKS'] + type: 'DELETE_TRANSFER_SOURCE_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_SOURCE_RESOURCES', 'REPLICATE_DISKS'] on_error: True - type: 'DETACH_VOLUMES_FROM_DESTINATION_MINION' @@ -190,16 +190,16 @@ expected_tasks: - type: 'VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY' - depends_on: ['GET_INSTANCE_INFO', 'VALIDATE_REPLICA_SOURCE_INPUTS'] + depends_on: ['GET_INSTANCE_INFO', 'VALIDATE_TRANSFER_SOURCE_INPUTS'] - - type: 'DEPLOY_REPLICA_DISKS' - depends_on: ['VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY', 'VALIDATE_REPLICA_DESTINATION_INPUTS'] + type: 'DEPLOY_TRANSFER_DISKS' + depends_on: ['VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY', 'VALIDATE_TRANSFER_DESTINATION_INPUTS'] - - type: 'DEPLOY_REPLICA_TARGET_RESOURCES' - depends_on: ['DEPLOY_REPLICA_DISKS'] + type: 'DEPLOY_TRANSFER_TARGET_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_DISKS'] - type: 'SHUTDOWN_INSTANCE' - depends_on: ['DEPLOY_REPLICA_TARGET_RESOURCES'] + depends_on: ['DEPLOY_TRANSFER_TARGET_RESOURCES'] - type: 'REPLICATE_DISKS' depends_on: ['SHUTDOWN_INSTANCE'] @@ -208,8 +208,8 @@ depends_on: ['VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY', 'REPLICATE_DISKS'] on_error: True - - 
type: 'DELETE_REPLICA_TARGET_RESOURCES' - depends_on: ['DEPLOY_REPLICA_TARGET_RESOURCES', 'REPLICATE_DISKS'] + type: 'DELETE_TRANSFER_TARGET_RESOURCES' + depends_on: ['DEPLOY_TRANSFER_TARGET_RESOURCES', 'REPLICATE_DISKS'] on_error: True - config: @@ -219,16 +219,16 @@ expected_tasks: - type: 'VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY' - depends_on: ['GET_INSTANCE_INFO', 'VALIDATE_REPLICA_SOURCE_INPUTS'] + depends_on: ['GET_INSTANCE_INFO', 'VALIDATE_TRANSFER_SOURCE_INPUTS'] - type: 'VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY' - depends_on: ['VALIDATE_REPLICA_DESTINATION_INPUTS'] + depends_on: ['VALIDATE_TRANSFER_DESTINATION_INPUTS'] - - type: 'DEPLOY_REPLICA_DISKS' + type: 'DEPLOY_TRANSFER_DISKS' depends_on: ['VALIDATE_SOURCE_MINION_POOL_COMPATIBILITY', 'VALIDATE_DESTINATION_MINION_POOL_COMPATIBILITY'] - type: 'ATTACH_VOLUMES_TO_DESTINATION_MINION' - depends_on: ['DEPLOY_REPLICA_DISKS'] + depends_on: ['DEPLOY_TRANSFER_DISKS'] - type: 'SHUTDOWN_INSTANCE' depends_on: ['ATTACH_VOLUMES_TO_DESTINATION_MINION'] diff --git a/coriolis/tests/conductor/rpc/data/update_replica_config.yml b/coriolis/tests/conductor/rpc/data/update_transfer_config.yml similarity index 88% rename from coriolis/tests/conductor/rpc/data/update_replica_config.yml rename to coriolis/tests/conductor/rpc/data/update_transfer_config.yml index 74131f7f..4c1731d3 100644 --- a/coriolis/tests/conductor/rpc/data/update_replica_config.yml +++ b/coriolis/tests/conductor/rpc/data/update_transfer_config.yml @@ -1,5 +1,5 @@ - config: - replica: + transfer: instances: ['mock_instance_1', 'mock_instance_2'] info: mock_instance_1: {} @@ -15,10 +15,10 @@ destination_environment: network_map: "mock_network_map" has_updated_values: True - has_replica_instance: True + has_transfer_instance: True - config: - replica: {} + transfer: {} updated_properties: origin_minion_pool_id: "mock_origin_minion_pool_id" destination_minion_pool_id: "mock_destination_minion_pool_id" @@ -29,10 +29,10 @@ destination_environment: 
network_map: "mock_network_map" has_updated_values: True - has_replica_instance: False + has_transfer_instance: False - config: - replica: {} + transfer: {} updated_properties: {} has_updated_values: False - has_replica_instance: False \ No newline at end of file + has_transfer_instance: False \ No newline at end of file diff --git a/coriolis/tests/conductor/rpc/test_client.py b/coriolis/tests/conductor/rpc/test_client.py index 98f9c2a7..91db7bfc 100644 --- a/coriolis/tests/conductor/rpc/test_client.py +++ b/coriolis/tests/conductor/rpc/test_client.py @@ -8,7 +8,7 @@ from coriolis.tests import test_base INSTANCE_ARGS = { - "replica_scenario": "mock_replica_scenario", + "transfer_scenario": "mock_transfer_scenario", "origin_endpoint_id": "mock_origin_endpoint_id", "destination_endpoint_id": "mock_destination_endpoint_id", "origin_minion_pool_id": "mock_origin_minion_pool_id", @@ -149,44 +149,44 @@ def test_get_provider_schemas(self): } self._test(self.client.get_provider_schemas, args) - def test_execute_replica_tasks(self): + def test_execute_transfer_tasks(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", "shutdown_instances": False } - self._test(self.client.execute_replica_tasks, args) + self._test(self.client.execute_transfer_tasks, args) - def test_get_replica_tasks_executions(self): + def test_get_transfer_tasks_executions(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", "include_tasks": False } - self._test(self.client.get_replica_tasks_executions, args) + self._test(self.client.get_transfer_tasks_executions, args) - def test_get_replica_tasks_execution(self): + def test_get_transfer_tasks_execution(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", "execution_id": "mock_execution_id", "include_task_info": False } - self._test(self.client.get_replica_tasks_execution, args) + self._test(self.client.get_transfer_tasks_execution, args) - def 
test_delete_replica_tasks_execution(self): + def test_delete_transfer_tasks_execution(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", "execution_id": "mock_execution_id" } - self._test(self.client.delete_replica_tasks_execution, args) + self._test(self.client.delete_transfer_tasks_execution, args) - def test_cancel_replica_tasks_execution(self): + def test_cancel_transfer_tasks_execution(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", "execution_id": "mock_execution_id", "force": "mock_force" } - self._test(self.client.cancel_replica_tasks_execution, args) + self._test(self.client.cancel_transfer_tasks_execution, args) - def test_create_instances_replica(self): + def test_create_instances_transfer(self): args = { **INSTANCE_ARGS, } @@ -195,44 +195,44 @@ def test_create_instances_replica(self): "user_scripts": None } args.update(new_args) - self._test(self.client.create_instances_replica, args) + self._test(self.client.create_instances_transfer, args) - def test_get_replicas(self): + def test_get_transfers(self): args = { "include_tasks_executions": False, "include_task_info": False, } - self._test(self.client.get_replicas, args) + self._test(self.client.get_transfers, args) - def test_get_replica(self): + def test_get_transfer(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", "include_task_info": False, } - self._test(self.client.get_replica, args) + self._test(self.client.get_transfer, args) - def test_delete_replica(self): + def test_delete_transfer(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", } - self._test(self.client.delete_replica, args) + self._test(self.client.delete_transfer, args) - def test_delete_replica_disks(self): + def test_delete_transfer_disks(self): args = { - "replica_id": "mock_replica_id" + "transfer_id": "mock_transfer_id" } - self._test(self.client.delete_replica_disks, args) + 
self._test(self.client.delete_transfer_disks, args) - def test_deploy_replica_instances(self): + def test_deploy_transfer_instances(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", "instance_osmorphing_minion_pool_mappings": None, "clone_disks": False, "force": False, "skip_os_morphing": False, "user_scripts": None } - self._test(self.client.deploy_replica_instances, args) + self._test(self.client.deploy_transfer_instances, args) def test_set_task_host(self): args = { @@ -288,52 +288,52 @@ def test_update_task_progress_update(self): self._test(self.client.update_task_progress_update, args, rpc_op='_cast') - def test_create_replica_schedule(self): + def test_create_transfer_schedule(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", "schedule": "mock_schedule", "enabled": "mock_enabled", "exp_date": "mock_exp_date", "shutdown_instance": "mock_shutdown_instance" } - self._test(self.client.create_replica_schedule, args) + self._test(self.client.create_transfer_schedule, args) - def test_update_replica_schedule(self): + def test_update_transfer_schedule(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", "schedule_id": "mock_schedule_id", "updated_values": "mock_updated_values" } - self._test(self.client.update_replica_schedule, args) + self._test(self.client.update_transfer_schedule, args) - def test_delete_replica_schedule(self): + def test_delete_transfer_schedule(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", "schedule_id": "mock_schedule_id" } - self._test(self.client.delete_replica_schedule, args) + self._test(self.client.delete_transfer_schedule, args) - def test_get_replica_schedules(self): + def test_get_transfer_schedules(self): args = { - "replica_id": None, + "transfer_id": None, "expired": True } - self._test(self.client.get_replica_schedules, args) + self._test(self.client.get_transfer_schedules, args) - 
def test_get_replica_schedule(self): + def test_get_transfer_schedule(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", "schedule_id": "mock_schedule_id", "expired": True } - self._test(self.client.get_replica_schedule, args) + self._test(self.client.get_transfer_schedule, args) - def test_update_replica(self): + def test_update_transfer(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", "updated_properties": "mock_updated_properties" } - self._test(self.client.update_replica, args) + self._test(self.client.update_transfer, args) def test_get_diagnostics(self): self._test(self.client.get_diagnostics, args={}) @@ -419,35 +419,36 @@ def test_delete_service(self): } self._test(self.client.delete_service, args) - def test_confirm_replica_minions_allocation(self): + def test_confirm_transfer_minions_allocation(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", "minion_machine_allocations": "mock_minion_machine_allocations" } - self._test(self.client.confirm_replica_minions_allocation, args) + self._test(self.client.confirm_transfer_minions_allocation, args) - def test_report_replica_minions_allocation_error(self): + def test_report_transfer_minions_allocation_error(self): args = { - "replica_id": "mock_replica_id", + "transfer_id": "mock_transfer_id", "minion_allocation_error_details": "mock_minion_allocation_error_details" } - self._test(self.client.report_replica_minions_allocation_error, args) + self._test(self.client.report_transfer_minions_allocation_error, args) - def test_confirm_migration_minions_allocation(self): + def test_confirm_deployment_minions_allocation(self): args = { - "migration_id": "mock_migration_id", + "deployment_id": "mock_deployment_id", "minion_machine_allocations": "mock_minion_machine_allocations" } - self._test(self.client.confirm_migration_minions_allocation, args) + self._test(self.client.confirm_deployment_minions_allocation, 
args) - def test_report_migration_minions_allocation_error(self): + def test_report_deployment_minions_allocation_error(self): args = { - "migration_id": "mock_migration_id", + "deployment_id": "mock_deployment_id", "minion_allocation_error_details": "mock_minion_allocation_error_details" } - self._test(self.client.report_migration_minions_allocation_error, args) + self._test( + self.client.report_deployment_minions_allocation_error, args) def test_add_task_progress_update(self): args = { diff --git a/coriolis/tests/conductor/rpc/test_server.py b/coriolis/tests/conductor/rpc/test_server.py index 0582c990..3018bb45 100644 --- a/coriolis/tests/conductor/rpc/test_server.py +++ b/coriolis/tests/conductor/rpc/test_server.py @@ -285,7 +285,7 @@ def call_delete_endpoint(): mock.sentinel.context, mock.sentinel.endpoint_id ) - # endpoint has replicas + # endpoint has transfers mock_get_endpoint_transfers_count.return_value = 1 self.assertRaises(exception.NotAuthorized, call_delete_endpoint) @@ -1084,35 +1084,35 @@ def test_begin_tasks_no_newly_started_tasks( @mock.patch.object(server.ConductorServerEndpoint, "_create_task") @mock.patch.object( - server.ConductorServerEndpoint, "_check_replica_running_executions" + server.ConductorServerEndpoint, "_check_transfer_running_executions" ) - @mock.patch.object(server.ConductorServerEndpoint, "_get_replica") - def test_delete_replica_disks_invalid_state( - self, mock_get_replica, - mock_check_replica_running, mock_create_task + @mock.patch.object(server.ConductorServerEndpoint, "_get_transfer") + def test_delete_transfer_disks_invalid_state( + self, mock_get_transfer, + mock_check_transfer_running, mock_create_task ): - mock_replica = mock_get_replica.return_value - mock_replica.instances = [mock.sentinel.instance] - mock_replica.info = {} - delete_replica_disks = testutils.get_wrapped_function( - self.server.delete_replica_disks + mock_transfer = mock_get_transfer.return_value + mock_transfer.instances = [mock.sentinel.instance] 
+ mock_transfer.info = {} + delete_transfer_disks = testutils.get_wrapped_function( + self.server.delete_transfer_disks ) self.assertRaises( - exception.InvalidReplicaState, - delete_replica_disks, + exception.InvalidTransferState, + delete_transfer_disks, self.server, mock.sentinel.context, mock.sentinel.transfer_id, ) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, include_task_info=True, ) - mock_check_replica_running.assert_called_once_with( - mock.sentinel.context, mock_replica + mock_check_transfer_running.assert_called_once_with( + mock.sentinel.context, mock_transfer ) mock_create_task.assert_not_called() @@ -1133,7 +1133,7 @@ def convert_to_task(task_config): instance_task.depends_on = task_config.get("depends_on", None) instance_task.task_type = task_config.get( "task_type", - constants.TASK_TYPE_DEPLOY_MIGRATION_SOURCE_RESOURCES, + constants.TASK_TYPE_DEPLOY_TRANSFER_SOURCE_RESOURCES, ) return instance_task @@ -1176,7 +1176,7 @@ def convert_to_task(task_config): @mock.patch.object(copy, "deepcopy") @mock.patch.object( server.ConductorServerEndpoint, - "get_replica_tasks_execution" + "get_transfer_tasks_execution" ) @mock.patch.object( server.ConductorServerEndpoint, @@ -1208,23 +1208,23 @@ def convert_to_task(task_config): ) @mock.patch.object( server.ConductorServerEndpoint, - "_check_replica_running_executions" + "_check_transfer_running_executions" ) @mock.patch.object( server.ConductorServerEndpoint, - "_check_reservation_for_replica" + "_check_reservation_for_transfer" ) @mock.patch.object( server.ConductorServerEndpoint, - "_get_replica" + "_get_transfer" ) - @ddt.file_data("data/execute_replica_tasks_config.yml") + @ddt.file_data("data/execute_transfer_tasks_config.yml") @ddt.unpack - def test_execute_replica_tasks( + def test_execute_transfer_tasks( self, - mock_get_replica, + mock_get_transfer, mock_check_reservation, - 
mock_check_replica_running_executions, + mock_check_transfer_running_executions, mock_check_minion_pools_for_action, mock_tasks_execution, mock_uuid4, @@ -1235,7 +1235,7 @@ def test_execute_replica_tasks( mock_minion_manager_client, mock_set_tasks_execution_status, mock_begin_tasks, - mock_get_replica_tasks_execution, + mock_get_transfer_tasks_execution, mock_deepcopy, config, expected_tasks, @@ -1244,9 +1244,9 @@ def test_execute_replica_tasks( has_target_minion_pool = config.get("target_minion_pool", False) shutdown_instances = config.get("shutdown_instances", False) - def call_execute_replica_tasks(): + def call_execute_transfer_tasks(): return testutils\ - .get_wrapped_function(self.server.execute_replica_tasks)( + .get_wrapped_function(self.server.execute_transfer_tasks)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, @@ -1254,7 +1254,7 @@ def call_execute_replica_tasks(): ) instances = [mock.sentinel.instance1, mock.sentinel.instance2] - mock_replica = mock.Mock( + mock_transfer = mock.Mock( instances=instances, network_map=mock.sentinel.network_map, info={mock.sentinel.instance1: {'volume_info': None}}, @@ -1263,7 +1263,7 @@ def call_execute_replica_tasks(): destination_minion_pool_id=mock.sentinel.destination_minion_pool_id if has_target_minion_pool else None, ) - mock_get_replica.return_value = mock_replica + mock_get_transfer.return_value = mock_transfer def create_task_side_effect( instance, @@ -1285,37 +1285,37 @@ def create_task_side_effect( mock_create_task.side_effect = create_task_side_effect - result = call_execute_replica_tasks() - mock_get_replica.assert_called_once_with( + result = call_execute_transfer_tasks() + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, include_task_info=True, ) - mock_check_reservation.assert_called_once_with(mock_replica) - mock_check_replica_running_executions.assert_called_once_with( - mock.sentinel.context, mock_replica) + 
mock_check_reservation.assert_called_once_with(mock_transfer) + mock_check_transfer_running_executions.assert_called_once_with( + mock.sentinel.context, mock_transfer) mock_check_minion_pools_for_action.assert_called_once_with( - mock.sentinel.context, mock_replica) + mock.sentinel.context, mock_transfer) mock_deepcopy.assert_called_once_with( - mock_replica.destination_environment) + mock_transfer.destination_environment) for instance in instances: - assert instance in mock_replica.info + assert instance in mock_transfer.info self.assertEqual( - mock_replica.info[instance]['source_environment'], - mock_replica.source_environment) + mock_transfer.info[instance]['source_environment'], + mock_transfer.source_environment) self.assertEqual( - mock_replica.info[instance]['target_environment'], + mock_transfer.info[instance]['target_environment'], mock_deepcopy.return_value) # generic tasks mock_create_task.assert_has_calls([ mock.call( instance, - constants.TASK_TYPE_VALIDATE_REPLICA_SOURCE_INPUTS, + constants.TASK_TYPE_VALIDATE_TRANSFER_SOURCE_INPUTS, mock_tasks_execution.return_value), mock.call( instance, @@ -1323,7 +1323,7 @@ def create_task_side_effect( mock_tasks_execution.return_value), mock.call( instance, - constants.TASK_TYPE_VALIDATE_REPLICA_DESTINATION_INPUTS, + constants.TASK_TYPE_VALIDATE_TRANSFER_DESTINATION_INPUTS, mock_tasks_execution.return_value, depends_on=[constants.TASK_TYPE_GET_INSTANCE_INFO]), ]) @@ -1346,15 +1346,15 @@ def create_task_side_effect( mock_update_transfer_action_info_for_instance.assert_has_calls([ mock.call( mock.sentinel.context, - mock_replica.id, + mock_transfer.id, instance, - mock_replica.info[instance], + mock_transfer.info[instance], ) ]) mock_check_execution_tasks_sanity.assert_called_once_with( mock_tasks_execution.return_value, - mock_replica.info) + mock_transfer.info) mock_add_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, @@ -1362,9 +1362,9 @@ def create_task_side_effect( if 
any([has_origin_minion_pool, has_target_minion_pool]): mock_minion_manager_client\ - .allocate_minion_machines_for_replica.assert_called_once_with( + .allocate_minion_machines_for_transfer.assert_called_once_with( mock.sentinel.context, - mock_replica, + mock_transfer, ) mock_set_tasks_execution_status.assert_called_once_with( mock.sentinel.context, @@ -1374,11 +1374,11 @@ def create_task_side_effect( else: mock_begin_tasks.assert_called_once_with( mock.sentinel.context, - mock_replica, + mock_transfer, mock_tasks_execution.return_value, ) - mock_get_replica_tasks_execution.assert_called_once_with( + mock_get_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, mock_tasks_execution.return_value.id) @@ -1388,17 +1388,17 @@ def create_task_side_effect( constants.EXECUTION_STATUS_UNEXECUTED) self.assertEqual( mock_tasks_execution.return_value.type, - constants.EXECUTION_TYPE_REPLICA_EXECUTION) + constants.EXECUTION_TYPE_TRANSFER_EXECUTION) self.assertEqual( - result, mock_get_replica_tasks_execution.return_value) + result, mock_get_transfer_tasks_execution.return_value) @mock.patch.object(db_api, "get_transfer_tasks_executions") - def test_get_replica_tasks_executions( + def test_get_transfer_tasks_executions( self, mock_get_transfer_tasks_executions ): result = testutils.get_wrapped_function( - self.server.get_replica_tasks_executions)( + self.server.get_transfer_tasks_executions)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, @@ -1419,12 +1419,12 @@ def test_get_replica_tasks_executions( ) @mock.patch.object(db_api, "get_transfer_tasks_execution") - def test_get_replica_tasks_execution( + def test_get_transfer_tasks_execution( self, mock_get_transfer_tasks_execution ): result = testutils.get_wrapped_function( - self.server.get_replica_tasks_execution)( + self.server.get_transfer_tasks_execution)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, @@ -1446,24 +1446,24 @@ def 
test_get_replica_tasks_execution( @mock.patch.object( server.ConductorServerEndpoint, - '_get_replica_tasks_execution' + '_get_transfer_tasks_execution' ) @mock.patch.object(db_api, 'delete_transfer_tasks_execution') - def test_delete_replica_tasks_execution( + def test_delete_transfer_tasks_execution( self, mock_delete_transfer_tasks_execution, - mock_get_replica_tasks_execution + mock_get_transfer_tasks_execution ): - def call_delete_replica_tasks_execution(): + def call_delete_transfer_tasks_execution(): return testutils.get_wrapped_function( - self.server.delete_replica_tasks_execution)( + self.server.delete_transfer_tasks_execution)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.execution_id, # type: ignore ) - call_delete_replica_tasks_execution() - mock_get_replica_tasks_execution.assert_called_once_with( + call_delete_transfer_tasks_execution() + mock_get_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.execution_id) @@ -1472,120 +1472,120 @@ def call_delete_replica_tasks_execution(): mock.sentinel.execution_id) # raises exception if status is active - mock_get_replica_tasks_execution.return_value.status = ( + mock_get_transfer_tasks_execution.return_value.status = ( constants.EXECUTION_STATUS_RUNNING) self.assertRaises( - exception.InvalidMigrationState, - call_delete_replica_tasks_execution) + exception.InvalidActionTasksExecutionState, + call_delete_transfer_tasks_execution) @mock.patch.object(server.ConductorServerEndpoint, - '_get_replica_tasks_execution') + '_get_transfer_tasks_execution') @mock.patch.object(server.ConductorServerEndpoint, '_cancel_tasks_execution') - def test_cancel_replica_tasks_execution( + def test_cancel_transfer_tasks_execution( self, - mock_cancel_replica_tasks_execution, - mock_get_replica_tasks_execution + mock_cancel_transfer_tasks_execution, + mock_get_transfer_tasks_execution ): - 
mock_get_replica_tasks_execution.return_value.status = constants\ + mock_get_transfer_tasks_execution.return_value.status = constants\ .EXECUTION_STATUS_RUNNING testutils.get_wrapped_function( - self.server.cancel_replica_tasks_execution)( + self.server.cancel_transfer_tasks_execution)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.execution_id, False ) - mock_get_replica_tasks_execution.assert_called_once_with( + mock_get_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.execution_id) - mock_cancel_replica_tasks_execution.assert_called_once_with( + mock_cancel_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock_get_replica_tasks_execution.return_value, + mock_get_transfer_tasks_execution.return_value, force=False) - mock_get_replica_tasks_execution.reset_mock() - mock_cancel_replica_tasks_execution.reset_mock() - mock_get_replica_tasks_execution.return_value.status = constants\ + mock_get_transfer_tasks_execution.reset_mock() + mock_cancel_transfer_tasks_execution.reset_mock() + mock_get_transfer_tasks_execution.return_value.status = constants\ .EXECUTION_STATUS_CANCELLING testutils.get_wrapped_function( - self.server.cancel_replica_tasks_execution)( + self.server.cancel_transfer_tasks_execution)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.execution_id, True ) - mock_get_replica_tasks_execution.assert_called_once_with( + mock_get_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.execution_id) - mock_cancel_replica_tasks_execution.assert_called_once_with( + mock_cancel_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock_get_replica_tasks_execution.return_value, + mock_get_transfer_tasks_execution.return_value, force=True) @mock.patch.object(server.ConductorServerEndpoint, - '_get_replica_tasks_execution') + 
'_get_transfer_tasks_execution') @mock.patch.object(server.ConductorServerEndpoint, '_cancel_tasks_execution') - def test_cancel_replica_tasks_execution_status_not_active( + def test_cancel_transfer_tasks_execution_status_not_active( self, - mock_cancel_replica_tasks_execution, - mock_get_replica_tasks_execution + mock_cancel_transfer_tasks_execution, + mock_get_transfer_tasks_execution ): self.assertRaises( - exception.InvalidReplicaState, + exception.InvalidTransferState, testutils.get_wrapped_function( - self.server.cancel_replica_tasks_execution), + self.server.cancel_transfer_tasks_execution), self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.execution_id, False ) - mock_get_replica_tasks_execution.assert_called_once_with( + mock_get_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.execution_id) - mock_cancel_replica_tasks_execution.assert_not_called() + mock_cancel_transfer_tasks_execution.assert_not_called() @mock.patch.object(server.ConductorServerEndpoint, - '_get_replica_tasks_execution') + '_get_transfer_tasks_execution') @mock.patch.object(server.ConductorServerEndpoint, '_cancel_tasks_execution') - def test_cancel_replica_tasks_execution_status_cancelling_no_force( + def test_cancel_transfer_tasks_execution_status_cancelling_no_force( self, - mock_cancel_replica_tasks_execution, - mock_get_replica_tasks_execution + mock_cancel_transfer_tasks_execution, + mock_get_transfer_tasks_execution ): - mock_get_replica_tasks_execution.return_value.status = constants\ + mock_get_transfer_tasks_execution.return_value.status = constants\ .EXECUTION_STATUS_CANCELLING self.assertRaises( - exception.InvalidReplicaState, + exception.InvalidTransferState, testutils.get_wrapped_function( - self.server.cancel_replica_tasks_execution), + self.server.cancel_transfer_tasks_execution), self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.execution_id, False ) - 
mock_get_replica_tasks_execution.assert_called_once_with( + mock_get_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.execution_id) - mock_cancel_replica_tasks_execution.assert_not_called() + mock_cancel_transfer_tasks_execution.assert_not_called() @mock.patch.object(db_api, 'get_transfer_tasks_execution') - def test__get_replica_tasks_execution( + def test__get_transfer_tasks_execution( self, mock_get_transfer_tasks_execution ): - result = self.server._get_replica_tasks_execution( + result = self.server._get_transfer_tasks_execution( mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.execution_id, @@ -1604,14 +1604,14 @@ def test__get_replica_tasks_execution( to_dict=False) @mock.patch.object(db_api, 'get_transfer_tasks_execution') - def test__get_replica_tasks_execution_no_execution( + def test__get_transfer_tasks_execution_no_execution( self, mock_get_transfer_tasks_execution ): mock_get_transfer_tasks_execution.return_value = None self.assertRaises( exception.NotFound, - self.server._get_replica_tasks_execution, + self.server._get_transfer_tasks_execution, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.execution_id, @@ -1627,8 +1627,8 @@ def test__get_replica_tasks_execution_no_execution( to_dict=False) @mock.patch.object(db_api, 'get_transfers') - def test_get_replicas(self, mock_get_transfers): - result = self.server.get_replicas( + def test_get_transfers(self, mock_get_transfers): + result = self.server.get_transfers( mock.sentinel.context, include_tasks_executions=False, include_task_info=False @@ -1645,9 +1645,9 @@ def test_get_replicas(self, mock_get_transfers): to_dict=True ) - @mock.patch.object(server.ConductorServerEndpoint, '_get_replica') - def test_get_replica(self, mock_get_replica): - result = testutils.get_wrapped_function(self.server.get_replica)( + @mock.patch.object(server.ConductorServerEndpoint, '_get_transfer') + def test_get_transfer(self, 
mock_get_transfer): + result = testutils.get_wrapped_function(self.server.get_transfer)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, @@ -1655,10 +1655,10 @@ def test_get_replica(self, mock_get_replica): ) self.assertEqual( - mock_get_replica.return_value, + mock_get_transfer.return_value, result ) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, include_task_info=False, @@ -1669,32 +1669,32 @@ def test_get_replica(self, mock_get_replica): @mock.patch.object(server.ConductorServerEndpoint, '_check_delete_reservation_for_transfer') @mock.patch.object(server.ConductorServerEndpoint, - '_check_replica_running_executions') - @mock.patch.object(server.ConductorServerEndpoint, '_get_replica') - def test_delete_replica( + '_check_transfer_running_executions') + @mock.patch.object(server.ConductorServerEndpoint, '_get_transfer') + def test_delete_transfer( self, - mock_get_replica, - mock_check_replica_running_executions, + mock_get_transfer, + mock_check_transfer_running_executions, mock_check_delete_reservation_for_transfer, mock_delete_transfer, ): - testutils.get_wrapped_function(self.server.delete_replica)( + testutils.get_wrapped_function(self.server.delete_transfer)( self.server, mock.sentinel.context, mock.sentinel.transfer_id ) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id) - mock_check_replica_running_executions.assert_called_once_with( - mock.sentinel.context, mock_get_replica.return_value) + mock_check_transfer_running_executions.assert_called_once_with( + mock.sentinel.context, mock_get_transfer.return_value) mock_check_delete_reservation_for_transfer.assert_called_once_with( - mock_get_replica.return_value) + mock_get_transfer.return_value) mock_delete_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id) @mock.patch.object( 
server.ConductorServerEndpoint, - 'get_replica_tasks_execution' + 'get_transfer_tasks_execution' ) @mock.patch.object( server.ConductorServerEndpoint, @@ -1715,16 +1715,16 @@ def test_delete_replica( @mock.patch.object(models, "TasksExecution") @mock.patch.object( server.ConductorServerEndpoint, - '_check_replica_running_executions' + '_check_transfer_running_executions' ) @mock.patch.object( server.ConductorServerEndpoint, - '_get_replica' + '_get_transfer' ) - def test_delete_replica_disks( + def test_delete_transfer_disks( self, - mock_get_replica, - mock_check_replica_running_executions, + mock_get_transfer, + mock_check_transfer_running_executions, mock_tasks_execution, mock_uuid4, mock_create_task, @@ -1733,17 +1733,17 @@ def test_delete_replica_disks( mock_update_transfer_action_info_for_instance, mock_add_transfer_tasks_execution, mock_begin_tasks, - mock_get_replica_tasks_execution, + mock_get_transfer_tasks_execution, ): - def call_delete_replica_disks(): + def call_delete_transfer_disks(): return testutils.get_wrapped_function( - self.server.delete_replica_disks)( + self.server.delete_transfer_disks)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, # type: ignore ) instances = [mock.Mock(), mock.Mock()] - mock_replica = mock.Mock( + mock_transfer = mock.Mock( instances=instances, id=mock.sentinel.transfer_id, network_map=mock.sentinel.network_map, @@ -1769,17 +1769,17 @@ def create_task_side_effect( mock_create_task.side_effect = create_task_side_effect - mock_get_replica.return_value = mock_replica - result = call_delete_replica_disks() + mock_get_transfer.return_value = mock_transfer + result = call_delete_transfer_disks() - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, include_task_info=True ) - mock_check_replica_running_executions.assert_called_once_with( + mock_check_transfer_running_executions.assert_called_once_with( mock.sentinel.context, - 
mock_replica + mock_transfer ) self.assertEqual( @@ -1788,25 +1788,25 @@ def create_task_side_effect( ) self.assertEqual( mock_tasks_execution.return_value.type, - constants.EXECUTION_TYPE_REPLICA_DISKS_DELETE + constants.EXECUTION_TYPE_TRANSFER_DISKS_DELETE ) for instance in instances: - assert instance in mock_replica.info + assert instance in mock_transfer.info mock_create_task.assert_has_calls([ mock.call( instance, - constants.TASK_TYPE_DELETE_REPLICA_SOURCE_DISK_SNAPSHOTS, + constants.TASK_TYPE_DELETE_TRANSFER_SOURCE_DISK_SNAPSHOTS, mock_tasks_execution.return_value, ), mock.call( instance, - constants.TASK_TYPE_DELETE_REPLICA_DISKS, + constants.TASK_TYPE_DELETE_TRANSFER_DISKS, mock_tasks_execution.return_value, depends_on=[ constants - .TASK_TYPE_DELETE_REPLICA_SOURCE_DISK_SNAPSHOTS + .TASK_TYPE_DELETE_TRANSFER_SOURCE_DISK_SNAPSHOTS ], ), ]) @@ -1814,16 +1814,16 @@ def create_task_side_effect( mock_update_transfer_action_info_for_instance\ .assert_has_calls([mock.call( mock.sentinel.context, - mock_replica.id, + mock_transfer.id, instance, - mock_replica.info[instance], + mock_transfer.info[instance], )]) mock_deepcopy.assert_called_once_with( - mock_replica.destination_environment) + mock_transfer.destination_environment) mock_check_execution_tasks_sanity.assert_called_once_with( mock_tasks_execution.return_value, - mock_replica.info, + mock_transfer.info, ) mock_add_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, @@ -1831,35 +1831,35 @@ def create_task_side_effect( ) mock_begin_tasks.assert_called_once_with( mock.sentinel.context, - mock_replica, + mock_transfer, mock_tasks_execution.return_value ) - mock_get_replica_tasks_execution.assert_called_once_with( + mock_get_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock_replica.id, + mock_transfer.id, mock_tasks_execution.return_value.id ) self.assertEqual( - result, mock_get_replica_tasks_execution.return_value) + result, 
mock_get_transfer_tasks_execution.return_value) # raises exception if instances have no volumes info instances[0].get.return_value = None instances[1].get.return_value = None self.assertRaises( - exception.InvalidReplicaState, - call_delete_replica_disks + exception.InvalidTransferState, + call_delete_transfer_disks ) - # raises exception if instance not in replica.info + # raises exception if instance not in transfer.info instances[0].get.return_value = mock.sentinel.volume_info instances[1].get.return_value = mock.sentinel.volume_info - mock_replica.info = {} + mock_transfer.info = {} self.assertRaises( - exception.InvalidReplicaState, - call_delete_replica_disks + exception.InvalidTransferState, + call_delete_transfer_disks ) def test_check_endpoints(self): @@ -1896,31 +1896,31 @@ def test_check_endpoints_same_destination_connection_info(self): destination_endpoint ) - @mock.patch.object(server.ConductorServerEndpoint, 'get_replica') + @mock.patch.object(server.ConductorServerEndpoint, 'get_transfer') @mock.patch.object(db_api, 'add_transfer') @mock.patch.object(server.ConductorServerEndpoint, - '_create_reservation_for_replica') + '_create_reservation_for_transfer') @mock.patch.object(server.ConductorServerEndpoint, '_check_minion_pools_for_action') @mock.patch.object(models, 'Transfer') @mock.patch.object(server.ConductorServerEndpoint, '_check_endpoints') @mock.patch.object(server.ConductorServerEndpoint, 'get_endpoint') - def test_create_instances_replica( + def test_create_instances_transfer( self, mock_get_endpoint, mock_check_endpoints, mock_transfer, mock_check_minion_pools_for_action, - mock_create_reservation_for_replica, + mock_create_reservation_for_transfer, mock_add_transfer, - mock_get_replica + mock_get_transfer ): mock_get_endpoint.side_effect = mock.sentinel.origin_endpoint_id, \ mock.sentinel.destination_endpoint_id mock_transfer.return_value = mock.Mock() - result = self.server.create_instances_replica( + result = 
self.server.create_instances_transfer( mock.sentinel.context, - constants.REPLICA_SCENARIO_REPLICA, + constants.TRANSFER_SCENARIO_REPLICA, mock.sentinel.origin_endpoint_id, mock.sentinel.destination_endpoint_id, mock.sentinel.origin_minion_pool_id, @@ -1935,7 +1935,7 @@ def test_create_instances_replica( user_scripts=None ) self.assertEqual( - mock_get_replica.return_value, + mock_get_transfer.return_value, result ) mock_get_endpoint.assert_has_calls([ @@ -1978,26 +1978,26 @@ def test_create_instances_replica( ) mock_check_minion_pools_for_action.assert_called_once_with( mock.sentinel.context, mock_transfer.return_value) - mock_create_reservation_for_replica.assert_called_once_with( + mock_create_reservation_for_transfer.assert_called_once_with( mock_transfer.return_value) mock_add_transfer.assert_called_once_with( mock.sentinel.context, mock_transfer.return_value) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock_transfer.return_value.id) @mock.patch.object(db_api, 'get_transfer') - def test__get_replica(self, mock_get_replica): - result = self.server._get_replica( + def test__get_transfer(self, mock_get_transfer): + result = self.server._get_transfer( mock.sentinel.context, mock.sentinel.transfer_id, include_task_info=False, to_dict=False ) self.assertEqual( - mock_get_replica.return_value, + mock_get_transfer.return_value, result ) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, include_task_info=False, @@ -2005,11 +2005,11 @@ def test__get_replica(self, mock_get_replica): ) @mock.patch.object(db_api, 'get_transfer') - def test__get_replica_not_found(self, mock_get_transfer): + def test__get_transfer_not_found(self, mock_get_transfer): mock_get_transfer.return_value = None self.assertRaises( exception.NotFound, - self.server._get_replica, + self.server._get_transfer, mock.sentinel.context, 
mock.sentinel.transfer_id, include_task_info=False, @@ -2022,41 +2022,41 @@ def test__get_replica_not_found(self, mock_get_transfer): to_dict=False ) - @mock.patch.object(server.ConductorServerEndpoint, '_get_migration') - def test_get_migration(self, mock_get_migration): - result = testutils.get_wrapped_function(self.server.get_migration)( + @mock.patch.object(server.ConductorServerEndpoint, '_get_deployment') + def test_get_deployment(self, mock_get_deployment): + result = testutils.get_wrapped_function(self.server.get_deployment)( self.server, mock.sentinel.context, - mock.sentinel.migration_id, + mock.sentinel.deployment_id, include_task_info=False ) self.assertEqual( - mock_get_migration.return_value, + mock_get_deployment.return_value, result ) - mock_get_migration.assert_called_once_with( + mock_get_deployment.assert_called_once_with( mock.sentinel.context, - mock.sentinel.migration_id, + mock.sentinel.deployment_id, include_task_info=False, to_dict=True ) @mock.patch.object(db_api, 'get_transfer_deployments') - def test_check_running_replica_migrations( + def test_check_running_transfer_deployments( self, mock_get_transfer_deployments ): - migration_1 = mock.Mock() - migration_2 = mock.Mock() - migration_1.executions = [mock.Mock()] - migration_1.executions[0].status = \ + deployment_1 = mock.Mock() + deployment_2 = mock.Mock() + deployment_1.executions = [mock.Mock()] + deployment_1.executions[0].status = \ constants.EXECUTION_STATUS_COMPLETED - migration_2.executions = [mock.Mock()] - migration_2.executions[0].status = \ + deployment_2.executions = [mock.Mock()] + deployment_2.executions[0].status = \ constants.EXECUTION_STATUS_ERROR - migrations = [migration_1, migration_2] - mock_get_transfer_deployments.return_value = migrations - self.server._check_running_replica_migrations( + deployments = [deployment_1, deployment_2] + mock_get_transfer_deployments.return_value = deployments + self.server._check_running_transfer_deployments( mock.sentinel.context, 
mock.sentinel.transfer_id, ) @@ -2066,22 +2066,22 @@ def test_check_running_replica_migrations( ) @mock.patch.object(db_api, 'get_transfer_deployments') - def test_check_running_replica_migrations_invalid_replica_state( + def test_check_running_transfer_deployments_invalid_transfer_state( self, mock_get_transfer_deployments ): - migration_1 = mock.Mock() - migration_2 = mock.Mock() - migration_1.executions = [mock.Mock()] - migration_1.executions[0].status = constants.EXECUTION_STATUS_RUNNING - migration_2.executions = [mock.Mock()] - migration_2.executions[0].status = \ + deployment_1 = mock.Mock() + deployment_2 = mock.Mock() + deployment_1.executions = [mock.Mock()] + deployment_1.executions[0].status = constants.EXECUTION_STATUS_RUNNING + deployment_2.executions = [mock.Mock()] + deployment_2.executions[0].status = \ constants.EXECUTION_STATUS_COMPLETED - migrations = [migration_1, migration_2] - mock_get_transfer_deployments.return_value = migrations + deployments = [deployment_1, deployment_2] + mock_get_transfer_deployments.return_value = deployments self.assertRaises( - exception.InvalidReplicaState, - self.server._check_running_replica_migrations, + exception.InvalidTransferState, + self.server._check_running_transfer_deployments, mock.sentinel.context, mock.sentinel.transfer_id, ) @@ -2113,42 +2113,42 @@ def test_check_running_executions_invalid_state(self): ) @mock.patch.object(server.ConductorServerEndpoint, - '_check_running_replica_migrations') + '_check_running_transfer_deployments') @mock.patch.object(server.ConductorServerEndpoint, '_check_running_executions') - def test_check_replica_running_executions( + def test_check_transfer_running_executions( self, mock_check_running_executions, - mock_check_running_replica_migrations + mock_check_running_transfer_deployments ): - replica = mock.Mock() - self.server._check_replica_running_executions( + transfer = mock.Mock() + self.server._check_transfer_running_executions( mock.sentinel.context, - replica + 
transfer ) - mock_check_running_executions.assert_called_once_with(replica) - mock_check_running_replica_migrations.assert_called_once_with( + mock_check_running_executions.assert_called_once_with(transfer) + mock_check_running_transfer_deployments.assert_called_once_with( mock.sentinel.context, - replica.id + transfer.id ) - def test_check_valid_replica_tasks_execution(self): + def test_check_valid_transfer_tasks_execution(self): execution1 = mock.Mock( number=1, - type=constants.EXECUTION_TYPE_REPLICA_EXECUTION, + type=constants.EXECUTION_TYPE_TRANSFER_EXECUTION, status=constants.EXECUTION_STATUS_COMPLETED, ) execution2 = mock.Mock( number=2, - type=constants.EXECUTION_TYPE_REPLICA_EXECUTION, + type=constants.EXECUTION_TYPE_TRANSFER_EXECUTION, status=constants.EXECUTION_STATUS_COMPLETED, ) - mock_replica = mock.Mock( + mock_transfer = mock.Mock( executions=[execution1, execution2] ) - self.server._check_valid_replica_tasks_execution( - mock_replica + self.server._check_valid_transfer_tasks_execution( + mock_transfer ) # raises exception if all executions are incomplete @@ -2156,15 +2156,15 @@ def test_check_valid_replica_tasks_execution(self): execution2.status = constants.EXECUTION_STATUS_UNEXECUTED self.assertRaises( - exception.InvalidReplicaState, - self.server._check_valid_replica_tasks_execution, - mock_replica + exception.InvalidTransferState, + self.server._check_valid_transfer_tasks_execution, + mock_transfer ) # doesn't raise exception if all executions are incomplete # and is forced - self.server._check_valid_replica_tasks_execution( - mock_replica, + self.server._check_valid_transfer_tasks_execution( + mock_transfer, True ) @@ -2172,16 +2172,16 @@ def test_check_valid_replica_tasks_execution(self): execution1.status = constants.EXECUTION_STATUS_COMPLETED execution2.status = constants.EXECUTION_STATUS_UNEXECUTED - self.server._check_valid_replica_tasks_execution( - mock_replica + self.server._check_valid_transfer_tasks_execution( + mock_transfer ) - 
mock_replica.executions = [] + mock_transfer.executions = [] self.assertRaises( - exception.InvalidReplicaState, - self.server._check_valid_replica_tasks_execution, - mock_replica + exception.InvalidTransferState, + self.server._check_valid_transfer_tasks_execution, + mock_transfer ) @mock.patch.object(server.ConductorServerEndpoint, @@ -2217,19 +2217,19 @@ def test_get_provider_types(self, mock_get_available_providers): @mock.patch.object( server.ConductorServerEndpoint, - '_get_replica' + '_get_transfer' ) @mock.patch.object( server.ConductorServerEndpoint, - '_check_reservation_for_replica' + '_check_reservation_for_transfer' ) @mock.patch.object( server.ConductorServerEndpoint, - '_check_replica_running_executions' + '_check_transfer_running_executions' ) @mock.patch.object( server.ConductorServerEndpoint, - '_check_valid_replica_tasks_execution' + '_check_valid_transfer_tasks_execution' ) @mock.patch.object( server.ConductorServerEndpoint, @@ -2240,7 +2240,7 @@ def test_get_provider_types(self, mock_get_available_providers): '_get_provider_types' ) @mock.patch.object(models, "Deployment") - @mock.patch.object(uuid, "uuid4", return_value="migration_id") + @mock.patch.object(uuid, "uuid4", return_value="deployment_id") @mock.patch.object(copy, "deepcopy") @mock.patch.object( server.ConductorServerEndpoint, @@ -2275,13 +2275,13 @@ def test_get_provider_types(self, mock_get_available_providers): ) @mock.patch.object( server.ConductorServerEndpoint, - "get_migration" + "get_deployment" ) - @ddt.file_data("data/deploy_replica_instance_config.yml") + @ddt.file_data("data/deploy_transfer_instance_config.yml") @ddt.unpack - def test_deploy_replica_instance( + def test_deploy_transfer_instance( self, - mock_get_migration, + mock_get_deployment, mock_begin_tasks, mock_set_tasks_execution_status, mock_minion_manager_client, @@ -2297,10 +2297,10 @@ def test_deploy_replica_instance( mock_deployment, mock_get_provider_types, mock_get_endpoint, - 
mock_check_valid_replica_tasks_execution, - mock_check_replica_running_executions, - mock_check_reservation_for_replica, - mock_get_replica, + mock_check_valid_transfer_tasks_execution, + mock_check_transfer_running_executions, + mock_check_reservation_for_transfer, + mock_get_transfer, config, expected_tasks, ): @@ -2321,7 +2321,7 @@ def test_deploy_replica_instance( mock.sentinel.instance2: mock.sentinel.pool2, } - mock_get_replica.return_value = mock.Mock( + mock_get_transfer.return_value = mock.Mock( instances=[mock.sentinel.instance1, mock.sentinel.instance2], info={ mock.sentinel.instance1: { @@ -2334,8 +2334,8 @@ def test_deploy_replica_instance( instance_osmorphing_minion_pool_mappings={} ) - def call_deploy_replica_instance(): - return self.server.deploy_replica_instances( + def call_deploy_transfer_instance(): + return self.server.deploy_transfer_instances( mock.sentinel.context, mock.sentinel.transfer_id, clone_disks=clone_disks, @@ -2348,26 +2348,26 @@ def call_deploy_replica_instance(): # One of the instances has no volumes info self.assertRaises( - exception.InvalidReplicaState, - call_deploy_replica_instance, + exception.InvalidTransferState, + call_deploy_transfer_instance, ) mock_get_endpoint.assert_called_once_with( mock.sentinel.context, - mock_get_replica.return_value.destination_endpoint_id + mock_get_transfer.return_value.destination_endpoint_id ) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, include_task_info=True, ) - mock_check_replica_running_executions.assert_called_once_with( + mock_check_transfer_running_executions.assert_called_once_with( mock.sentinel.context, - mock_get_replica.return_value + mock_get_transfer.return_value ) - mock_check_valid_replica_tasks_execution.assert_called_once_with( - mock_get_replica.return_value, + mock_check_valid_transfer_tasks_execution.assert_called_once_with( + mock_get_transfer.return_value, False ) 
mock_get_provider_types.assert_called_once_with( @@ -2376,7 +2376,7 @@ def call_deploy_replica_instance(): ) # add the missing volumes info - mock_get_replica.return_value.info[mock.sentinel.instance2] = { + mock_get_transfer.return_value.info[mock.sentinel.instance2] = { 'volumes_info': mock.sentinel.volumes_info2 } @@ -2401,14 +2401,14 @@ def create_task_side_effect( mock_create_task.side_effect = create_task_side_effect # no longer raises exception - migration = call_deploy_replica_instance() + deployment = call_deploy_transfer_instance() mock_check_minion_pools_for_action.assert_called_once_with( mock.sentinel.context, mock_deployment.return_value ) - mock_check_reservation_for_replica.assert_called_once_with( - mock_get_replica.return_value) + mock_check_reservation_for_transfer.assert_called_once_with( + mock_get_transfer.return_value) self.assertEqual( mock_tasks_execution.return_value.status, @@ -2416,17 +2416,17 @@ def create_task_side_effect( ) self.assertEqual( mock_tasks_execution.return_value.type, - constants.EXECUTION_TYPE_REPLICA_DEPLOY + constants.EXECUTION_TYPE_DEPLOYMENT ) - for instance in mock_get_replica.return_value.instances: + for instance in mock_get_transfer.return_value.instances: mock_get_instance_scripts.assert_any_call( mock.sentinel.user_scripts, instance, ) mock_create_task.assert_any_call( instance, - constants.TASK_TYPE_VALIDATE_REPLICA_DEPLOYMENT_INPUTS, + constants.TASK_TYPE_VALIDATE_DEPLOYMENT_INPUTS, mock_tasks_execution.return_value, ) @@ -2459,12 +2459,12 @@ def create_task_side_effect( if not skip_os_morphing and has_os_morphing_minion: mock_lock.assert_any_call( - constants.MIGRATION_LOCK_NAME_FORMAT + constants.DEPLOYMENT_LOCK_NAME_FORMAT % mock_deployment.return_value.id, external=True, ) mock_minion_manager_client\ - .allocate_minion_machines_for_migration\ + .allocate_minion_machines_for_deployment\ .assert_called_once_with( mock.sentinel.context, mock_deployment.return_value, @@ -2483,14 +2483,14 @@ def 
create_task_side_effect( mock_tasks_execution.return_value, ) - mock_get_migration.assert_called_once_with( + mock_get_deployment.assert_called_once_with( mock.sentinel.context, mock_deployment.return_value.id, ) self.assertEqual( - migration, - mock_get_migration.return_value + deployment, + mock_get_deployment.return_value ) def test_get_instance_scripts( @@ -2736,76 +2736,76 @@ def test_update_task_info_for_minion_allocations( expected_action_info[mock.sentinel.instance2] ) - @mock.patch.object(server.ConductorServerEndpoint, '_get_replica') - def test_get_last_execution_for_replica( + @mock.patch.object(server.ConductorServerEndpoint, '_get_transfer') + def test_get_last_execution_for_transfer( self, - mock_get_replica + mock_get_transfer ): - replica = mock.Mock() - replica.id = mock.sentinel.id + transfer = mock.Mock() + transfer.id = mock.sentinel.id execution1 = mock.Mock(id=mock.sentinel.execution_id1, number=1) execution2 = mock.Mock(id=mock.sentinel.execution_id2, number=3) execution3 = mock.Mock(id=mock.sentinel.execution_id3, number=2) - replica.executions = [execution1, execution2, execution3] - mock_get_replica.return_value = replica - result = self.server._get_last_execution_for_replica( + transfer.executions = [execution1, execution2, execution3] + mock_get_transfer.return_value = transfer + result = self.server._get_last_execution_for_transfer( mock.sentinel.context, - replica, + transfer, requery=False ) self.assertEqual( execution2, result ) - mock_get_replica.assert_not_called() - replica.executions = None + mock_get_transfer.assert_not_called() + transfer.executions = None self.assertRaises( - exception.InvalidReplicaState, - self.server._get_last_execution_for_replica, + exception.InvalidTransferState, + self.server._get_last_execution_for_transfer, mock.sentinel.context, - replica, + transfer, requery=True ) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.id) - 
@mock.patch.object(server.ConductorServerEndpoint, '_get_migration') - def test_get_execution_for_migration( + @mock.patch.object(server.ConductorServerEndpoint, '_get_deployment') + def test_get_execution_for_deployment( self, - mock_get_migration + mock_get_deployment ): - migration = mock.Mock() - migration.id = mock.sentinel.id + deployment = mock.Mock() + deployment.id = mock.sentinel.id execution1 = mock.Mock(id=mock.sentinel.execution_id1) execution2 = mock.Mock(id=mock.sentinel.execution_id2) - migration.executions = [execution1] - mock_get_migration.return_value = migration - result = self.server._get_execution_for_migration( + deployment.executions = [execution1] + mock_get_deployment.return_value = deployment + result = self.server._get_execution_for_deployment( mock.sentinel.context, - migration, + deployment, requery=False ) self.assertEqual( execution1, result ) - mock_get_migration.assert_not_called() - migration.executions = [execution1, execution2] + mock_get_deployment.assert_not_called() + deployment.executions = [execution1, execution2] self.assertRaises( - exception.InvalidMigrationState, - self.server._get_execution_for_migration, + exception.InvalidDeploymentState, + self.server._get_execution_for_deployment, mock.sentinel.context, - migration, + deployment, requery=True ) - mock_get_migration.assert_called_once_with( + mock_get_deployment.assert_called_once_with( mock.sentinel.context, mock.sentinel.id) - migration.executions = [] + deployment.executions = [] self.assertRaises( - exception.InvalidMigrationState, - self.server._get_execution_for_migration, + exception.InvalidDeploymentState, + self.server._get_execution_for_deployment, mock.sentinel.context, - migration, + deployment, requery=False ) @@ -2814,50 +2814,50 @@ def test_get_execution_for_migration( @mock.patch.object(server.ConductorServerEndpoint, '_update_task_info_for_minion_allocations') @mock.patch.object(server.ConductorServerEndpoint, - '_get_last_execution_for_replica') - 
@mock.patch.object(server.ConductorServerEndpoint, '_get_replica') - def test_confirm_replica_minions_allocation( + '_get_last_execution_for_transfer') + @mock.patch.object(server.ConductorServerEndpoint, '_get_transfer') + def test_confirm_transfer_minions_allocation( self, - mock_get_replica, - mock_get_last_execution_for_replica, + mock_get_transfer, + mock_get_last_execution_for_transfer, mock_update_task_info_for_minion_allocations, mock_get_transfer_tasks_execution, mock_begin_tasks ): - mock_get_replica.return_value.last_execution_status = \ + mock_get_transfer.return_value.last_execution_status = \ constants.EXECUTION_STATUS_AWAITING_MINION_ALLOCATIONS testutils.get_wrapped_function( - self.server.confirm_replica_minions_allocation)( + self.server.confirm_transfer_minions_allocation)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.minion_machine_allocations ) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, include_task_info=True ) - mock_get_last_execution_for_replica.assert_called_once_with( + mock_get_last_execution_for_transfer.assert_called_once_with( mock.sentinel.context, - mock_get_replica.return_value, + mock_get_transfer.return_value, requery=False ) mock_update_task_info_for_minion_allocations.assert_called_once_with( mock.sentinel.context, - mock_get_replica.return_value, + mock_get_transfer.return_value, mock.sentinel.minion_machine_allocations ) mock_get_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock_get_replica.return_value.id, - mock_get_last_execution_for_replica.return_value.id + mock_get_transfer.return_value.id, + mock_get_last_execution_for_transfer.return_value.id ) mock_begin_tasks.assert_called_once_with( mock.sentinel.context, - mock_get_replica.return_value, + mock_get_transfer.return_value, mock_get_transfer_tasks_execution.return_value ) @@ -2866,35 +2866,35 @@ def 
test_confirm_replica_minions_allocation( @mock.patch.object(server.ConductorServerEndpoint, '_update_task_info_for_minion_allocations') @mock.patch.object(server.ConductorServerEndpoint, - '_get_last_execution_for_replica') - @mock.patch.object(server.ConductorServerEndpoint, '_get_replica') - def test_confirm_replica_minions_allocation_unexpected_status( + '_get_last_execution_for_transfer') + @mock.patch.object(server.ConductorServerEndpoint, '_get_transfer') + def test_confirm_transfer_minions_allocation_unexpected_status( self, - mock_get_replica, - mock_get_last_execution_for_replica, + mock_get_transfer, + mock_get_last_execution_for_transfer, mock_update_task_info_for_minion_allocations, mock_get_transfer_tasks_execution, mock_begin_tasks ): - mock_get_replica.return_value.last_execution_status = \ + mock_get_transfer.return_value.last_execution_status = \ constants.EXECUTION_STATUS_CANCELED self.assertRaises( - exception.InvalidReplicaState, + exception.InvalidTransferState, testutils.get_wrapped_function( - self.server.confirm_replica_minions_allocation), + self.server.confirm_transfer_minions_allocation), self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.minion_machine_allocations ) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, include_task_info=True ) - mock_get_last_execution_for_replica.assert_not_called() + mock_get_last_execution_for_transfer.assert_not_called() mock_update_task_info_for_minion_allocations.assert_not_called() mock_get_transfer_tasks_execution.assert_not_called() mock_begin_tasks.assert_not_called() @@ -2904,43 +2904,43 @@ def test_confirm_replica_minions_allocation_unexpected_status( @mock.patch.object(server.ConductorServerEndpoint, '_cancel_tasks_execution') @mock.patch.object(server.ConductorServerEndpoint, - '_get_last_execution_for_replica') - @mock.patch.object(server.ConductorServerEndpoint, '_get_replica') - 
def test_report_replica_minions_allocation_error( + '_get_last_execution_for_transfer') + @mock.patch.object(server.ConductorServerEndpoint, '_get_transfer') + def test_report_transfer_minions_allocation_error( self, - mock_get_replica, - mock_get_last_execution_for_replica, + mock_get_transfer, + mock_get_last_execution_for_transfer, mock_cancel_tasks_execution, mock_set_tasks_execution_status ): - mock_get_replica.return_value.last_execution_status = \ + mock_get_transfer.return_value.last_execution_status = \ constants.EXECUTION_STATUS_AWAITING_MINION_ALLOCATIONS testutils.get_wrapped_function( - self.server.report_replica_minions_allocation_error)( + self.server.report_transfer_minions_allocation_error)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.minion_allocation_error_details ) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id ) - mock_get_last_execution_for_replica.assert_called_once_with( + mock_get_last_execution_for_transfer.assert_called_once_with( mock.sentinel.context, - mock_get_replica.return_value, + mock_get_transfer.return_value, requery=False ) mock_cancel_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock_get_last_execution_for_replica.return_value, + mock_get_last_execution_for_transfer.return_value, requery=True ) mock_set_tasks_execution_status.assert_called_once_with( mock.sentinel.context, - mock_get_last_execution_for_replica.return_value, + mock_get_last_execution_for_transfer.return_value, constants.EXECUTION_STATUS_ERROR_ALLOCATING_MINIONS ) @@ -2949,33 +2949,33 @@ def test_report_replica_minions_allocation_error( @mock.patch.object(server.ConductorServerEndpoint, '_cancel_tasks_execution') @mock.patch.object(server.ConductorServerEndpoint, - '_get_last_execution_for_replica') - @mock.patch.object(server.ConductorServerEndpoint, '_get_replica') - def 
test_report_replica_minions_allocation_error_unexpected_status( + '_get_last_execution_for_transfer') + @mock.patch.object(server.ConductorServerEndpoint, '_get_transfer') + def test_report_transfer_minions_allocation_error_unexpected_status( self, - mock_get_replica, - mock_get_last_execution_for_replica, + mock_get_transfer, + mock_get_last_execution_for_transfer, mock_cancel_tasks_execution, mock_set_tasks_execution_status ): - mock_get_replica.return_value.last_execution_status = \ + mock_get_transfer.return_value.last_execution_status = \ constants.EXECUTION_STATUS_CANCELED self.assertRaises( - exception.InvalidReplicaState, + exception.InvalidTransferState, testutils.get_wrapped_function( - self.server.report_replica_minions_allocation_error), + self.server.report_transfer_minions_allocation_error), self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.minion_allocation_error_details ) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id ) - mock_get_last_execution_for_replica.assert_not_called() + mock_get_last_execution_for_transfer.assert_not_called() mock_cancel_tasks_execution.assert_not_called() mock_set_tasks_execution_status.assert_not_called() @@ -2983,79 +2983,79 @@ def test_report_replica_minions_allocation_error_unexpected_status( @mock.patch.object(server.ConductorServerEndpoint, '_update_task_info_for_minion_allocations') @mock.patch.object(server.ConductorServerEndpoint, - '_get_execution_for_migration') - @mock.patch.object(server.ConductorServerEndpoint, '_get_migration') - def test_confirm_migration_minions_allocation( + '_get_execution_for_deployment') + @mock.patch.object(server.ConductorServerEndpoint, '_get_deployment') + def test_confirm_deployment_minions_allocation( self, - mock_get_migration, - mock_get_execution_for_migration, + mock_get_deployment, + mock_get_execution_for_deployment, 
mock_update_task_info_for_minion_allocations, mock_begin_tasks ): - mock_get_migration.return_value.last_execution_status = \ + mock_get_deployment.return_value.last_execution_status = \ constants.EXECUTION_STATUS_AWAITING_MINION_ALLOCATIONS testutils.get_wrapped_function( - self.server.confirm_migration_minions_allocation)( + self.server.confirm_deployment_minions_allocation)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.minion_machine_allocations ) - mock_get_migration.assert_called_once_with( + mock_get_deployment.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, include_task_info=True ) - mock_get_execution_for_migration.assert_called_once_with( + mock_get_execution_for_deployment.assert_called_once_with( mock.sentinel.context, - mock_get_migration.return_value, + mock_get_deployment.return_value, requery=False ) mock_update_task_info_for_minion_allocations.assert_called_once_with( mock.sentinel.context, - mock_get_migration.return_value, + mock_get_deployment.return_value, mock.sentinel.minion_machine_allocations ) mock_begin_tasks.assert_called_once_with( mock.sentinel.context, - mock_get_migration.return_value, - mock_get_execution_for_migration.return_value + mock_get_deployment.return_value, + mock_get_execution_for_deployment.return_value ) @mock.patch.object(server.ConductorServerEndpoint, '_begin_tasks') @mock.patch.object(server.ConductorServerEndpoint, '_update_task_info_for_minion_allocations') @mock.patch.object(server.ConductorServerEndpoint, - '_get_execution_for_migration') - @mock.patch.object(server.ConductorServerEndpoint, '_get_migration') - def test_confirm_migration_minions_allocation_unexpected_status( + '_get_execution_for_deployment') + @mock.patch.object(server.ConductorServerEndpoint, '_get_deployment') + def test_confirm_deployment_minions_allocation_unexpected_status( self, - mock_get_migration, - mock_get_execution_for_migration, + mock_get_deployment, + 
mock_get_execution_for_deployment, mock_update_task_info_for_minion_allocations, mock_begin_tasks ): - mock_get_migration.return_value.last_execution_status = \ + mock_get_deployment.return_value.last_execution_status = \ constants.EXECUTION_STATUS_CANCELED self.assertRaises( - exception.InvalidMigrationState, + exception.InvalidDeploymentState, testutils.get_wrapped_function( - self.server.confirm_migration_minions_allocation), + self.server.confirm_deployment_minions_allocation), self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.minion_machine_allocations ) - mock_get_migration.assert_called_once_with( + mock_get_deployment.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, include_task_info=True ) - mock_get_execution_for_migration.assert_not_called() + mock_get_execution_for_deployment.assert_not_called() mock_update_task_info_for_minion_allocations.assert_not_called() mock_begin_tasks.assert_not_called() @@ -3065,43 +3065,43 @@ def test_confirm_migration_minions_allocation_unexpected_status( @mock.patch.object(server.ConductorServerEndpoint, '_cancel_tasks_execution') @mock.patch.object(server.ConductorServerEndpoint, - '_get_execution_for_migration') - @mock.patch.object(server.ConductorServerEndpoint, '_get_migration') - def test_report_migration_minions_allocation_error( + '_get_execution_for_deployment') + @mock.patch.object(server.ConductorServerEndpoint, '_get_deployment') + def test_report_deployment_minions_allocation_error( self, - mock_get_migration, - mock_get_execution_for_migration, + mock_get_deployment, + mock_get_execution_for_deployment, mock_cancel_tasks_execution, mock_set_tasks_execution_status ): - mock_get_migration.return_value.last_execution_status = \ + mock_get_deployment.return_value.last_execution_status = \ constants.EXECUTION_STATUS_AWAITING_MINION_ALLOCATIONS testutils.get_wrapped_function( - self.server.report_migration_minions_allocation_error)( + 
self.server.report_deployment_minions_allocation_error)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.minion_allocation_error_details ) - mock_get_migration.assert_called_once_with( + mock_get_deployment.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id ) - mock_get_execution_for_migration.assert_called_once_with( + mock_get_execution_for_deployment.assert_called_once_with( mock.sentinel.context, - mock_get_migration.return_value, + mock_get_deployment.return_value, requery=False ) mock_cancel_tasks_execution.assert_called_once_with( mock.sentinel.context, - mock_get_execution_for_migration.return_value, + mock_get_execution_for_deployment.return_value, requery=True ) mock_set_tasks_execution_status.assert_called_once_with( mock.sentinel.context, - mock_get_execution_for_migration.return_value, + mock_get_execution_for_deployment.return_value, constants.EXECUTION_STATUS_ERROR_ALLOCATING_MINIONS ) @@ -3110,33 +3110,33 @@ def test_report_migration_minions_allocation_error( @mock.patch.object(server.ConductorServerEndpoint, '_cancel_tasks_execution') @mock.patch.object(server.ConductorServerEndpoint, - '_get_execution_for_migration') - @mock.patch.object(server.ConductorServerEndpoint, '_get_migration') - def test_report_migration_minions_allocation_error_unexpected_status( + '_get_execution_for_deployment') + @mock.patch.object(server.ConductorServerEndpoint, '_get_deployment') + def test_report_deployment_minions_allocation_error_unexpected_status( self, - mock_get_migration, - mock_get_execution_for_migration, + mock_get_deployment, + mock_get_execution_for_deployment, mock_cancel_tasks_execution, mock_set_tasks_execution_status ): - mock_get_migration.return_value.last_execution_status = \ + mock_get_deployment.return_value.last_execution_status = \ constants.EXECUTION_STATUS_CANCELED self.assertRaises( - exception.InvalidMigrationState, + exception.InvalidDeploymentState, testutils.get_wrapped_function( - 
self.server.report_migration_minions_allocation_error), + self.server.report_deployment_minions_allocation_error), self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.minion_allocation_error_details ) - mock_get_migration.assert_called_once_with( + mock_get_deployment.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id ) - mock_get_execution_for_migration.assert_not_called() + mock_get_execution_for_deployment.assert_not_called() mock_cancel_tasks_execution.assert_not_called() mock_set_tasks_execution_status.assert_not_called() @@ -3247,13 +3247,13 @@ def call_cancel_tasks_execution( ) @mock.patch.object(db_api, 'get_deployment') - def test__get_migration( + def test__get_deployment( self, mock_get_deployment ): - result = self.server._get_migration( + result = self.server._get_deployment( mock.sentinel.context, - mock.sentinel.migration_id, + mock.sentinel.deployment_id, include_task_info=False, to_dict=False ) @@ -3264,7 +3264,7 @@ def test__get_migration( mock_get_deployment.assert_called_once_with( mock.sentinel.context, - mock.sentinel.migration_id, + mock.sentinel.deployment_id, include_task_info=False, to_dict=False ) @@ -3273,16 +3273,16 @@ def test__get_migration( self.assertRaises( exception.NotFound, - self.server._get_migration, + self.server._get_deployment, mock.sentinel.context, - mock.sentinel.migration_id, + mock.sentinel.deployment_id, include_task_info=False, to_dict=False ) mock_get_deployment.assert_called_once_with( mock.sentinel.context, - mock.sentinel.migration_id, + mock.sentinel.deployment_id, include_task_info=False, to_dict=False ) @@ -3997,11 +3997,11 @@ def test_advance_execution_state_scheduled_tasks( ) @mock.patch.object(db_api, 'update_transfer_action_info_for_instance') - def test_update_replica_volumes_info( + def test_update_transfer_volumes_info( self, mock_update_transfer_action_info_for_instance ): - self.server._update_replica_volumes_info( + 
self.server._update_transfer_volumes_info( mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.instance, @@ -4016,35 +4016,35 @@ def test_update_replica_volumes_info( ) @mock.patch.object(server.ConductorServerEndpoint, - '_update_replica_volumes_info') + '_update_transfer_volumes_info') @mock.patch.object(lockutils, 'lock') @mock.patch.object(db_api, 'get_deployment') - def test_update_volumes_info_for_migration_parent_replica( + def test_update_volumes_info_for_deployment_parent_transfer( self, mock_get_deployment, mock_lock, - mock_update_replica_volumes_info + mock_update_transfer_volumes_info ): deployment = mock.Mock() mock_get_deployment.return_value = deployment - self.server._update_volumes_info_for_migration_parent_replica( + self.server._update_volumes_info_for_deployment_parent_transfer( mock.sentinel.context, - mock.sentinel.migration_id, + mock.sentinel.deployment_id, mock.sentinel.instance, mock.sentinel.updated_task_info ) mock_get_deployment.assert_called_once_with( mock.sentinel.context, - mock.sentinel.migration_id + mock.sentinel.deployment_id ) mock_lock.assert_called_once_with( - constants.REPLICA_LOCK_NAME_FORMAT % + constants.TRANSFER_LOCK_NAME_FORMAT % mock_get_deployment.return_value.transfer_id, external=True ) - mock_update_replica_volumes_info.assert_called_once_with( + mock_update_transfer_volumes_info.assert_called_once_with( mock.sentinel.context, mock_get_deployment.return_value.transfer_id, mock.sentinel.instance, @@ -4059,27 +4059,26 @@ def test_update_volumes_info_for_migration_parent_replica( @mock.patch.object(db_api, 'update_transfer') @mock.patch.object( server.ConductorServerEndpoint, - '_update_replica_volumes_info' + '_update_transfer_volumes_info' ) @mock.patch.object(db_api, 'set_transfer_action_result') @mock.patch.object(schemas, 'validate_value') @mock.patch.object( server.ConductorServerEndpoint, - '_update_volumes_info_for_migration_parent_replica' + '_update_volumes_info_for_deployment_parent_transfer' 
) def test_handle_post_task_actions( self, - mock_update_volumes_info_for_migration_parent_replica, + mock_update_volumes_info_for_deployment_parent_transfer, mock_validate_value, mock_set_transfer_action_result, - mock_update_replica_volumes_info, + mock_update_transfer_volumes_info, mock_update_transfer, mock_update_minion_machine, mock_minion_manager_client, ): - # TASK_TYPE_RESTORE_REPLICA_DISK_SNAPSHOTS task = mock.Mock( - task_type=constants.TASK_TYPE_RESTORE_REPLICA_DISK_SNAPSHOTS, + task_type=constants.TASK_TYPE_RESTORE_TRANSFER_DISK_SNAPSHOTS, instance=mock.sentinel.instance, ) execution = mock.Mock( @@ -4099,7 +4098,7 @@ def call_handle_post_task_actions(): call_handle_post_task_actions() # no volumes_info - mock_update_volumes_info_for_migration_parent_replica\ + mock_update_volumes_info_for_deployment_parent_transfer\ .assert_not_called() # has volumes_info @@ -4109,7 +4108,7 @@ def call_handle_post_task_actions(): } ] call_handle_post_task_actions() - mock_update_volumes_info_for_migration_parent_replica\ + mock_update_volumes_info_for_deployment_parent_transfer\ .assert_called_once_with( mock.sentinel.context, mock.sentinel.action_id, @@ -4117,12 +4116,11 @@ def call_handle_post_task_actions(): {"volumes_info": task_info["volumes_info"]}, ) - # TASK_TYPE_DELETE_REPLICA_TARGET_DISK_SNAPSHOTS task.task_type = constants\ - .TASK_TYPE_DELETE_REPLICA_TARGET_DISK_SNAPSHOTS + .TASK_TYPE_DELETE_TRANSFER_TARGET_DISK_SNAPSHOTS call_handle_post_task_actions() # no clone_disks, reset volumes_info - mock_update_volumes_info_for_migration_parent_replica\ + mock_update_volumes_info_for_deployment_parent_transfer\ .assert_called_with( mock.sentinel.context, mock.sentinel.action_id, @@ -4136,17 +4134,14 @@ def call_handle_post_task_actions(): 'id': 'clone_disk_id', } ] - mock_update_volumes_info_for_migration_parent_replica\ + mock_update_volumes_info_for_deployment_parent_transfer\ .reset_mock() call_handle_post_task_actions() - 
mock_update_volumes_info_for_migration_parent_replica\ + mock_update_volumes_info_for_deployment_parent_transfer\ .assert_not_called() - # TASK_TYPE_FINALIZE_INSTANCE_DEPLOYMENT - # TASK_TYPE_FINALIZE_REPLICA_INSTANCE_DEPLOYMENT types = [ constants.TASK_TYPE_FINALIZE_INSTANCE_DEPLOYMENT, - constants.TASK_TYPE_FINALIZE_REPLICA_INSTANCE_DEPLOYMENT, ] for task_type in types: task.task_type = task_type @@ -4186,11 +4181,9 @@ def call_handle_post_task_actions(): mock_validate_value.reset_mock() mock_set_transfer_action_result.reset_mock() - # TASK_TYPE_UPDATE_SOURCE_REPLICA - # TASK_TYPE_UPDATE_DESTINATION_REPLICA types = [ - constants.TASK_TYPE_UPDATE_SOURCE_REPLICA, - constants.TASK_TYPE_UPDATE_DESTINATION_REPLICA, + constants.TASK_TYPE_UPDATE_SOURCE_TRANSFER, + constants.TASK_TYPE_UPDATE_DESTINATION_TRANSFER, ] execution.tasks = [ mock.Mock( @@ -4200,9 +4193,9 @@ def call_handle_post_task_actions(): ] for task_type in types: task.task_type = task_type - mock_update_replica_volumes_info.reset_mock() + mock_update_transfer_volumes_info.reset_mock() call_handle_post_task_actions() - mock_update_replica_volumes_info.assert_called_once_with( + mock_update_transfer_volumes_info.assert_called_once_with( mock.sentinel.context, mock.sentinel.action_id, mock.sentinel.instance, @@ -4210,7 +4203,7 @@ def call_handle_post_task_actions(): ) # execution has active tasks - task.type = constants.TASK_TYPE_UPDATE_DESTINATION_REPLICA + task.type = constants.TASK_TYPE_UPDATE_DESTINATION_TRANSFER call_handle_post_task_actions() mock_update_transfer.assert_not_called() @@ -4371,7 +4364,7 @@ def test_task_completed( mock_get_tasks_execution.return_value = mock.Mock( id=mock.sentinel.execution_id, - type=constants.EXECUTION_TYPE_MIGRATION, + type=constants.EXECUTION_TYPE_DEPLOYMENT, action_id=mock.sentinel.action_id, tasks=[ mock.Mock( @@ -4475,9 +4468,6 @@ def test_cancel_execution_for_osmorphing_debugging( @mock.patch.object(server.ConductorServerEndpoint, "_advance_execution_state") - 
@mock.patch.object(server.ConductorServerEndpoint, - "_check_delete_reservation_for_transfer") - @mock.patch.object(db_api, "get_action") @mock.patch.object(db_api, "get_tasks_execution") @mock.patch.object(db_api, "set_task_status") @mock.patch.object(db_api, "get_task") @@ -4488,8 +4478,6 @@ def test_confirm_task_cancellation( mock_get_task, mock_set_task_status, mock_get_tasks_execution, - mock_get_action, - mock_check_delete_reservation, mock_advance_execution_state, task_status, expected_final_status, @@ -4500,7 +4488,7 @@ def test_confirm_task_cancellation( expected_final_status = getattr(constants, expected_final_status) mock_get_task.return_value = task mock_execution = mock.MagicMock() - mock_execution.type = constants.EXECUTION_TYPE_MIGRATION + mock_execution.type = constants.EXECUTION_TYPE_DEPLOYMENT mock_get_tasks_execution.return_value = mock_execution testutils.get_wrapped_function(self.server.confirm_task_cancellation)( @@ -4519,11 +4507,6 @@ def test_confirm_task_cancellation( if expected_advance_execution_state_call: mock_get_tasks_execution.assert_called_once_with( mock.sentinel.context, task.execution_id) - mock_get_action.assert_called_once_with( - mock.sentinel.context, mock_execution.action_id, - include_task_info=False) - mock_check_delete_reservation.assert_called_once_with( - mock_get_action.return_value) mock_advance_execution_state.assert_called_once_with( mock.sentinel.context, mock_get_tasks_execution.return_value, @@ -4572,7 +4555,7 @@ def test_set_task_error( ): task_status = config['task_status'] mock_get_tasks_execution.return_value = mock.Mock( - type=constants.EXECUTION_TYPE_MIGRATION, + type=constants.EXECUTION_TYPE_DEPLOYMENT, action_id=mock.sentinel.action_id, tasks=[ mock.Mock( @@ -4733,11 +4716,11 @@ def test_update_task_progress_update( ) @mock.patch.object(db_api, "get_transfer_schedule") - def test__get_replica_schedule( + def test__get_transfer_schedule( self, mock_get_transfer_schedule ): - result = 
self.server._get_replica_schedule( + result = self.server._get_transfer_schedule( mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.schedule_id, @@ -4760,7 +4743,7 @@ def test__get_replica_schedule( self.assertRaises( exception.NotFound, - self.server._get_replica_schedule, + self.server._get_transfer_schedule, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.schedule_id, @@ -4774,25 +4757,25 @@ def test__get_replica_schedule( expired=False ) - @mock.patch.object(server.ConductorServerEndpoint, "get_replica_schedule") + @mock.patch.object(server.ConductorServerEndpoint, "get_transfer_schedule") @mock.patch.object(db_api, "add_transfer_schedule") @mock.patch.object(models, "TransferSchedule") - @mock.patch.object(server.ConductorServerEndpoint, "_get_replica") + @mock.patch.object(server.ConductorServerEndpoint, "_get_transfer") @mock.patch.object(keystone, "create_trust") - def test_create_replica_schedule( + def test_create_transfer_schedule( self, mock_create_trust, - mock_get_replica, + mock_get_transfer, mock_transfer_schedule, mock_add_transfer_schedule, - mock_get_replica_schedule + mock_get_transfer_schedule ): context = mock.Mock() transfer_schedule = mock.Mock() context.trust_id = mock.sentinel.trust_id mock_transfer_schedule.return_value = transfer_schedule - result = self.server.create_replica_schedule( + result = self.server.create_transfer_schedule( context, mock.sentinel.transfer_id, mock.sentinel.schedule, @@ -4802,7 +4785,7 @@ def test_create_replica_schedule( ) self.assertEqual( - mock_get_replica_schedule.return_value, + mock_get_transfer_schedule.return_value, result ) self.assertEqual( @@ -4816,7 +4799,7 @@ def test_create_replica_schedule( transfer_schedule.trust_id ), ( - mock_get_replica.return_value, + mock_get_transfer.return_value, mock.sentinel.transfer_id, mock.sentinel.schedule, mock.sentinel.exp_date, @@ -4826,7 +4809,7 @@ def test_create_replica_schedule( ) ) 
mock_create_trust.assert_called_once_with(context) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( context, mock.sentinel.transfer_id, ) @@ -4836,21 +4819,22 @@ def test_create_replica_schedule( transfer_schedule, mock.ANY ) - mock_get_replica_schedule.assert_called_once_with( + mock_get_transfer_schedule.assert_called_once_with( context, mock.sentinel.transfer_id, transfer_schedule.id ) - @mock.patch.object(server.ConductorServerEndpoint, "_get_replica_schedule") + @mock.patch.object( + server.ConductorServerEndpoint, "_get_transfer_schedule") @mock.patch.object(db_api, "update_transfer_schedule") - def test_update_replica_schedule( + def test_update_transfer_schedule( self, mock_update_transfer_schedule, - mock_get_replica_schedule + mock_get_transfer_schedule ): result = testutils.get_wrapped_function( - self.server.update_replica_schedule)( + self.server.update_transfer_schedule)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, @@ -4859,7 +4843,7 @@ def test_update_replica_schedule( ) self.assertEqual( - mock_get_replica_schedule.return_value, + mock_get_transfer_schedule.return_value, result ) mock_update_transfer_schedule.assert_called_once_with( @@ -4870,7 +4854,7 @@ def test_update_replica_schedule( None, mock.ANY ) - mock_get_replica_schedule.assert_called_once_with( + mock_get_transfer_schedule.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.schedule_id, @@ -4878,10 +4862,10 @@ def test_update_replica_schedule( @mock.patch.object(keystone, "delete_trust") @mock.patch.object(context, "get_admin_context") - @mock.patch.object(server.ConductorServerEndpoint, "_replica_cron_client") + @mock.patch.object(server.ConductorServerEndpoint, "_transfer_cron_client") def test_cleanup_schedule_resources( self, - mock_replica_cron_client, + mock_transfer_cron_client, mock_get_admin_context, mock_delete_trust, ): @@ -4893,14 +4877,14 @@ def test_cleanup_schedule_resources( 
schedule ) - mock_replica_cron_client.unregister.assert_called_once_with( + mock_transfer_cron_client.unregister.assert_called_once_with( mock.sentinel.context, schedule ) mock_get_admin_context.assert_not_called() mock_delete_trust.assert_not_called() - mock_replica_cron_client.reset_mock() + mock_transfer_cron_client.reset_mock() schedule.trust_id = mock.sentinel.trust_id self.server._cleanup_schedule_resources( @@ -4908,7 +4892,7 @@ def test_cleanup_schedule_resources( schedule ) - mock_replica_cron_client.unregister.assert_called_once_with( + mock_transfer_cron_client.unregister.assert_called_once_with( mock.sentinel.context, schedule ) @@ -4918,24 +4902,24 @@ def test_cleanup_schedule_resources( mock_get_admin_context.return_value) @mock.patch.object(db_api, "delete_transfer_schedule") - @mock.patch.object(server.ConductorServerEndpoint, "_get_replica") - def test_delete_replica_schedule( + @mock.patch.object(server.ConductorServerEndpoint, "_get_transfer") + def test_delete_transfer_schedule( self, - mock_get_replica, + mock_get_transfer, mock_delete_transfer_schedule ): - replica = mock.Mock() - replica.last_execution_status = constants.EXECUTION_STATUS_COMPLETED - mock_get_replica.return_value = replica + transfer = mock.Mock() + transfer.last_execution_status = constants.EXECUTION_STATUS_COMPLETED + mock_get_transfer.return_value = transfer - testutils.get_wrapped_function(self.server.delete_replica_schedule)( + testutils.get_wrapped_function(self.server.delete_transfer_schedule)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.schedule_id ) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id ) @@ -4947,33 +4931,33 @@ def test_delete_replica_schedule( mock.ANY ) - mock_get_replica.reset_mock() + mock_get_transfer.reset_mock() mock_delete_transfer_schedule.reset_mock() - replica.last_execution_status = constants.EXECUTION_STATUS_RUNNING + 
transfer.last_execution_status = constants.EXECUTION_STATUS_RUNNING self.assertRaises( - exception.InvalidReplicaState, + exception.InvalidTransferState, testutils.get_wrapped_function( - self.server.delete_replica_schedule), + self.server.delete_transfer_schedule), self.server, mock.sentinel.context, mock.sentinel.transfer_id, mock.sentinel.schedule_id ) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id ) mock_delete_transfer_schedule.assert_not_called() @mock.patch.object(db_api, "get_transfer_schedules") - def test_get_replica_schedules(self, mock_get_transfer_schedules): + def test_get_transfer_schedules(self, mock_get_transfer_schedules): result = testutils.get_wrapped_function( - self.server.get_replica_schedules)( + self.server.get_transfer_schedules)( self.server, mock.sentinel.context, - replica_id=None, + transfer_id=None, expired=True ) @@ -4988,9 +4972,9 @@ def test_get_replica_schedules(self, mock_get_transfer_schedules): ) @mock.patch.object(db_api, "get_transfer_schedule") - def test_get_replica_schedule(self, mock_get_transfer_schedule): + def test_get_transfer_schedule(self, mock_get_transfer_schedule): result = testutils.get_wrapped_function( - self.server.get_replica_schedule)( + self.server.get_transfer_schedule)( self.server, mock.sentinel.context, mock.sentinel.transfer_id, @@ -5010,7 +4994,7 @@ def test_get_replica_schedule(self, mock_get_transfer_schedule): ) @mock.patch.object(server.ConductorServerEndpoint, - "get_replica_tasks_execution") + "get_transfer_tasks_execution") @mock.patch.object(server.ConductorServerEndpoint, "_begin_tasks") @mock.patch.object(db_api, "add_transfer_tasks_execution") @mock.patch.object(db_api, "update_transfer_action_info_for_instance") @@ -5020,22 +5004,22 @@ def test_get_replica_schedule(self, mock_get_transfer_schedule): @mock.patch.object(utils, "sanitize_task_info") @mock.patch.object(models, "TasksExecution") 
@mock.patch.object(server.ConductorServerEndpoint, - "_check_valid_replica_tasks_execution") + "_check_valid_transfer_tasks_execution") @mock.patch.object(server.ConductorServerEndpoint, - "_check_replica_running_executions") + "_check_transfer_running_executions") @mock.patch.object(server.ConductorServerEndpoint, "_check_minion_pools_for_action") @mock.patch.object(models, "Transfer") - @mock.patch.object(server.ConductorServerEndpoint, "_get_replica") - @ddt.file_data("data/update_replica_config.yml") + @mock.patch.object(server.ConductorServerEndpoint, "_get_transfer") + @ddt.file_data("data/update_transfer_config.yml") @ddt.unpack - def test_update_replica( + def test_update_transfer( self, - mock_get_replica, + mock_get_transfer, mock_transfer, mock_check_minion_pools_for_action, - mock_check_replica_running_executions, - mock_check_valid_replica_tasks_execution, + mock_check_transfer_running_executions, + mock_check_valid_transfer_tasks_execution, mock_TasksExecution, mock_sanitize_task_info, mock_create_task, @@ -5043,22 +5027,22 @@ def test_update_replica( mock_update_transfer_action_info_for_instance, mock_add_transfer_tasks_execution, mock_begin_tasks, - mock_get_replica_tasks_execution, + mock_get_transfer_tasks_execution, config, has_updated_values, - has_replica_instance + has_transfer_instance ): transfer = mock.Mock() dummy = mock.Mock() execution = mock.Mock() - transfer.instances = config['replica'].get("instances", []) - transfer.info = config['replica'].get("info", {}) - mock_get_replica.return_value = transfer + transfer.instances = config['transfer'].get("instances", []) + transfer.info = config['transfer'].get("info", {}) + mock_get_transfer.return_value = transfer mock_transfer.return_value = dummy mock_TasksExecution.return_value = execution updated_properties = config.get("updated_properties", {}) - result = testutils.get_wrapped_function(self.server.update_replica)( + result = testutils.get_wrapped_function(self.server.update_transfer)( 
self.server, mock.sentinel.context, mock.sentinel.transfer_id, @@ -5066,25 +5050,25 @@ def test_update_replica( ) self.assertEqual( - mock_get_replica_tasks_execution.return_value, + mock_get_transfer_tasks_execution.return_value, result ) - mock_get_replica.assert_called_once_with( + mock_get_transfer.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, include_task_info=True ) - mock_check_replica_running_executions.assert_called_once_with( + mock_check_transfer_running_executions.assert_called_once_with( mock.sentinel.context, - mock_get_replica.return_value, + mock_get_transfer.return_value, ) - mock_check_valid_replica_tasks_execution.assert_called_once_with( - mock_get_replica.return_value, + mock_check_valid_transfer_tasks_execution.assert_called_once_with( + mock_get_transfer.return_value, force=True, ) self.assertEqual( execution.action, - mock_get_replica.return_value + mock_get_transfer.return_value ) mock_check_execution_tasks_sanity.assert_called_once_with( execution, @@ -5099,7 +5083,7 @@ def test_update_replica( transfer, execution ) - mock_get_replica_tasks_execution.assert_called_once_with( + mock_get_transfer_tasks_execution.assert_called_once_with( mock.sentinel.context, mock.sentinel.transfer_id, execution.id @@ -5111,11 +5095,11 @@ def test_update_replica( ) else: mock_check_minion_pools_for_action.assert_not_called() - if has_replica_instance: + if has_transfer_instance: expected_sanitize_task_info_calls = [] create_task_calls = [] update_transfer_action_info_for_instance_calls = [] - for instance in config['replica'].get("info", {}): + for instance in config['transfer'].get("info", {}): expected_sanitize_task_info_calls.append( mock.call(mock.ANY)) expected_sanitize_task_info_calls.append( @@ -5126,11 +5110,11 @@ def test_update_replica( execution)) create_task_calls.append(mock.call( instance, - constants.TASK_TYPE_UPDATE_SOURCE_REPLICA, + constants.TASK_TYPE_UPDATE_SOURCE_TRANSFER, execution)) 
create_task_calls.append(mock.call( instance, - constants.TASK_TYPE_UPDATE_DESTINATION_REPLICA, + constants.TASK_TYPE_UPDATE_DESTINATION_TRANSFER, execution, depends_on=mock.ANY)) update_transfer_action_info_for_instance_calls.append( @@ -5594,7 +5578,7 @@ def test_set_task_error_os_morphing( mock_conf_conductor, ): execution = mock.Mock( - type=constants.EXECUTION_TYPE_REPLICA_UPDATE, + type=constants.EXECUTION_TYPE_TRANSFER_UPDATE, action_id=mock.sentinel.action_id, tasks=[ mock.Mock( @@ -5641,9 +5625,9 @@ def test_set_task_error_os_morphing( mock_get_tasks_execution.return_value, ) - # migration execution + # deployment execution mock_check_delete_reservation_for_transfer.assert_not_called() - execution.type = constants.EXECUTION_TYPE_MIGRATION + execution.type = constants.EXECUTION_TYPE_DEPLOYMENT self.server.set_task_error( mock.sentinel.context, mock.sentinel.task_id, diff --git a/coriolis/tests/db/test_api.py b/coriolis/tests/db/test_api.py index 76fd5f03..33e90c2d 100644 --- a/coriolis/tests/db/test_api.py +++ b/coriolis/tests/db/test_api.py @@ -6,7 +6,7 @@ import ddt from oslo_utils import timeutils -import sqlalchemy.orm +import sqlalchemy from coriolis import constants from coriolis.db import api @@ -50,7 +50,7 @@ def create_valid_tasks_execution(): valid_tasks_execution = models.TasksExecution() valid_tasks_execution.id = str(uuid.uuid4()) valid_tasks_execution.status = DEFAULT_EXECUTION_STATUS - valid_tasks_execution.type = constants.EXECUTION_TYPE_REPLICA_EXECUTION + valid_tasks_execution.type = constants.EXECUTION_TYPE_TRANSFER_EXECUTION valid_tasks_execution.number = 1 valid_task = models.Task() @@ -59,7 +59,7 @@ def create_valid_tasks_execution(): valid_task.instance = DEFAULT_INSTANCE valid_task.status = constants.TASK_STATUS_RUNNING valid_task.task_type = ( - constants.TASK_TYPE_VALIDATE_REPLICA_SOURCE_INPUTS) + constants.TASK_TYPE_VALIDATE_TRANSFER_SOURCE_INPUTS) valid_task.index = 1 valid_task.on_error = False @@ -109,7 +109,7 @@ def 
setup_scoped_data(cls, region_id, project_id="1"): valid_transfer.user_id = project_id valid_transfer.project_id = project_id valid_transfer.base_id = valid_transfer.id - valid_transfer.scenario = constants.REPLICA_SCENARIO_REPLICA + valid_transfer.scenario = constants.TRANSFER_SCENARIO_REPLICA valid_transfer.last_execution_status = DEFAULT_EXECUTION_STATUS valid_transfer.executions = [] valid_transfer.instances = [DEFAULT_INSTANCE] @@ -514,7 +514,7 @@ def _create_dummy_execution(action): new_tasks_execution.id = str(uuid.uuid4()) new_tasks_execution.action = action new_tasks_execution.status = constants.EXECUTION_STATUS_UNEXECUTED - new_tasks_execution.type = constants.EXECUTION_TYPE_REPLICA_EXECUTION + new_tasks_execution.type = constants.EXECUTION_TYPE_TRANSFER_EXECUTION new_tasks_execution.number = 0 return new_tasks_execution @@ -859,7 +859,7 @@ def setUpClass(cls): 'transfer') @staticmethod - def _create_dummy_transfer(scenario=constants.REPLICA_SCENARIO_REPLICA, + def _create_dummy_transfer(scenario=constants.TRANSFER_SCENARIO_REPLICA, origin_endpoint_id=str(uuid.uuid4()), destination_endpoint_id=str(uuid.uuid4()), project_id=DEFAULT_PROJECT_ID): @@ -903,7 +903,7 @@ def test_get_transfers_include_task_info(self): self.assertTrue(hasattr(result[0], 'info')) def test_get_transfers_transfer_scenario(self): - scenario = constants.REPLICA_SCENARIO_REPLICA + scenario = constants.TRANSFER_SCENARIO_REPLICA result = api.get_transfers(self.context, transfer_scenario=scenario) self.assertTrue(all([res.scenario == scenario for res in result])) @@ -925,7 +925,7 @@ def test_get_transfer_include_task_info(self): def test_get_transfer_by_scenario(self): result = api.get_transfer( self.context, self.valid_transfer.id, - transfer_scenario=constants.REPLICA_SCENARIO_REPLICA) + transfer_scenario=constants.TRANSFER_SCENARIO_REPLICA) self.assertEqual(result, self.valid_transfer) def test_get_transfer_out_of_user_scope(self): @@ -947,7 +947,7 @@ def 
test_get_endpoint_transfers_count(self): origin_endpoint_id=origin_endpoint_id, destination_endpoint_id=dest_endpoint_id) dummy_transfer_migration = self._create_dummy_transfer( - scenario=constants.REPLICA_SCENARIO_LIVE_MIGRATION, + scenario=constants.TRANSFER_SCENARIO_LIVE_MIGRATION, origin_endpoint_id=origin_endpoint_id, destination_endpoint_id=dest_endpoint_id) self.session.add(dummy_transfer_replica) @@ -959,7 +959,7 @@ def test_get_endpoint_transfers_count(self): result = api.get_endpoint_transfers_count( self.context, origin_endpoint_id, - transfer_scenario=constants.REPLICA_SCENARIO_REPLICA) + transfer_scenario=constants.TRANSFER_SCENARIO_REPLICA) self.assertEqual(result, 1) def test_add_transfer(self): diff --git a/coriolis/tests/minion_manager/rpc/test_client.py b/coriolis/tests/minion_manager/rpc/test_client.py index c2e56936..6ed7e3b3 100644 --- a/coriolis/tests/minion_manager/rpc/test_client.py +++ b/coriolis/tests/minion_manager/rpc/test_client.py @@ -125,8 +125,9 @@ def test_validate_minion_pool_selections_for_action(self): def test_allocate_minion_machines_for_replica(self): args = {"replica": "test_replica"} self._test( - self.client.allocate_minion_machines_for_replica, args, + self.client.allocate_minion_machines_for_transfer, args, rpc_op='_cast', + server_fun_name='allocate_minion_machines_for_replica' ) def test_allocate_minion_machines_for_migration(self): @@ -136,8 +137,9 @@ def test_allocate_minion_machines_for_migration(self): "include_osmorphing_minions": True } self._test( - self.client.allocate_minion_machines_for_migration, args, + self.client.allocate_minion_machines_for_deployment, args, rpc_op='_cast', + server_fun_name='allocate_minion_machines_for_migration' ) def test_deallocate_minion_machine(self): diff --git a/coriolis/tests/minion_manager/rpc/test_tasks.py b/coriolis/tests/minion_manager/rpc/test_tasks.py index e32e5675..42990f8c 100644 --- a/coriolis/tests/minion_manager/rpc/test_tasks.py +++ 
b/coriolis/tests/minion_manager/rpc/test_tasks.py @@ -233,15 +233,15 @@ def test_get_task_name(self): ) @mock.patch.object( - ConductorClient, 'report_migration_minions_allocation_error' + ConductorClient, 'report_deployment_minions_allocation_error' ) def test__report_machine_allocation_failure( - self, mock_report_migration_minions_allocation_error): + self, mock_report_depl_minions_alloation_error): result = self.task._report_machine_allocation_failure( mock.sentinel.context, self.action_id, mock.sentinel.failure_str) self.assertIsNone(result) - mock_report_migration_minions_allocation_error.assert_called_once_with( + mock_report_depl_minions_alloation_error.assert_called_once_with( mock.sentinel.context, self.action_id, mock.sentinel.failure_str ) @@ -263,15 +263,15 @@ def test_get_task_name(self): ) @mock.patch.object( - ConductorClient, 'report_replica_minions_allocation_error' + ConductorClient, 'report_transfer_minions_allocation_error' ) def test__report_machine_allocation_failure( - self, mock_report_replica_minions_allocation_error): + self, mock_report_transfer_minions_allocation_error): result = self.task._report_machine_allocation_failure( mock.sentinel.context, self.action_id, mock.sentinel.failure_str) self.assertIsNone(result) - mock_report_replica_minions_allocation_error.assert_called_once_with( + mock_report_transfer_minions_allocation_error.assert_called_once_with( mock.sentinel.context, self.action_id, mock.sentinel.failure_str ) @@ -465,7 +465,7 @@ def test_execute_raises_exception_when_invalid_migration_state( mock_get_action_label): mock_get_minion_machine.return_value = self.minion_machine mock_confirm_allocation.side_effect = [ - exception.InvalidReplicaState(reason='Invalid state')] + exception.InvalidTransferState(reason='Invalid state')] self.assertRaises( exception.MinionMachineAllocationFailure, @@ -500,16 +500,16 @@ def test_get_task_name(self): ) @mock.patch.object( - ConductorClient, 'confirm_migration_minions_allocation' + 
ConductorClient, 'confirm_deployment_minions_allocation' ) def test__confirm_machine_allocation_for_action( - self, mock_confirm_migration_minions_allocation): + self, mock_confirm_deployment_minions_allocation): result = self.task._confirm_machine_allocation_for_action( mock.sentinel.context, self.action_id, mock.sentinel.machine_allocations) self.assertIsNone(result) - mock_confirm_migration_minions_allocation.assert_called_once_with( + mock_confirm_deployment_minions_allocation.assert_called_once_with( mock.sentinel.context, self.action_id, mock.sentinel.machine_allocations) @@ -538,16 +538,16 @@ def test_get_task_name(self): ) @mock.patch.object( - ConductorClient, 'confirm_replica_minions_allocation' + ConductorClient, 'confirm_transfer_minions_allocation' ) def test__confirm_machine_allocation_for_action( - self, mock_confirm_replica_minions_allocation): + self, mock_confirm_transfer_minions_allocation): result = self.task._confirm_machine_allocation_for_action( mock.sentinel.context, mock.sentinel.action_id, mock.sentinel.machine_allocations) self.assertIsNone(result) - mock_confirm_replica_minions_allocation.assert_called_once_with( + mock_confirm_transfer_minions_allocation.assert_called_once_with( mock.sentinel.context, mock.sentinel.action_id, mock.sentinel.machine_allocations) diff --git a/coriolis/tests/replica_cron/test_api.py b/coriolis/tests/replica_cron/test_api.py deleted file mode 100644 index a770b4e2..00000000 --- a/coriolis/tests/replica_cron/test_api.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright 2024 Cloudbase Solutions Srl -# All Rights Reserved. 
- -from unittest import mock - -from coriolis.replica_cron import api as replicas_cron_module -from coriolis.tests import test_base - - -class APITestCase(test_base.CoriolisBaseTestCase): - """Test suite for the Coriolis API class.""" - - def setUp(self): - super(APITestCase, self).setUp() - self.api = replicas_cron_module.API() - self.rpc_client = mock.MagicMock() - self.api._rpc_client = self.rpc_client - self.ctxt = mock.sentinel.ctxt - self.replica_id = mock.sentinel.transfer_id - self.schedule_id = mock.sentinel.schedule_id - - def test_create(self): - schedule = mock.sentinel.schedule - enabled = mock.sentinel.enabled - exp_date = mock.sentinel.exp_date - shutdown_instance = mock.sentinel.shutdown_instance - - result = self.api.create( - self.ctxt, self.replica_id, schedule, enabled, exp_date, - shutdown_instance) - - self.rpc_client.create_replica_schedule.assert_called_once_with( - self.ctxt, self.replica_id, schedule, enabled, exp_date, - shutdown_instance) - self.assertEqual(result, - self.rpc_client.create_replica_schedule.return_value) - - def test_get_schedules(self): - result = self.api.get_schedules(self.ctxt, self.replica_id) - - self.rpc_client.get_replica_schedules.assert_called_once_with( - self.ctxt, self.replica_id, expired=True) - self.assertEqual(result, - self.rpc_client.get_replica_schedules.return_value) - - def test_get_schedule(self): - result = self.api.get_schedule(self.ctxt, self.replica_id, - self.schedule_id) - - self.rpc_client.get_replica_schedule.assert_called_once_with( - self.ctxt, self.replica_id, self.schedule_id, expired=True) - self.assertEqual(result, - self.rpc_client.get_replica_schedule.return_value) - - def test_update(self): - update_values = mock.sentinel.update_values - - result = self.api.update(self.ctxt, self.replica_id, self.schedule_id, - update_values) - - self.rpc_client.update_replica_schedule.assert_called_once_with( - self.ctxt, self.replica_id, self.schedule_id, update_values) - self.assertEqual(result, - 
self.rpc_client.update_replica_schedule.return_value) - - def test_delete(self): - self.api.delete(self.ctxt, self.replica_id, self.schedule_id) - self.rpc_client.delete_replica_schedule.assert_called_once_with( - self.ctxt, self.replica_id, self.schedule_id) diff --git a/coriolis/tests/replica_tasks_executions/test_api.py b/coriolis/tests/replica_tasks_executions/test_api.py index 077e2876..1a216295 100644 --- a/coriolis/tests/replica_tasks_executions/test_api.py +++ b/coriolis/tests/replica_tasks_executions/test_api.py @@ -16,50 +16,52 @@ def setUp(self): self.rpc_client = mock.MagicMock() self.api._rpc_client = self.rpc_client self.ctxt = mock.sentinel.ctxt - self.replica_id = mock.sentinel.transfer_id + self.transfer_id = mock.sentinel.transfer_id self.execution_id = mock.sentinel.execution_id def test_create(self): shutdown_instances = mock.sentinel.shutdown_instances - result = self.api.create(self.ctxt, self.replica_id, + result = self.api.create(self.ctxt, self.transfer_id, shutdown_instances) - self.rpc_client.execute_replica_tasks.assert_called_once_with( - self.ctxt, self.replica_id, shutdown_instances) + self.rpc_client.execute_transfer_tasks.assert_called_once_with( + self.ctxt, self.transfer_id, shutdown_instances) self.assertEqual(result, - self.rpc_client.execute_replica_tasks.return_value) + self.rpc_client.execute_transfer_tasks.return_value) def test_delete(self): - self.api.delete(self.ctxt, self.replica_id, self.execution_id) + self.api.delete(self.ctxt, self.transfer_id, self.execution_id) - self.rpc_client.delete_replica_tasks_execution.assert_called_once_with( - self.ctxt, self.replica_id, self.execution_id) + (self.rpc_client.delete_transfer_tasks_execution + .assert_called_once_with( + self.ctxt, self.transfer_id, self.execution_id)) def test_cancel(self): force = mock.sentinel.force - self.api.cancel(self.ctxt, self.replica_id, self.execution_id, force) + self.api.cancel(self.ctxt, self.transfer_id, self.execution_id, force) - 
self.rpc_client.cancel_replica_tasks_execution.assert_called_once_with( - self.ctxt, self.replica_id, self.execution_id, force) + (self.rpc_client.cancel_transfer_tasks_execution + .assert_called_once_with( + self.ctxt, self.transfer_id, self.execution_id, force)) def test_get_executions(self): include_tasks = mock.sentinel.include_tasks - result = self.api.get_executions(self.ctxt, self.replica_id, + result = self.api.get_executions(self.ctxt, self.transfer_id, include_tasks) - self.rpc_client.get_replica_tasks_executions.assert_called_once_with( - self.ctxt, self.replica_id, include_tasks) + self.rpc_client.get_transfer_tasks_executions.assert_called_once_with( + self.ctxt, self.transfer_id, include_tasks) self.assertEqual( - result, self.rpc_client.get_replica_tasks_executions.return_value) + result, self.rpc_client.get_transfer_tasks_executions.return_value) def test_get_execution(self): - result = self.api.get_execution(self.ctxt, self.replica_id, + result = self.api.get_execution(self.ctxt, self.transfer_id, self.execution_id) - self.rpc_client.get_replica_tasks_execution.assert_called_once_with( - self.ctxt, self.replica_id, self.execution_id) + self.rpc_client.get_transfer_tasks_execution.assert_called_once_with( + self.ctxt, self.transfer_id, self.execution_id) self.assertEqual( - result, self.rpc_client.get_replica_tasks_execution.return_value) + result, self.rpc_client.get_transfer_tasks_execution.return_value) diff --git a/coriolis/tests/replicas/test_api.py b/coriolis/tests/replicas/test_api.py index a948da8f..69d1fede 100644 --- a/coriolis/tests/replicas/test_api.py +++ b/coriolis/tests/replicas/test_api.py @@ -16,7 +16,7 @@ def setUp(self): self.rpc_client = mock.MagicMock() self.api._rpc_client = self.rpc_client self.ctxt = mock.sentinel.ctxt - self.replica_id = mock.sentinel.transfer_id + self.transfer_id = mock.sentinel.transfer_id def test_create(self): origin_endpoint_id = mock.sentinel.origin_endpoint_id @@ -32,57 +32,57 @@ def 
test_create(self): storage_mappings = mock.sentinel.storage_mappings result = self.api.create( - self.ctxt, mock.sentinel.replica_scenario, + self.ctxt, mock.sentinel.transfer_scenario, origin_endpoint_id, destination_endpoint_id, origin_minion_pool_id, destination_minion_pool_id, instance_osmorphing_minion_pool_mappings, source_environment, destination_environment, instances, network_map, storage_mappings) - self.rpc_client.create_instances_replica.assert_called_once_with( - self.ctxt, mock.sentinel.replica_scenario, + self.rpc_client.create_instances_transfer.assert_called_once_with( + self.ctxt, mock.sentinel.transfer_scenario, origin_endpoint_id, destination_endpoint_id, origin_minion_pool_id, destination_minion_pool_id, instance_osmorphing_minion_pool_mappings, source_environment, destination_environment, instances, network_map, storage_mappings, None, None) - self.assertEqual(result, - self.rpc_client.create_instances_replica.return_value) + self.assertEqual( + result, self.rpc_client.create_instances_transfer.return_value) def test_update(self): updated_properties = mock.sentinel.updated_properties - result = self.api.update(self.ctxt, self.replica_id, + result = self.api.update(self.ctxt, self.transfer_id, updated_properties) - self.rpc_client.update_replica.assert_called_once_with( - self.ctxt, self.replica_id, updated_properties) + self.rpc_client.update_transfer.assert_called_once_with( + self.ctxt, self.transfer_id, updated_properties) self.assertEqual(result, - self.rpc_client.update_replica.return_value) + self.rpc_client.update_transfer.return_value) def test_delete(self): - self.api.delete(self.ctxt, self.replica_id) - self.rpc_client.delete_replica.assert_called_once_with( - self.ctxt, self.replica_id) + self.api.delete(self.ctxt, self.transfer_id) + self.rpc_client.delete_transfer.assert_called_once_with( + self.ctxt, self.transfer_id) def test_get_replicas(self): result = self.api.get_replicas( self.ctxt, include_tasks_executions=False, 
include_task_info=False) - self.rpc_client.get_replicas.assert_called_once_with( + self.rpc_client.get_transfers.assert_called_once_with( self.ctxt, False, include_task_info=False) - self.assertEqual(result, self.rpc_client.get_replicas.return_value) + self.assertEqual(result, self.rpc_client.get_transfers.return_value) def test_get_replica(self): - result = self.api.get_replica(self.ctxt, self.replica_id) + result = self.api.get_replica(self.ctxt, self.transfer_id) - self.rpc_client.get_replica.assert_called_once_with( - self.ctxt, self.replica_id, include_task_info=False) - self.assertEqual(result, self.rpc_client.get_replica.return_value) + self.rpc_client.get_transfer.assert_called_once_with( + self.ctxt, self.transfer_id, include_task_info=False) + self.assertEqual(result, self.rpc_client.get_transfer.return_value) def test_delete_disks(self): - result = self.api.delete_disks(self.ctxt, self.replica_id) + result = self.api.delete_disks(self.ctxt, self.transfer_id) - self.rpc_client.delete_replica_disks.assert_called_once_with( - self.ctxt, self.replica_id) + self.rpc_client.delete_transfer_disks.assert_called_once_with( + self.ctxt, self.transfer_id) self.assertEqual(result, - self.rpc_client.delete_replica_disks.return_value) + self.rpc_client.delete_transfer_disks.return_value) diff --git a/coriolis/tests/tasks/test_osmorphing_tasks.py b/coriolis/tests/tasks/test_osmorphing_tasks.py index 4485fc0a..13402f45 100644 --- a/coriolis/tests/tasks/test_osmorphing_tasks.py +++ b/coriolis/tests/tasks/test_osmorphing_tasks.py @@ -35,10 +35,10 @@ def test__run(self, mock_morph_image, mock_unmarshal, mock_get_provider, destination = mock.MagicMock() expected_calls = [ mock.call.mock_get_provider( - origin['type'], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin['type'], constants.PROVIDER_TYPE_TRANSFER_EXPORT, mock.sentinel.event_handler), mock.call.mock_get_provider( - destination['type'], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination['type'], 
constants.PROVIDER_TYPE_TRANSFER_IMPORT, mock.sentinel.event_handler), ] diff --git a/coriolis/tests/tasks/test_replica_tasks.py b/coriolis/tests/tasks/test_replica_tasks.py index a5d9679b..b5f75a20 100644 --- a/coriolis/tests/tasks/test_replica_tasks.py +++ b/coriolis/tests/tasks/test_replica_tasks.py @@ -75,7 +75,7 @@ def test__run(self, mock_validate_value, mock_get_conn_info, mock.sentinel.destiantion, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - origin['type'], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin['type'], constants.PROVIDER_TYPE_TRANSFER_EXPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with(mock.sentinel.ctxt, origin) prov_fun.assert_called_once_with( @@ -103,7 +103,7 @@ def test__run(self, mock_get_conn_info, mock_get_provider): mock.sentinel.destiantion, task_info, mock.sentinel.event_handler) self.assertEqual(result, {}) mock_get_provider.assert_called_once_with( - origin['type'], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin['type'], constants.PROVIDER_TYPE_TRANSFER_EXPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with(mock.sentinel.ctxt, origin) prov_fun.assert_called_once_with( @@ -157,7 +157,7 @@ def test__run(self, mock_unmarshal, mock_check_vol_info, mock_get_vol_info, mock.sentinel.destiantion, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - origin['type'], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin['type'], constants.PROVIDER_TYPE_TRANSFER_EXPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with(mock.sentinel.ctxt, origin) mock_get_vol_info.assert_called_once_with(task_info) @@ -196,7 +196,7 @@ def test__run(self, mock_check_vol_info, mock_validate_value, destination, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - 
destination['type'], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination['type'], constants.PROVIDER_TYPE_TRANSFER_IMPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) @@ -235,7 +235,7 @@ def test__run(self, mock_get_vol_info, mock_get_conn_info, mock.sentinel.destination, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - origin['type'], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin['type'], constants.PROVIDER_TYPE_TRANSFER_EXPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, origin) @@ -278,7 +278,7 @@ def test__run(self, mock_get_vol_info, mock_get_conn_info, destination, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - destination['type'], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination['type'], constants.PROVIDER_TYPE_TRANSFER_IMPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) @@ -346,7 +346,7 @@ def _get_result(): self.assertEqual(_get_result(), expected_result) mock_get_provider.assert_called_once_with( - origin['type'], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin['type'], constants.PROVIDER_TYPE_TRANSFER_EXPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, origin) @@ -377,7 +377,7 @@ def test__run(self, mock_get_conn_info, mock_get_provider): mock.sentinel.destination, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - origin['type'], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin['type'], constants.PROVIDER_TYPE_TRANSFER_EXPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, origin) @@ -400,7 +400,7 @@ def test__run_no_resources(self, 
mock_get_conn_info, mock_get_provider): mock.sentinel.destination, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - origin['type'], constants.PROVIDER_TYPE_REPLICA_EXPORT, + origin['type'], constants.PROVIDER_TYPE_TRANSFER_EXPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, origin) @@ -469,7 +469,7 @@ def test__run(self, mock_validate_value, mock_check_vol_info, destination, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - destination['type'], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination['type'], constants.PROVIDER_TYPE_TRANSFER_IMPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) @@ -511,7 +511,7 @@ def test__run(self, data, mock_get_conn_info, mock_get_provider): destination, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - destination['type'], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination['type'], constants.PROVIDER_TYPE_TRANSFER_IMPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) @@ -548,7 +548,7 @@ def test__run(self, mock_get_vol_info, mock_get_conn_info, destination, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - destination['type'], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination['type'], constants.PROVIDER_TYPE_TRANSFER_IMPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) @@ -581,7 +581,7 @@ def test__run(self, mock_get_conn_info, mock_get_provider): destination, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - 
destination['type'], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination['type'], constants.PROVIDER_TYPE_TRANSFER_IMPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) @@ -608,7 +608,7 @@ def test__run_no_result(self, mock_get_conn_info, mock_get_provider): mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - destination['type'], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination['type'], constants.PROVIDER_TYPE_TRANSFER_IMPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) @@ -640,7 +640,7 @@ def test__run(self, mock_get_conn_info, mock_get_provider): destination, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - destination['type'], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination['type'], constants.PROVIDER_TYPE_TRANSFER_IMPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) @@ -676,7 +676,7 @@ def test__run(self, mock_check_ensure_volumes_ordering, destination, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - destination['type'], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination['type'], constants.PROVIDER_TYPE_TRANSFER_IMPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) @@ -719,7 +719,7 @@ def test__run(self, mock_check_ensure_volumes_ordering, destination, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - destination['type'], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination['type'], constants.PROVIDER_TYPE_TRANSFER_IMPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, 
destination) @@ -761,7 +761,7 @@ def test__run(self, mock_check_ensure_volumes_ordering, destination, task_info, mock.sentinel.event_handler) self.assertEqual(result, expected_result) mock_get_provider.assert_called_once_with( - destination['type'], constants.PROVIDER_TYPE_REPLICA_IMPORT, + destination['type'], constants.PROVIDER_TYPE_TRANSFER_IMPORT, mock.sentinel.event_handler) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) @@ -799,7 +799,7 @@ def test__run(self, mock_get_conn_info, mock_get_provider, self.assertEqual(result, {}) mock_event_manager.assert_called_once_with(mock.sentinel.event_handler) mock_get_provider.assert_called_once_with( - origin['type'], constants.PROVIDER_TYPE_VALIDATE_REPLICA_EXPORT, + origin['type'], constants.PROVIDER_TYPE_VALIDATE_TRANSFER_EXPORT, mock.sentinel.event_handler, raise_if_not_found=False) mock_get_conn_info.assert_called_once_with(mock.sentinel.ctxt, origin) prov_fun.assert_called_once_with( @@ -824,7 +824,7 @@ def test__run_no_source_provider(self, mock_get_conn_info, self.assertEqual(result, {}) mock_event_manager.assert_called_once_with(mock.sentinel.event_handler) mock_get_provider.assert_called_once_with( - origin['type'], constants.PROVIDER_TYPE_VALIDATE_REPLICA_EXPORT, + origin['type'], constants.PROVIDER_TYPE_VALIDATE_TRANSFER_EXPORT, mock.sentinel.event_handler, raise_if_not_found=False) mock_get_conn_info.assert_called_once_with(mock.sentinel.ctxt, origin) prov_fun.assert_not_called() @@ -869,7 +869,7 @@ def test__run(self, mock_validate_replica_inputs, mock_get_conn_info, self.assertEqual(result, {}) mock_get_provider.assert_called_once_with( destination['type'], - constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT, None, + constants.PROVIDER_TYPE_VALIDATE_TRANSFER_IMPORT, None, raise_if_not_found=False) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) @@ -896,7 +896,7 @@ def test__run_no_destination_provider( self.assertEqual(result, {}) 
mock_get_provider.assert_called_once_with( destination['type'], - constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT, None, + constants.PROVIDER_TYPE_VALIDATE_TRANSFER_IMPORT, None, raise_if_not_found=False) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) @@ -920,7 +920,7 @@ def test__run_no_export_info( destination, task_info, None) mock_get_provider.assert_called_once_with( destination['type'], - constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT, None, + constants.PROVIDER_TYPE_VALIDATE_TRANSFER_IMPORT, None, raise_if_not_found=False) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) @@ -960,7 +960,7 @@ def test__run(self, mock_validate_value, mock_get_conn_info, task_info['export_info'], schemas.CORIOLIS_VM_EXPORT_INFO_SCHEMA) mock_get_provider.assert_called_once_with( destination['type'], - constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT, + constants.PROVIDER_TYPE_VALIDATE_TRANSFER_IMPORT, mock.sentinel.event_handler, raise_if_not_found=False) prov_fun.assert_called_once_with( mock.sentinel.ctxt, mock_get_conn_info.return_value, @@ -991,7 +991,7 @@ def test__run_no_dest_provider( task_info['export_info'], schemas.CORIOLIS_VM_EXPORT_INFO_SCHEMA) mock_get_provider.assert_called_once_with( destination['type'], - constants.PROVIDER_TYPE_VALIDATE_REPLICA_IMPORT, + constants.PROVIDER_TYPE_VALIDATE_TRANSFER_IMPORT, mock.sentinel.event_handler, raise_if_not_found=False) prov_fun.assert_not_called() @@ -1058,7 +1058,7 @@ def test__run_no_source_provider( mock_event_manager.assert_called_once_with(mock.sentinel.event_handler) mock_get_provider.assert_called_once_with( - origin['type'], constants.PROVIDER_TYPE_SOURCE_REPLICA_UPDATE, + origin['type'], constants.PROVIDER_TYPE_SOURCE_TRANSFER_UPDATE, mock.sentinel.event_handler, raise_if_not_found=False) mock_get_conn_info.assert_not_called() mock_validate_value.assert_not_called() @@ -1092,7 +1092,7 @@ def test__run_no_volumes_info( 
mock_event_manager.assert_called_once_with(mock.sentinel.event_handler) mock_get_provider.assert_called_once_with( - origin['type'], constants.PROVIDER_TYPE_SOURCE_REPLICA_UPDATE, + origin['type'], constants.PROVIDER_TYPE_SOURCE_TRANSFER_UPDATE, mock.sentinel.event_handler, raise_if_not_found=False) mock_get_conn_info.assert_called_once_with(mock.sentinel.ctxt, origin) mock_validate_value.assert_not_called() @@ -1127,7 +1127,7 @@ def test__run( mock_event_manager.assert_called_once_with(mock.sentinel.event_handler) mock_get_provider.assert_called_once_with( - origin['type'], constants.PROVIDER_TYPE_SOURCE_REPLICA_UPDATE, + origin['type'], constants.PROVIDER_TYPE_SOURCE_TRANSFER_UPDATE, mock.sentinel.event_handler, raise_if_not_found=False) mock_get_conn_info.assert_called_once_with(mock.sentinel.ctxt, origin) mock_validate_value.assert_called_once_with( @@ -1201,7 +1201,7 @@ def test__run_no_dest_provider( mock_event_manager.assert_called_once_with(mock.sentinel.event_handler) mock_get_provider.assert_called_once_with( destination['type'], - constants.PROVIDER_TYPE_DESTINATION_REPLICA_UPDATE, + constants.PROVIDER_TYPE_DESTINATION_TRANSFER_UPDATE, mock.sentinel.event_handler, raise_if_not_found=False) mock_get_conn_info.assert_not_called() mock_validate_value.assert_not_called() @@ -1237,7 +1237,7 @@ def test__run_no_volumes_info( mock_event_manager.assert_called_once_with(mock.sentinel.event_handler) mock_get_provider.assert_called_once_with( destination['type'], - constants.PROVIDER_TYPE_DESTINATION_REPLICA_UPDATE, + constants.PROVIDER_TYPE_DESTINATION_TRANSFER_UPDATE, mock.sentinel.event_handler, raise_if_not_found=False) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) @@ -1276,7 +1276,7 @@ def test__run( mock_event_manager.assert_called_once_with(mock.sentinel.event_handler) mock_get_provider.assert_called_once_with( destination['type'], - constants.PROVIDER_TYPE_DESTINATION_REPLICA_UPDATE, + 
constants.PROVIDER_TYPE_DESTINATION_TRANSFER_UPDATE, mock.sentinel.event_handler, raise_if_not_found=False) mock_get_conn_info.assert_called_once_with( mock.sentinel.ctxt, destination) diff --git a/coriolis/replica_cron/__init__.py b/coriolis/tests/transfer_cron/__init__.py similarity index 100% rename from coriolis/replica_cron/__init__.py rename to coriolis/tests/transfer_cron/__init__.py diff --git a/coriolis/replica_cron/rpc/__init__.py b/coriolis/tests/transfer_cron/rpc/__init__.py similarity index 100% rename from coriolis/replica_cron/rpc/__init__.py rename to coriolis/tests/transfer_cron/rpc/__init__.py diff --git a/coriolis/tests/replica_cron/rpc/test_client.py b/coriolis/tests/transfer_cron/rpc/test_client.py similarity index 91% rename from coriolis/tests/replica_cron/rpc/test_client.py rename to coriolis/tests/transfer_cron/rpc/test_client.py index bcb9157c..a6e9c084 100644 --- a/coriolis/tests/replica_cron/rpc/test_client.py +++ b/coriolis/tests/transfer_cron/rpc/test_client.py @@ -3,8 +3,8 @@ from unittest import mock -from coriolis.replica_cron.rpc import client as rpc_client from coriolis.tests import test_base +from coriolis.transfer_cron.rpc import client as rpc_client class ReplicaCronClientTestCase(test_base.CoriolisBaseTestCase): @@ -12,7 +12,7 @@ class ReplicaCronClientTestCase(test_base.CoriolisBaseTestCase): def setUp(self): super(ReplicaCronClientTestCase, self).setUp() - self.client = rpc_client.ReplicaCronClient() + self.client = rpc_client.TransferCronClient() self.ctxt = mock.MagicMock() def test_register(self): diff --git a/coriolis/tests/replica_cron/rpc/test_server.py b/coriolis/tests/transfer_cron/rpc/test_server.py similarity index 90% rename from coriolis/tests/replica_cron/rpc/test_server.py rename to coriolis/tests/transfer_cron/rpc/test_server.py index 1fdb3ca3..a0270b46 100644 --- a/coriolis/tests/replica_cron/rpc/test_server.py +++ b/coriolis/tests/transfer_cron/rpc/test_server.py @@ -9,8 +9,8 @@ from coriolis.conductor.rpc 
import client as rpc_client from coriolis import exception -from coriolis.replica_cron.rpc import server from coriolis.tests import test_base +from coriolis.transfer_cron.rpc import server class TriggerReplicaTestCase(test_base.CoriolisBaseTestCase): @@ -19,7 +19,7 @@ class TriggerReplicaTestCase(test_base.CoriolisBaseTestCase): def test__trigger_replica(self): mock_conductor_client = mock.MagicMock() - mock_conductor_client.execute_replica_tasks.return_value = { + mock_conductor_client.execute_transfer_tasks.return_value = { 'id': mock.sentinel.id, 'action_id': mock.sentinel.action_id } @@ -29,20 +29,20 @@ def test__trigger_replica(self): mock_conductor_client, mock.sentinel.transfer_id, False) - mock_conductor_client.execute_replica_tasks.assert_called_once_with( + mock_conductor_client.execute_transfer_tasks.assert_called_once_with( mock.sentinel.ctxt, mock.sentinel.transfer_id, False) self.assertEqual( result, 'Execution %s for Replica %s' % ( mock.sentinel.id, mock.sentinel.action_id)) - def test__trigger_replica_invalid_replica_state(self): + def test__trigger_transfer_invalid_replica_state(self): mock_conductor_client = mock.MagicMock() - mock_conductor_client.execute_replica_tasks.side_effect = ( - exception.InvalidReplicaState(reason='test_reason')) + mock_conductor_client.execute_transfer_tasks.side_effect = ( + exception.InvalidTransferState(reason='test_reason')) - with self.assertLogs('coriolis.replica_cron.rpc.server', + with self.assertLogs('coriolis.transfer_cron.rpc.server', level=logging.INFO): server._trigger_replica( mock.sentinel.ctxt, @@ -131,7 +131,7 @@ def test__register_schedule_expired(self, mock_utcnow, 'shutdown_instance': 'test_schedule_shutdown_instance' } - with self.assertLogs('coriolis.replica_cron.rpc.server', + with self.assertLogs('coriolis.transfer_cron.rpc.server', level=logging.INFO): self.server._register_schedule(test_schedule) @@ -170,7 +170,7 @@ def test__init_cron_with_exception(self, mock_register_schedule, ] 
mock_register_schedule.side_effect = Exception('test_exception') - with self.assertLogs('coriolis.replica_cron.rpc.server', + with self.assertLogs('coriolis.transfer_cron.rpc.server', level=logging.ERROR): self.server._init_cron() @@ -180,14 +180,14 @@ def test__init_cron_with_exception(self, mock_register_schedule, mock.call({'id': 'schedule2'}, date=mock.ANY), ]) - @mock.patch.object(rpc_client.ConductorClient, 'get_replica_schedules') - def test__get_all_schedules(self, mock_get_replica_schedules): + @mock.patch.object(rpc_client.ConductorClient, 'get_transfer_schedules') + def test__get_all_schedules(self, mock_get_transfer_schedules): result = self.server._get_all_schedules() - mock_get_replica_schedules.assert_called_once_with( + mock_get_transfer_schedules.assert_called_once_with( self.server._admin_ctx, expired=False) - self.assertEqual(result, mock_get_replica_schedules.return_value) + self.assertEqual(result, mock_get_transfer_schedules.return_value) @mock.patch.object(server.ReplicaCronServerEndpoint, '_register_schedule') @mock.patch.object(server.timeutils, 'utcnow') diff --git a/coriolis/tests/transfer_cron/test_api.py b/coriolis/tests/transfer_cron/test_api.py new file mode 100644 index 00000000..94e57341 --- /dev/null +++ b/coriolis/tests/transfer_cron/test_api.py @@ -0,0 +1,69 @@ +# Copyright 2024 Cloudbase Solutions Srl +# All Rights Reserved. 
+ +from unittest import mock + +from coriolis.tests import test_base +from coriolis.transfer_cron import api as transfers_cron_module + + +class APITestCase(test_base.CoriolisBaseTestCase): + """Test suite for the Coriolis API class.""" + + def setUp(self): + super(APITestCase, self).setUp() + self.api = transfers_cron_module.API() + self.rpc_client = mock.MagicMock() + self.api._rpc_client = self.rpc_client + self.ctxt = mock.sentinel.ctxt + self.transfer_id = mock.sentinel.transfer_id + self.schedule_id = mock.sentinel.schedule_id + + def test_create(self): + schedule = mock.sentinel.schedule + enabled = mock.sentinel.enabled + exp_date = mock.sentinel.exp_date + shutdown_instance = mock.sentinel.shutdown_instance + + result = self.api.create( + self.ctxt, self.transfer_id, schedule, enabled, exp_date, + shutdown_instance) + + self.rpc_client.create_transfer_schedule.assert_called_once_with( + self.ctxt, self.transfer_id, schedule, enabled, exp_date, + shutdown_instance) + self.assertEqual(result, + self.rpc_client.create_transfer_schedule.return_value) + + def test_get_schedules(self): + result = self.api.get_schedules(self.ctxt, self.transfer_id) + + self.rpc_client.get_transfer_schedules.assert_called_once_with( + self.ctxt, self.transfer_id, expired=True) + self.assertEqual(result, + self.rpc_client.get_transfer_schedules.return_value) + + def test_get_schedule(self): + result = self.api.get_schedule(self.ctxt, self.transfer_id, + self.schedule_id) + + self.rpc_client.get_transfer_schedule.assert_called_once_with( + self.ctxt, self.transfer_id, self.schedule_id, expired=True) + self.assertEqual(result, + self.rpc_client.get_transfer_schedule.return_value) + + def test_update(self): + update_values = mock.sentinel.update_values + + result = self.api.update(self.ctxt, self.transfer_id, self.schedule_id, + update_values) + + self.rpc_client.update_transfer_schedule.assert_called_once_with( + self.ctxt, self.transfer_id, self.schedule_id, update_values) + 
self.assertEqual(result, + self.rpc_client.update_transfer_schedule.return_value) + + def test_delete(self): + self.api.delete(self.ctxt, self.transfer_id, self.schedule_id) + self.rpc_client.delete_transfer_schedule.assert_called_once_with( + self.ctxt, self.transfer_id, self.schedule_id) diff --git a/coriolis/tests/worker/rpc/test_server.py b/coriolis/tests/worker/rpc/test_server.py index c5d30410..b38180cb 100644 --- a/coriolis/tests/worker/rpc/test_server.py +++ b/coriolis/tests/worker/rpc/test_server.py @@ -957,7 +957,7 @@ def test_validate_endpoint_source_environment( mock_get_provider.assert_called_once_with( mock.sentinel.source_platform_name, - constants.PROVIDER_TYPE_REPLICA_EXPORT, + constants.PROVIDER_TYPE_TRANSFER_EXPORT, None, ) mock_validate.assert_called_once_with( @@ -1140,11 +1140,11 @@ def call_validate_endpoint_connection(): "connection_info_schema" ), ( - constants.PROVIDER_TYPE_REPLICA_IMPORT, + constants.PROVIDER_TYPE_TRANSFER_IMPORT, "destination_environment_schema", ), ( - constants.PROVIDER_TYPE_REPLICA_EXPORT, + constants.PROVIDER_TYPE_TRANSFER_EXPORT, "source_environment_schema" ), ( diff --git a/coriolis/tests/replica_cron/__init__.py b/coriolis/transfer_cron/__init__.py similarity index 100% rename from coriolis/tests/replica_cron/__init__.py rename to coriolis/transfer_cron/__init__.py diff --git a/coriolis/replica_cron/api.py b/coriolis/transfer_cron/api.py similarity index 76% rename from coriolis/replica_cron/api.py rename to coriolis/transfer_cron/api.py index 42681473..6fc03730 100644 --- a/coriolis/replica_cron/api.py +++ b/coriolis/transfer_cron/api.py @@ -10,22 +10,22 @@ def __init__(self): def create(self, ctxt, replica_id, schedule, enabled, exp_date, shutdown_instance): - return self._rpc_client.create_replica_schedule( + return self._rpc_client.create_transfer_schedule( ctxt, replica_id, schedule, enabled, exp_date, shutdown_instance) def get_schedules(self, ctxt, replica_id, expired=True): - return 
self._rpc_client.get_replica_schedules( + return self._rpc_client.get_transfer_schedules( ctxt, replica_id, expired=expired) def get_schedule(self, ctxt, replica_id, schedule_id, expired=True): - return self._rpc_client.get_replica_schedule( + return self._rpc_client.get_transfer_schedule( ctxt, replica_id, schedule_id, expired=expired) def update(self, ctxt, replica_id, schedule_id, update_values): - return self._rpc_client.update_replica_schedule( + return self._rpc_client.update_transfer_schedule( ctxt, replica_id, schedule_id, update_values) def delete(self, ctxt, replica_id, schedule_id): - self._rpc_client.delete_replica_schedule( + self._rpc_client.delete_transfer_schedule( ctxt, replica_id, schedule_id) diff --git a/coriolis/tests/replica_cron/rpc/__init__.py b/coriolis/transfer_cron/rpc/__init__.py similarity index 100% rename from coriolis/tests/replica_cron/rpc/__init__.py rename to coriolis/transfer_cron/rpc/__init__.py diff --git a/coriolis/replica_cron/rpc/client.py b/coriolis/transfer_cron/rpc/client.py similarity index 75% rename from coriolis/replica_cron/rpc/client.py rename to coriolis/transfer_cron/rpc/client.py index 59379630..1483e239 100644 --- a/coriolis/replica_cron/rpc/client.py +++ b/coriolis/transfer_cron/rpc/client.py @@ -9,11 +9,11 @@ VERSION = "1.0" -class ReplicaCronClient(rpc.BaseRPCClient): - def __init__(self, topic=constants.REPLICA_CRON_MAIN_MESSAGING_TOPIC): +class TransferCronClient(rpc.BaseRPCClient): + def __init__(self, topic=constants.TRANSFER_CRON_MAIN_MESSAGING_TOPIC): target = messaging.Target( topic=topic, version=VERSION) - super(ReplicaCronClient, self).__init__(target) + super(TransferCronClient, self).__init__(target) def register(self, ctxt, schedule): self._call(ctxt, 'register', schedule=schedule) diff --git a/coriolis/replica_cron/rpc/server.py b/coriolis/transfer_cron/rpc/server.py similarity index 95% rename from coriolis/replica_cron/rpc/server.py rename to coriolis/transfer_cron/rpc/server.py index 
1b2d6a3a..9b13a6ae 100644 --- a/coriolis/replica_cron/rpc/server.py +++ b/coriolis/transfer_cron/rpc/server.py @@ -19,12 +19,12 @@ def _trigger_replica(ctxt, conductor_client, replica_id, shutdown_instance): try: - execution = conductor_client.execute_replica_tasks( + execution = conductor_client.execute_transfer_tasks( ctxt, replica_id, shutdown_instance) result_msg = 'Execution %s for Replica %s' % ( execution.get('id'), execution.get('action_id')) return result_msg - except (exception.InvalidReplicaState, + except (exception.InvalidTransferState, exception.InvalidActionTasksExecutionState): LOG.info("A replica or migration already running") @@ -81,7 +81,7 @@ def _init_cron(self): self._cron.start() def _get_all_schedules(self): - schedules = self._rpc_client.get_replica_schedules( + schedules = self._rpc_client.get_transfer_schedules( self._admin_ctx, expired=False) return schedules diff --git a/coriolis/worker/rpc/server.py b/coriolis/worker/rpc/server.py index e96ace88..c463239a 100644 --- a/coriolis/worker/rpc/server.py +++ b/coriolis/worker/rpc/server.py @@ -513,7 +513,7 @@ def validate_endpoint_target_environment( def validate_endpoint_source_environment( self, ctxt, platform_name, source_env): provider = providers_factory.get_provider( - platform_name, constants.PROVIDER_TYPE_REPLICA_EXPORT, None) + platform_name, constants.PROVIDER_TYPE_TRANSFER_EXPORT, None) source_env_schema = provider.get_source_environment_schema() is_valid = True @@ -608,11 +608,11 @@ def get_provider_schemas(self, ctxt, platform_name, provider_type): schema = provider.get_connection_info_schema() schemas["connection_info_schema"] = schema - if provider_type == constants.PROVIDER_TYPE_REPLICA_IMPORT: + if provider_type == constants.PROVIDER_TYPE_TRANSFER_IMPORT: schema = provider.get_target_environment_schema() schemas["destination_environment_schema"] = schema - if provider_type == constants.PROVIDER_TYPE_REPLICA_EXPORT: + if provider_type == constants.PROVIDER_TYPE_TRANSFER_EXPORT: 
schema = provider.get_source_environment_schema() schemas["source_environment_schema"] = schema From ee490098ff2c55878ac5030303449c90a70ef5b4 Mon Sep 17 00:00:00 2001 From: Daniel Vincze Date: Tue, 19 Nov 2024 14:56:17 +0200 Subject: [PATCH 24/24] Refactor API layer --- coriolis/api/v1/deployments.py | 18 +- coriolis/api/v1/replica_tasks_executions.py | 74 ------ coriolis/api/v1/router.py | 58 ++--- ...replica_actions.py => transfer_actions.py} | 20 +- ...ica_schedules.py => transfer_schedules.py} | 52 ++-- ...py => transfer_tasks_execution_actions.py} | 20 +- coriolis/api/v1/transfer_tasks_executions.py | 74 ++++++ coriolis/api/v1/{replicas.py => transfers.py} | 154 ++++++------ coriolis/api/v1/views/deployment_view.py | 4 +- coriolis/api/v1/views/migration_view.py | 32 --- coriolis/api/v1/views/replica_view.py | 26 -- ...dule_view.py => transfer_schedule_view.py} | 0 ...ew.py => transfer_tasks_execution_view.py} | 6 +- coriolis/api/v1/views/transfer_view.py | 25 ++ .../cmd/{replica_cron.py => transfer_cron.py} | 2 +- coriolis/deployments/api.py | 10 +- coriolis/minion_manager/rpc/client.py | 10 +- coriolis/minion_manager/rpc/server.py | 34 +-- coriolis/policies/deployments.py | 4 +- coriolis/policies/migrations.py | 80 ------ coriolis/policies/replica_schedules.py | 80 ------ coriolis/policies/replica_tasks_executions.py | 83 ------- coriolis/policies/replicas.py | 92 ------- coriolis/policies/transfer_schedules.py | 80 ++++++ .../policies/transfer_tasks_executions.py | 83 +++++++ coriolis/policies/transfers.py | 92 +++++++ coriolis/policy.py | 11 +- coriolis/replica_tasks_executions/api.py | 29 --- coriolis/schemas.py | 2 +- .../disk_sync_resources_info_schema.json | 2 +- ...ema.json => transfer_schedule_schema.json} | 2 +- .../tests/api/v1/data/migration_create.yml | 28 --- .../api/v1/data/migration_validate_input.yml | 48 ---- ...ransfer_task_execution_actions_cancel.yml} | 0 ... transfers_get_merged_transfer_values.yml} | 8 +- ... 
=> transfers_update_storage_mappings.yml} | 0 ...yml => transfers_validate_create_body.yml} | 4 +- ...yml => transfers_validate_update_body.yml} | 4 +- ...transfers_validate_update_body_raises.yml} | 6 +- coriolis/tests/api/v1/test_router.py | 57 ++--- ...ca_actions.py => test_transfer_actions.py} | 34 +-- ...chedules.py => test_transfer_schedules.py} | 138 ++++++----- ... test_transfer_tasks_execution_actions.py} | 25 +- ...s.py => test_transfer_tasks_executions.py} | 96 ++++---- .../{test_replicas.py => test_transfers.py} | 233 +++++++++--------- coriolis/tests/api/v1/views/__init__py | 0 .../tests/api/v1/views/test_migration_view.py | 96 -------- .../tests/api/v1/views/test_replica_view.py | 112 --------- ...view.py => test_transfer_schedule_view.py} | 8 +- ...y => test_transfer_task_execution_view.py} | 12 +- .../tests/api/v1/views/test_transfer_view.py | 112 +++++++++ coriolis/tests/cmd/test_replica_cron.py | 16 +- .../tests/minion_manager/rpc/test_client.py | 12 +- .../tests/transfer_cron/rpc/test_server.py | 50 ++-- .../transfer_tasks_executions}/__init__.py | 0 .../test_api.py | 4 +- .../{replicas => tests/transfers}/__init__.py | 0 .../tests/{replicas => transfers}/test_api.py | 12 +- coriolis/transfer_cron/api.py | 20 +- coriolis/transfer_cron/rpc/server.py | 12 +- .../__init__.py | 0 coriolis/transfer_tasks_executions/api.py | 29 +++ .../{tests/replicas => transfers}/__init__.py | 0 coriolis/{replicas => transfers}/api.py | 24 +- setup.cfg | 2 +- 65 files changed, 1096 insertions(+), 1365 deletions(-) delete mode 100644 coriolis/api/v1/replica_tasks_executions.py rename coriolis/api/v1/{replica_actions.py => transfer_actions.py} (50%) rename coriolis/api/v1/{replica_schedules.py => transfer_schedules.py} (69%) rename coriolis/api/v1/{replica_tasks_execution_actions.py => transfer_tasks_execution_actions.py} (50%) create mode 100644 coriolis/api/v1/transfer_tasks_executions.py rename coriolis/api/v1/{replicas.py => transfers.py} (72%) delete mode 100644 
coriolis/api/v1/views/migration_view.py delete mode 100644 coriolis/api/v1/views/replica_view.py rename coriolis/api/v1/views/{replica_schedule_view.py => transfer_schedule_view.py} (100%) rename coriolis/api/v1/views/{replica_tasks_execution_view.py => transfer_tasks_execution_view.py} (82%) create mode 100644 coriolis/api/v1/views/transfer_view.py rename coriolis/cmd/{replica_cron.py => transfer_cron.py} (92%) delete mode 100644 coriolis/policies/migrations.py delete mode 100644 coriolis/policies/replica_schedules.py delete mode 100644 coriolis/policies/replica_tasks_executions.py delete mode 100644 coriolis/policies/replicas.py create mode 100644 coriolis/policies/transfer_schedules.py create mode 100644 coriolis/policies/transfer_tasks_executions.py create mode 100644 coriolis/policies/transfers.py delete mode 100644 coriolis/replica_tasks_executions/api.py rename coriolis/schemas/{replica_schedule_schema.json => transfer_schedule_schema.json} (93%) delete mode 100644 coriolis/tests/api/v1/data/migration_create.yml delete mode 100644 coriolis/tests/api/v1/data/migration_validate_input.yml rename coriolis/tests/api/v1/data/{replica_task_execution_actions_cancel.yml => transfer_task_execution_actions_cancel.yml} (100%) rename coriolis/tests/api/v1/data/{replicas_get_merged_replica_values.yml => transfers_get_merged_transfer_values.yml} (98%) rename coriolis/tests/api/v1/data/{replicas_update_storage_mappings.yml => transfers_update_storage_mappings.yml} (100%) rename coriolis/tests/api/v1/data/{replicas_validate_create_body.yml => transfers_validate_create_body.yml} (98%) rename coriolis/tests/api/v1/data/{replicas_validate_update_body.yml => transfers_validate_update_body.yml} (98%) rename coriolis/tests/api/v1/data/{replicas_validate_update_body_raises.yml => transfers_validate_update_body_raises.yml} (80%) rename coriolis/tests/api/v1/{test_replica_actions.py => test_transfer_actions.py} (70%) rename coriolis/tests/api/v1/{test_replica_schedules.py => 
test_transfer_schedules.py} (73%) rename coriolis/tests/api/v1/{test_replica_tasks_execution_actions.py => test_transfer_tasks_execution_actions.py} (57%) rename coriolis/tests/api/v1/{test_replica_tasks_executions.py => test_transfer_tasks_executions.py} (63%) rename coriolis/tests/api/v1/{test_replicas.py => test_transfers.py} (67%) delete mode 100644 coriolis/tests/api/v1/views/__init__py delete mode 100644 coriolis/tests/api/v1/views/test_migration_view.py delete mode 100644 coriolis/tests/api/v1/views/test_replica_view.py rename coriolis/tests/api/v1/views/{test_replica_schedule_view.py => test_transfer_schedule_view.py} (57%) rename coriolis/tests/api/v1/views/{test_replica_task_execution_view.py => test_transfer_task_execution_view.py} (88%) create mode 100644 coriolis/tests/api/v1/views/test_transfer_view.py rename coriolis/{replica_tasks_executions => tests/transfer_tasks_executions}/__init__.py (100%) rename coriolis/tests/{replica_tasks_executions => transfer_tasks_executions}/test_api.py (95%) rename coriolis/{replicas => tests/transfers}/__init__.py (100%) rename coriolis/tests/{replicas => transfers}/test_api.py (92%) rename coriolis/{tests/replica_tasks_executions => transfer_tasks_executions}/__init__.py (100%) create mode 100644 coriolis/transfer_tasks_executions/api.py rename coriolis/{tests/replicas => transfers}/__init__.py (100%) rename coriolis/{replicas => transfers}/api.py (63%) diff --git a/coriolis/api/v1/deployments.py b/coriolis/api/v1/deployments.py index 8b92743a..bc682d9f 100644 --- a/coriolis/api/v1/deployments.py +++ b/coriolis/api/v1/deployments.py @@ -65,13 +65,13 @@ def detail(self, req): def _validate_deployment_input(self, context, body): deployment = body["deployment"] - replica_id = deployment.get("replica_id", "") + transfer_id = deployment.get("transfer_id", "") - if not replica_id: + if not transfer_id: raise exc.HTTPBadRequest( - explanation="Missing 'replica_id' field from deployment " + explanation="Missing 
'transfer_id' field from deployment " "body. A deployment can be created strictly " - "based on an existing Replica.") + "based on an existing Transfer.") clone_disks = deployment.get("clone_disks", True) force = deployment.get("force", False) @@ -84,7 +84,7 @@ def _validate_deployment_input(self, context, body): user_scripts, deployment.get("instances", [])) return ( - replica_id, force, clone_disks, skip_os_morphing, + transfer_id, force, clone_disks, skip_os_morphing, instance_osmorphing_minion_pool_mappings, user_scripts) @@ -92,15 +92,15 @@ def create(self, req, body): context = req.environ['coriolis.context'] context.can(deployment_policies.get_deployments_policy_label("create")) - (replica_id, force, clone_disks, skip_os_morphing, + (transfer_id, force, clone_disks, skip_os_morphing, instance_osmorphing_minion_pool_mappings, user_scripts) = self._validate_deployment_input( context, body) - # NOTE: destination environment for replica should have been + # NOTE: destination environment for transfer should have been # validated upon its creation. - deployment = self._deployment_api.deploy_replica_instances( - context, replica_id, instance_osmorphing_minion_pool_mappings, + deployment = self._deployment_api.deploy_transfer_instances( + context, transfer_id, instance_osmorphing_minion_pool_mappings, clone_disks, force, skip_os_morphing, user_scripts=user_scripts) diff --git a/coriolis/api/v1/replica_tasks_executions.py b/coriolis/api/v1/replica_tasks_executions.py deleted file mode 100644 index b755e486..00000000 --- a/coriolis/api/v1/replica_tasks_executions.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. 
- -from coriolis.api.v1.views import replica_tasks_execution_view -from coriolis.api import wsgi as api_wsgi -from coriolis import exception -from coriolis.policies import replica_tasks_executions as executions_policies -from coriolis.replica_tasks_executions import api - -from webob import exc - - -class ReplicaTasksExecutionController(api_wsgi.Controller): - def __init__(self): - self._replica_tasks_execution_api = api.API() - super(ReplicaTasksExecutionController, self).__init__() - - def show(self, req, replica_id, id): - context = req.environ["coriolis.context"] - context.can( - executions_policies.get_replica_executions_policy_label("show")) - execution = self._replica_tasks_execution_api.get_execution( - context, replica_id, id) - if not execution: - raise exc.HTTPNotFound() - - return replica_tasks_execution_view.single(execution) - - def index(self, req, replica_id): - context = req.environ["coriolis.context"] - context.can( - executions_policies.get_replica_executions_policy_label("list")) - - return replica_tasks_execution_view.collection( - self._replica_tasks_execution_api.get_executions( - context, replica_id, include_tasks=False)) - - def detail(self, req, replica_id): - context = req.environ["coriolis.context"] - context.can( - executions_policies.get_replica_executions_policy_label("show")) - - return replica_tasks_execution_view.collection( - self._replica_tasks_execution_api.get_executions( - context, replica_id, include_tasks=True)) - - def create(self, req, replica_id, body): - context = req.environ["coriolis.context"] - context.can( - executions_policies.get_replica_executions_policy_label("create")) - - # TODO(alexpilotti): validate body - - execution_body = body.get("execution", {}) - shutdown_instances = execution_body.get("shutdown_instances", False) - - return replica_tasks_execution_view.single( - self._replica_tasks_execution_api.create( - context, replica_id, shutdown_instances)) - - def delete(self, req, replica_id, id): - context = 
req.environ["coriolis.context"] - context.can( - executions_policies.get_replica_executions_policy_label("delete")) - - try: - self._replica_tasks_execution_api.delete(context, replica_id, id) - raise exc.HTTPNoContent() - except exception.NotFound as ex: - raise exc.HTTPNotFound(explanation=ex.msg) - - -def create_resource(): - return api_wsgi.Resource(ReplicaTasksExecutionController()) diff --git a/coriolis/api/v1/router.py b/coriolis/api/v1/router.py index 28a44f7d..35297957 100644 --- a/coriolis/api/v1/router.py +++ b/coriolis/api/v1/router.py @@ -21,12 +21,12 @@ from coriolis.api.v1 import provider_schemas from coriolis.api.v1 import providers from coriolis.api.v1 import regions -from coriolis.api.v1 import replica_actions -from coriolis.api.v1 import replica_schedules -from coriolis.api.v1 import replica_tasks_execution_actions -from coriolis.api.v1 import replica_tasks_executions -from coriolis.api.v1 import replicas from coriolis.api.v1 import services +from coriolis.api.v1 import transfer_actions +from coriolis.api.v1 import transfer_schedules +from coriolis.api.v1 import transfer_tasks_execution_actions +from coriolis.api.v1 import transfer_tasks_executions +from coriolis.api.v1 import transfers LOG = logging.getLogger(__name__) @@ -154,44 +154,46 @@ def _setup_routes(self, mapper, ext_mgr): action='action', conditions={'method': 'POST'}) - self.resources['replicas'] = replicas.create_resource() - mapper.resource('replica', 'replicas', - controller=self.resources['replicas'], + self.resources['transfers'] = transfers.create_resource() + mapper.resource('transfer', 'transfers', + controller=self.resources['transfers'], collection={'detail': 'GET'}, member={'action': 'POST'}) - replica_actions_resource = replica_actions.create_resource() - self.resources['replica_actions'] = replica_actions_resource - migration_path = '/{project_id}/replicas/{id}' - mapper.connect('replica_actions', + transfer_actions_resource = transfer_actions.create_resource() + 
self.resources['transfer_actions'] = transfer_actions_resource + migration_path = '/{project_id}/transfers/{id}' + mapper.connect('transfer_actions', migration_path + '/actions', - controller=self.resources['replica_actions'], + controller=self.resources['transfer_actions'], action='action', conditions={'method': 'POST'}) - self.resources['replica_tasks_executions'] = \ - replica_tasks_executions.create_resource() - mapper.resource('execution', 'replicas/{replica_id}/executions', - controller=self.resources['replica_tasks_executions'], + self.resources['transfer_tasks_executions'] = \ + transfer_tasks_executions.create_resource() + mapper.resource('execution', 'transfers/{transfer_id}/executions', + controller=self.resources['transfer_tasks_executions'], collection={'detail': 'GET'}, member={'action': 'POST'}) - replica_tasks_execution_actions_resource = \ - replica_tasks_execution_actions.create_resource() - self.resources['replica_tasks_execution_actions'] = \ - replica_tasks_execution_actions_resource - migration_path = '/{project_id}/replicas/{replica_id}/executions/{id}' - mapper.connect('replica_tasks_execution_actions', + transfer_tasks_execution_actions_resource = \ + transfer_tasks_execution_actions.create_resource() + self.resources['transfer_tasks_execution_actions'] = \ + transfer_tasks_execution_actions_resource + migration_path = ('/{project_id}/transfers/{transfer_id}/' + 'executions/{id}') + mapper.connect('transfer_tasks_execution_actions', migration_path + '/actions', controller=self.resources[ - 'replica_tasks_execution_actions'], + 'transfer_tasks_execution_actions'], action='action', conditions={'method': 'POST'}) - sched = replica_schedules.create_resource() - self.resources['replica_schedules'] = sched - mapper.resource('replica_schedule', 'replicas/{replica_id}/schedules', - controller=self.resources['replica_schedules'], + sched = transfer_schedules.create_resource() + self.resources['transfer_schedules'] = sched + 
mapper.resource('transfer_schedule', + 'transfers/{transfer_id}/schedules', + controller=self.resources['transfer_schedules'], collection={'index': 'GET'}, member={'action': 'POST'}) diff --git a/coriolis/api/v1/replica_actions.py b/coriolis/api/v1/transfer_actions.py similarity index 50% rename from coriolis/api/v1/replica_actions.py rename to coriolis/api/v1/transfer_actions.py index adb82a26..487e0099 100644 --- a/coriolis/api/v1/replica_actions.py +++ b/coriolis/api/v1/transfer_actions.py @@ -1,28 +1,28 @@ # Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. -from coriolis.api.v1.views import replica_tasks_execution_view +from coriolis.api.v1.views import transfer_tasks_execution_view from coriolis.api import wsgi as api_wsgi from coriolis import exception -from coriolis.policies import replicas as replica_policies -from coriolis.replicas import api +from coriolis.policies import transfers as transfer_policies +from coriolis.transfers import api from webob import exc -class ReplicaActionsController(api_wsgi.Controller): +class TransferActionsController(api_wsgi.Controller): def __init__(self): - self._replica_api = api.API() - super(ReplicaActionsController, self).__init__() + self._transfer_api = api.API() + super(TransferActionsController, self).__init__() @api_wsgi.action('delete-disks') def _delete_disks(self, req, id, body): context = req.environ['coriolis.context'] context.can( - replica_policies.get_replicas_policy_label("delete_disks")) + transfer_policies.get_transfers_policy_label("delete_disks")) try: - return replica_tasks_execution_view.single( - self._replica_api.delete_disks(context, id)) + return transfer_tasks_execution_view.single( + self._transfer_api.delete_disks(context, id)) except exception.NotFound as ex: raise exc.HTTPNotFound(explanation=ex.msg) except exception.InvalidParameterValue as ex: @@ -30,4 +30,4 @@ def _delete_disks(self, req, id, body): def create_resource(): - return api_wsgi.Resource(ReplicaActionsController()) 
+ return api_wsgi.Resource(TransferActionsController()) diff --git a/coriolis/api/v1/replica_schedules.py b/coriolis/api/v1/transfer_schedules.py similarity index 69% rename from coriolis/api/v1/replica_schedules.py rename to coriolis/api/v1/transfer_schedules.py index 53edf7c3..2ea70ce9 100644 --- a/coriolis/api/v1/replica_schedules.py +++ b/coriolis/api/v1/transfer_schedules.py @@ -1,10 +1,10 @@ # Copyright 2017 Cloudbase Solutions Srl # All Rights Reserved. -from coriolis.api.v1.views import replica_schedule_view +from coriolis.api.v1.views import transfer_schedule_view from coriolis.api import wsgi as api_wsgi from coriolis import exception -from coriolis.policies import replica_schedules as schedules_policies +from coriolis.policies import transfer_schedules as schedules_policies from coriolis import schemas from coriolis.transfer_cron import api @@ -18,31 +18,31 @@ LOG = logging.getLogger(__name__) -class ReplicaScheduleController(api_wsgi.Controller): +class TransferScheduleController(api_wsgi.Controller): def __init__(self): self._schedule_api = api.API() - super(ReplicaScheduleController, self).__init__() + super(TransferScheduleController, self).__init__() - def show(self, req, replica_id, id): + def show(self, req, transfer_id, id): context = req.environ["coriolis.context"] context.can( - schedules_policies.get_replica_schedules_policy_label("show")) - schedule = self._schedule_api.get_schedule(context, replica_id, id) + schedules_policies.get_transfer_schedules_policy_label("show")) + schedule = self._schedule_api.get_schedule(context, transfer_id, id) if not schedule: raise exc.HTTPNotFound() - return replica_schedule_view.single(schedule) + return transfer_schedule_view.single(schedule) - def index(self, req, replica_id): + def index(self, req, transfer_id): context = req.environ["coriolis.context"] context.can( - schedules_policies.get_replica_schedules_policy_label("list")) + schedules_policies.get_transfer_schedules_policy_label("list")) 
show_expired = strutils.bool_from_string( req.GET.get("show_expired", True), strict=True) - return replica_schedule_view.collection( + return transfer_schedule_view.collection( self._schedule_api.get_schedules( - context, replica_id, expired=show_expired)) + context, transfer_id, expired=show_expired)) def _validate_schedule(self, schedule): schema = schemas.SCHEDULE_API_BODY_SCHEMA["properties"]["schedule"] @@ -100,45 +100,45 @@ def _validate_update_body(self, update_body): body["expiration_date"] = exp return body - def create(self, req, replica_id, body): + def create(self, req, transfer_id, body): context = req.environ["coriolis.context"] context.can( - schedules_policies.get_replica_schedules_policy_label("create")) + schedules_policies.get_transfer_schedules_policy_label("create")) - LOG.debug("Got request: %r %r %r" % (req, replica_id, body)) + LOG.debug("Got request: %r %r %r" % (req, transfer_id, body)) try: schedule, enabled, exp_date, shutdown = self._validate_create_body( body) except Exception as err: raise exception.InvalidInput(err) - return replica_schedule_view.single(self._schedule_api.create( - context, replica_id, schedule, enabled, exp_date, shutdown)) + return transfer_schedule_view.single(self._schedule_api.create( + context, transfer_id, schedule, enabled, exp_date, shutdown)) - def update(self, req, replica_id, id, body): + def update(self, req, transfer_id, id, body): context = req.environ["coriolis.context"] context.can( - schedules_policies.get_replica_schedules_policy_label("update")) + schedules_policies.get_transfer_schedules_policy_label("update")) LOG.debug("Got request: %r %r %r %r" % ( - req, replica_id, id, body)) + req, transfer_id, id, body)) try: update_values = self._validate_update_body(body) except Exception as err: raise exception.InvalidInput(err) - return replica_schedule_view.single(self._schedule_api.update( - context, replica_id, id, update_values)) + return transfer_schedule_view.single(self._schedule_api.update( + 
context, transfer_id, id, update_values)) - def delete(self, req, replica_id, id): + def delete(self, req, transfer_id, id): context = req.environ["coriolis.context"] context.can( - schedules_policies.get_replica_schedules_policy_label("delete")) + schedules_policies.get_transfer_schedules_policy_label("delete")) - self._schedule_api.delete(context, replica_id, id) + self._schedule_api.delete(context, transfer_id, id) raise exc.HTTPNoContent() def create_resource(): - return api_wsgi.Resource(ReplicaScheduleController()) + return api_wsgi.Resource(TransferScheduleController()) diff --git a/coriolis/api/v1/replica_tasks_execution_actions.py b/coriolis/api/v1/transfer_tasks_execution_actions.py similarity index 50% rename from coriolis/api/v1/replica_tasks_execution_actions.py rename to coriolis/api/v1/transfer_tasks_execution_actions.py index b7bcca85..27998046 100644 --- a/coriolis/api/v1/replica_tasks_execution_actions.py +++ b/coriolis/api/v1/transfer_tasks_execution_actions.py @@ -5,25 +5,25 @@ from coriolis.api import wsgi as api_wsgi from coriolis import exception -from coriolis.policies import replica_tasks_executions as execution_policies -from coriolis.replica_tasks_executions import api +from coriolis.policies import transfer_tasks_executions as execution_policies +from coriolis.transfer_tasks_executions import api -class ReplicaTasksExecutionActionsController(api_wsgi.Controller): +class TransferTasksExecutionActionsController(api_wsgi.Controller): def __init__(self): - self._replica_tasks_execution_api = api.API() - super(ReplicaTasksExecutionActionsController, self).__init__() + self._transfer_tasks_execution_api = api.API() + super(TransferTasksExecutionActionsController, self).__init__() @api_wsgi.action('cancel') - def _cancel(self, req, replica_id, id, body): + def _cancel(self, req, transfer_id, id, body): context = req.environ['coriolis.context'] context.can( - execution_policies.get_replica_executions_policy_label('cancel')) + 
execution_policies.get_transfer_executions_policy_label('cancel')) try: force = (body["cancel"] or {}).get("force", False) - self._replica_tasks_execution_api.cancel( - context, replica_id, id, force) + self._transfer_tasks_execution_api.cancel( + context, transfer_id, id, force) raise exc.HTTPNoContent() except exception.NotFound as ex: raise exc.HTTPNotFound(explanation=ex.msg) @@ -32,4 +32,4 @@ def _cancel(self, req, replica_id, id, body): def create_resource(): - return api_wsgi.Resource(ReplicaTasksExecutionActionsController()) + return api_wsgi.Resource(TransferTasksExecutionActionsController()) diff --git a/coriolis/api/v1/transfer_tasks_executions.py b/coriolis/api/v1/transfer_tasks_executions.py new file mode 100644 index 00000000..68f029e2 --- /dev/null +++ b/coriolis/api/v1/transfer_tasks_executions.py @@ -0,0 +1,74 @@ +# Copyright 2016 Cloudbase Solutions Srl +# All Rights Reserved. + +from coriolis.api.v1.views import transfer_tasks_execution_view +from coriolis.api import wsgi as api_wsgi +from coriolis import exception +from coriolis.policies import transfer_tasks_executions as executions_policies +from coriolis.transfer_tasks_executions import api + +from webob import exc + + +class TransferTasksExecutionController(api_wsgi.Controller): + def __init__(self): + self._transfer_tasks_execution_api = api.API() + super(TransferTasksExecutionController, self).__init__() + + def show(self, req, transfer_id, id): + context = req.environ["coriolis.context"] + context.can( + executions_policies.get_transfer_executions_policy_label("show")) + execution = self._transfer_tasks_execution_api.get_execution( + context, transfer_id, id) + if not execution: + raise exc.HTTPNotFound() + + return transfer_tasks_execution_view.single(execution) + + def index(self, req, transfer_id): + context = req.environ["coriolis.context"] + context.can( + executions_policies.get_transfer_executions_policy_label("list")) + + return transfer_tasks_execution_view.collection( + 
self._transfer_tasks_execution_api.get_executions( + context, transfer_id, include_tasks=False)) + + def detail(self, req, transfer_id): + context = req.environ["coriolis.context"] + context.can( + executions_policies.get_transfer_executions_policy_label("show")) + + return transfer_tasks_execution_view.collection( + self._transfer_tasks_execution_api.get_executions( + context, transfer_id, include_tasks=True)) + + def create(self, req, transfer_id, body): + context = req.environ["coriolis.context"] + context.can( + executions_policies.get_transfer_executions_policy_label("create")) + + # TODO(alexpilotti): validate body + + execution_body = body.get("execution", {}) + shutdown_instances = execution_body.get("shutdown_instances", False) + + return transfer_tasks_execution_view.single( + self._transfer_tasks_execution_api.create( + context, transfer_id, shutdown_instances)) + + def delete(self, req, transfer_id, id): + context = req.environ["coriolis.context"] + context.can( + executions_policies.get_transfer_executions_policy_label("delete")) + + try: + self._transfer_tasks_execution_api.delete(context, transfer_id, id) + raise exc.HTTPNoContent() + except exception.NotFound as ex: + raise exc.HTTPNotFound(explanation=ex.msg) + + +def create_resource(): + return api_wsgi.Resource(TransferTasksExecutionController()) diff --git a/coriolis/api/v1/replicas.py b/coriolis/api/v1/transfers.py similarity index 72% rename from coriolis/api/v1/replicas.py rename to coriolis/api/v1/transfers.py index 197d80e8..511cfd2d 100644 --- a/coriolis/api/v1/replicas.py +++ b/coriolis/api/v1/transfers.py @@ -2,61 +2,61 @@ # All Rights Reserved. 
from coriolis.api.v1 import utils as api_utils -from coriolis.api.v1.views import replica_tasks_execution_view -from coriolis.api.v1.views import replica_view +from coriolis.api.v1.views import transfer_tasks_execution_view +from coriolis.api.v1.views import transfer_view from coriolis.api import wsgi as api_wsgi from coriolis import constants from coriolis.endpoints import api as endpoints_api from coriolis import exception -from coriolis.policies import replicas as replica_policies -from coriolis.replicas import api +from coriolis.policies import transfers as transfer_policies +from coriolis.transfers import api from oslo_config import cfg as conf from oslo_log import log as logging from webob import exc -REPLICA_API_OPTS = [ - conf.BoolOpt("include_task_info_in_replicas_api", +TRANSFER_API_OPTS = [ + conf.BoolOpt("include_task_info_in_transfers_api", default=False, help="Whether or not to expose the internal 'info' field of " - "a Replica as part of a `GET` request.")] + "a Transfer as part of a `GET` request.")] CONF = conf.CONF -CONF.register_opts(REPLICA_API_OPTS, 'api') +CONF.register_opts(TRANSFER_API_OPTS, 'api') LOG = logging.getLogger(__name__) -SUPPORTED_REPLICA_SCENARIOS = [ +SUPPORTED_TRANSFER_SCENARIOS = [ constants.TRANSFER_SCENARIO_REPLICA, constants.TRANSFER_SCENARIO_LIVE_MIGRATION] -class ReplicaController(api_wsgi.Controller): +class TransferController(api_wsgi.Controller): def __init__(self): - self._replica_api = api.API() + self._transfer_api = api.API() self._endpoints_api = endpoints_api.API() - super(ReplicaController, self).__init__() + super(TransferController, self).__init__() def show(self, req, id): context = req.environ["coriolis.context"] - context.can(replica_policies.get_replicas_policy_label("show")) - replica = self._replica_api.get_replica( + context.can(transfer_policies.get_transfers_policy_label("show")) + transfer = self._transfer_api.get_transfer( context, id, - include_task_info=CONF.api.include_task_info_in_replicas_api) 
- if not replica: + include_task_info=CONF.api.include_task_info_in_transfers_api) + if not transfer: raise exc.HTTPNotFound() - return replica_view.single(replica) + return transfer_view.single(transfer) def _list(self, req): show_deleted = api_utils._get_show_deleted( req.GET.get("show_deleted", None)) context = req.environ["coriolis.context"] context.show_deleted = show_deleted - context.can(replica_policies.get_replicas_policy_label("list")) - include_task_info = CONF.api.include_task_info_in_replicas_api - return replica_view.collection( - self._replica_api.get_replicas( + context.can(transfer_policies.get_transfers_policy_label("list")) + include_task_info = CONF.api.include_task_info_in_transfers_api + return transfer_view.collection( + self._transfer_api.get_transfers( context, include_tasks_executions=include_task_info, include_task_info=include_task_info)) @@ -67,41 +67,41 @@ def index(self, req): def detail(self, req): return self._list(req) - @api_utils.format_keyerror_message(resource='replica', method='create') + @api_utils.format_keyerror_message(resource='transfer', method='create') def _validate_create_body(self, context, body): - replica = body["replica"] + transfer = body["transfer"] - scenario = replica.get("scenario", "") + scenario = transfer.get("scenario", "") if scenario: - if scenario not in SUPPORTED_REPLICA_SCENARIOS: + if scenario not in SUPPORTED_TRANSFER_SCENARIOS: raise exc.HTTPBadRequest( - explanation=f"Unsupported Replica creation scenario " + explanation=f"Unsupported Transfer creation scenario " f"'{scenario}', must be one of: " - f"{SUPPORTED_REPLICA_SCENARIOS}") + f"{SUPPORTED_TRANSFER_SCENARIOS}") else: scenario = constants.TRANSFER_SCENARIO_REPLICA LOG.warn( - "No Replica 'scenario' field set in Replica body, " + "No Transfer 'scenario' field set in Transfer body, " f"defaulting to: '{scenario}'") - origin_endpoint_id = replica["origin_endpoint_id"] - destination_endpoint_id = replica["destination_endpoint_id"] - 
destination_environment = replica.get( + origin_endpoint_id = transfer["origin_endpoint_id"] + destination_endpoint_id = transfer["destination_endpoint_id"] + destination_environment = transfer.get( "destination_environment", {}) instances = api_utils.validate_instances_list_for_transfer( - replica.get('instances')) + transfer.get('instances')) - notes = replica.get("notes") + notes = transfer.get("notes") - source_environment = replica.get("source_environment", {}) + source_environment = transfer.get("source_environment", {}) self._endpoints_api.validate_source_environment( context, origin_endpoint_id, source_environment) - origin_minion_pool_id = replica.get( + origin_minion_pool_id = transfer.get( 'origin_minion_pool_id') - destination_minion_pool_id = replica.get( + destination_minion_pool_id = transfer.get( 'destination_minion_pool_id') - instance_osmorphing_minion_pool_mappings = replica.get( + instance_osmorphing_minion_pool_mappings = transfer.get( 'instance_osmorphing_minion_pool_mappings', {}) extras = [ instance @@ -111,18 +111,18 @@ def _validate_create_body(self, context, body): raise ValueError( "One or more instance OSMorphing pool mappings were " "provided for instances (%s) which are not part of the " - "Replicas's declared instances (%s)" % (extras, instances)) + "Transfer's declared instances (%s)" % (extras, instances)) # TODO(aznashwan): until the provider plugin interface is updated # to have separate 'network_map' and 'storage_mappings' fields, # we add them as part of the destination environment: - network_map = replica.get("network_map", {}) + network_map = transfer.get("network_map", {}) api_utils.validate_network_map(network_map) destination_environment['network_map'] = network_map self._endpoints_api.validate_target_environment( context, destination_endpoint_id, destination_environment) - user_scripts = replica.get('user_scripts', {}) + user_scripts = transfer.get('user_scripts', {}) api_utils.validate_user_scripts(user_scripts) 
user_scripts = api_utils.normalize_user_scripts( user_scripts, instances) @@ -131,7 +131,7 @@ def _validate_create_body(self, context, body): # import provider before appending the 'storage_mappings' parameter # for plugins with strict property name checks which do not yet # support storage mapping features: - storage_mappings = replica.get("storage_mappings", {}) + storage_mappings = transfer.get("storage_mappings", {}) api_utils.validate_storage_mappings(storage_mappings) destination_environment['storage_mappings'] = storage_mappings @@ -144,7 +144,7 @@ def _validate_create_body(self, context, body): def create(self, req, body): context = req.environ["coriolis.context"] - context.can(replica_policies.get_replicas_policy_label("create")) + context.can(transfer_policies.get_transfers_policy_label("create")) (scenario, origin_endpoint_id, destination_endpoint_id, source_environment, destination_environment, instances, network_map, @@ -153,7 +153,7 @@ def create(self, req, body): instance_osmorphing_minion_pool_mappings, user_scripts) = ( self._validate_create_body(context, body)) - return replica_view.single(self._replica_api.create( + return transfer_view.single(self._transfer_api.create( context, scenario, origin_endpoint_id, destination_endpoint_id, origin_minion_pool_id, destination_minion_pool_id, instance_osmorphing_minion_pool_mappings, source_environment, @@ -162,9 +162,9 @@ def create(self, req, body): def delete(self, req, id): context = req.environ["coriolis.context"] - context.can(replica_policies.get_replicas_policy_label("delete")) + context.can(transfer_policies.get_transfers_policy_label("delete")) try: - self._replica_api.delete(context, id) + self._transfer_api.delete(context, id) raise exc.HTTPNoContent() except exception.NotFound as ex: raise exc.HTTPNotFound(explanation=ex.msg) @@ -234,8 +234,8 @@ def _get_updated_user_scripts(original_user_scripts, new_user_scripts): return user_scripts - def _get_merged_replica_values(self, replica, 
updated_values): - """ Looks for the following keys in the original replica body and + def _get_merged_transfer_values(self, transfer, updated_values): + """ Looks for the following keys in the original transfer body and updated values (preferring the updated values where needed, but using `.update()` on dicts): "source_environment", "destination_environment", "network_map", "notes" @@ -249,9 +249,9 @@ def _get_merged_replica_values(self, replica, updated_values): for option in [ "source_environment", "destination_environment", "network_map"]: - before = replica.get(option) + before = transfer.get(option) after = updated_values.get(option) - # NOTE: for Replicas created before the separation of these fields + # NOTE: for Transfers created before the separation of these fields # in the DB there is the chance that some of these may be NULL: if before is None: before = {} @@ -261,7 +261,7 @@ def _get_merged_replica_values(self, replica, updated_values): final_values[option] = before - original_storage_mappings = replica.get('storage_mappings') + original_storage_mappings = transfer.get('storage_mappings') if original_storage_mappings is None: original_storage_mappings = {} new_storage_mappings = updated_values.get('storage_mappings') @@ -271,7 +271,7 @@ def _get_merged_replica_values(self, replica, updated_values): original_storage_mappings, new_storage_mappings) original_user_scripts = api_utils.validate_user_scripts( - replica.get('user_scripts', {})) + transfer.get('user_scripts', {})) new_user_scripts = api_utils.validate_user_scripts( updated_values.get('user_scripts', {})) final_values['user_scripts'] = self._get_updated_user_scripts( @@ -280,7 +280,7 @@ def _get_merged_replica_values(self, replica, updated_values): if 'notes' in updated_values: final_values['notes'] = updated_values.get('notes', '') else: - final_values['notes'] = replica.get('notes', '') + final_values['notes'] = transfer.get('notes', '') # NOTE: until the provider plugin interface is updated 
# to have separate 'network_map' and 'storage_mappings' fields, @@ -304,48 +304,48 @@ def _get_merged_replica_values(self, replica, updated_values): return final_values - @api_utils.format_keyerror_message(resource='replica', method='update') + @api_utils.format_keyerror_message(resource='transfer', method='update') def _validate_update_body(self, id, context, body): - replica = self._replica_api.get_replica(context, id) + transfer = self._transfer_api.get_transfer(context, id) scenario = body.get("scenario", "") - if scenario and scenario != replica["scenario"]: + if scenario and scenario != transfer["scenario"]: raise exc.HTTPBadRequest( - explanation=f"Changing Replica creation scenario is not " + explanation=f"Changing Transfer creation scenario is not " f"supported (original scenario is " - f"{replica['scenario']}, received '{scenario}')") + f"{transfer['scenario']}, received '{scenario}')") - replica_body = body['replica'] - origin_endpoint_id = replica_body.get('origin_endpoint_id', None) - destination_endpoint_id = replica_body.get( + transfer_body = body['transfer'] + origin_endpoint_id = transfer_body.get('origin_endpoint_id', None) + destination_endpoint_id = transfer_body.get( 'destination_endpoint_id', None) - instances = body['replica'].get('instances', None) + instances = body['transfer'].get('instances', None) if origin_endpoint_id or destination_endpoint_id: raise exc.HTTPBadRequest( explanation="The source or destination endpoints for a " - "Coriolis Replica cannot be updated after its " + "Coriolis Transfer cannot be updated after its " "creation. 
If the credentials of any of the " - "Replica's endpoints need updating, please update " - "the endpoints themselves.") + "Transfer's endpoints need updating, please " + "update the endpoints themselves.") if instances: raise exc.HTTPBadRequest( - explanation="The list of instances of a Replica cannot be " + explanation="The list of instances of a Transfer cannot be " "updated") - merged_body = self._get_merged_replica_values( - replica, replica_body) + merged_body = self._get_merged_transfer_values( + transfer, transfer_body) - replica_origin_endpoint_id = replica["origin_endpoint_id"] - replica_destination_endpoint_id = replica[ + transfer_origin_endpoint_id = transfer["origin_endpoint_id"] + transfer_destination_endpoint_id = transfer[ "destination_endpoint_id"] self._endpoints_api.validate_source_environment( - context, replica_origin_endpoint_id, + context, transfer_origin_endpoint_id, merged_body["source_environment"]) destination_environment = merged_body["destination_environment"] self._endpoints_api.validate_target_environment( - context, replica_destination_endpoint_id, + context, transfer_destination_endpoint_id, destination_environment) api_utils.validate_network_map(merged_body["network_map"]) @@ -356,19 +356,19 @@ def _validate_update_body(self, id, context, body): user_scripts = merged_body['user_scripts'] api_utils.validate_user_scripts(user_scripts) merged_body['user_scripts'] = api_utils.normalize_user_scripts( - user_scripts, replica.get('instances', [])) + user_scripts, transfer.get('instances', [])) return merged_body def update(self, req, id, body): context = req.environ["coriolis.context"] - context.can(replica_policies.get_replicas_policy_label("update")) + context.can(transfer_policies.get_transfers_policy_label("update")) updated_values = self._validate_update_body(id, context, body) try: - return replica_tasks_execution_view.single( - self._replica_api.update(req.environ['coriolis.context'], - id, updated_values)) + return 
transfer_tasks_execution_view.single( + self._transfer_api.update(req.environ['coriolis.context'], + id, updated_values)) except exception.NotFound as ex: raise exc.HTTPNotFound(explanation=ex.msg) except exception.InvalidParameterValue as ex: @@ -376,4 +376,4 @@ def update(self, req, id, body): def create_resource(): - return api_wsgi.Resource(ReplicaController()) + return api_wsgi.Resource(TransferController()) diff --git a/coriolis/api/v1/views/deployment_view.py b/coriolis/api/v1/views/deployment_view.py index fb4186f3..1a32f562 100644 --- a/coriolis/api/v1/views/deployment_view.py +++ b/coriolis/api/v1/views/deployment_view.py @@ -1,7 +1,7 @@ # Copyright 2024 Cloudbase Solutions Srl # All Rights Reserved. -from coriolis.api.v1.views import replica_tasks_execution_view as view +from coriolis.api.v1.views import transfer_tasks_execution_view as view from coriolis.api.v1.views import utils as view_utils @@ -9,7 +9,7 @@ def _format_deployment(deployment, keys=None): deployment_dict = view_utils.format_opt(deployment, keys) if len(deployment_dict.get("executions", [])): - execution = view.format_replica_tasks_execution( + execution = view.format_transfer_tasks_execution( deployment_dict["executions"][0], keys) del deployment_dict["executions"] else: diff --git a/coriolis/api/v1/views/migration_view.py b/coriolis/api/v1/views/migration_view.py deleted file mode 100644 index f130a3df..00000000 --- a/coriolis/api/v1/views/migration_view.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. 
- -from coriolis.api.v1.views import replica_tasks_execution_view as view -from coriolis.api.v1.views import utils as view_utils - - -def _format_migration(migration, keys=None): - migration_dict = view_utils.format_opt(migration, keys) - - if len(migration_dict.get("executions", [])): - execution = view.format_replica_tasks_execution( - migration_dict["executions"][0], keys) - del migration_dict["executions"] - else: - execution = {} - - tasks = execution.get("tasks") - if tasks: - migration_dict["tasks"] = tasks - - return migration_dict - - -def single(migration, keys=None): - return {"migration": _format_migration(migration, keys)} - - -def collection(migrations, keys=None): - formatted_migrations = [_format_migration(m, keys) - for m in migrations] - return {'migrations': formatted_migrations} diff --git a/coriolis/api/v1/views/replica_view.py b/coriolis/api/v1/views/replica_view.py deleted file mode 100644 index abc38f2d..00000000 --- a/coriolis/api/v1/views/replica_view.py +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. 
- -from coriolis.api.v1.views import replica_tasks_execution_view as view -from coriolis.api.v1.views import utils as view_utils - - -def _format_replica(replica, keys=None): - replica_dict = view_utils.format_opt(replica, keys) - - executions = replica_dict.get('executions', []) - replica_dict['executions'] = [ - view.format_replica_tasks_execution(ex) - for ex in executions] - - return replica_dict - - -def single(replica, keys=None): - return {"replica": _format_replica(replica, keys)} - - -def collection(replicas, keys=None): - formatted_replicas = [_format_replica(m, keys) - for m in replicas] - return {'replicas': formatted_replicas} diff --git a/coriolis/api/v1/views/replica_schedule_view.py b/coriolis/api/v1/views/transfer_schedule_view.py similarity index 100% rename from coriolis/api/v1/views/replica_schedule_view.py rename to coriolis/api/v1/views/transfer_schedule_view.py diff --git a/coriolis/api/v1/views/replica_tasks_execution_view.py b/coriolis/api/v1/views/transfer_tasks_execution_view.py similarity index 82% rename from coriolis/api/v1/views/replica_tasks_execution_view.py rename to coriolis/api/v1/views/transfer_tasks_execution_view.py index 96359a18..3873d187 100644 --- a/coriolis/api/v1/views/replica_tasks_execution_view.py +++ b/coriolis/api/v1/views/transfer_tasks_execution_view.py @@ -25,7 +25,7 @@ def _sort_tasks(tasks, filter_error_only_tasks=True): tasks, key=lambda t: t.get('index', 0)) -def format_replica_tasks_execution(execution, keys=None): +def format_transfer_tasks_execution(execution, keys=None): if "tasks" in execution: execution["tasks"] = _sort_tasks(execution["tasks"]) @@ -35,10 +35,10 @@ def format_replica_tasks_execution(execution, keys=None): def single(execution, keys=None): - return {"execution": format_replica_tasks_execution(execution, keys)} + return {"execution": format_transfer_tasks_execution(execution, keys)} def collection(executions, keys=None): - formatted_executions = [format_replica_tasks_execution(m, keys) + 
formatted_executions = [format_transfer_tasks_execution(m, keys) for m in executions] return {'executions': formatted_executions} diff --git a/coriolis/api/v1/views/transfer_view.py b/coriolis/api/v1/views/transfer_view.py new file mode 100644 index 00000000..2fcefe4f --- /dev/null +++ b/coriolis/api/v1/views/transfer_view.py @@ -0,0 +1,25 @@ +# Copyright 2016 Cloudbase Solutions Srl +# All Rights Reserved. + +from coriolis.api.v1.views import transfer_tasks_execution_view as view +from coriolis.api.v1.views import utils as view_utils + + +def _format_transfer(transfer, keys=None): + transfer_dict = view_utils.format_opt(transfer, keys) + + executions = transfer_dict.get('executions', []) + transfer_dict['executions'] = [ + view.format_transfer_tasks_execution(ex) + for ex in executions] + + return transfer_dict + + +def single(transfer, keys=None): + return {"transfer": _format_transfer(transfer, keys)} + + +def collection(transfers, keys=None): + formatted_transfers = [_format_transfer(t, keys) for t in transfers] + return {'transfers': formatted_transfers} diff --git a/coriolis/cmd/replica_cron.py b/coriolis/cmd/transfer_cron.py similarity index 92% rename from coriolis/cmd/replica_cron.py rename to coriolis/cmd/transfer_cron.py index 80eafc5b..3605389c 100644 --- a/coriolis/cmd/replica_cron.py +++ b/coriolis/cmd/transfer_cron.py @@ -20,7 +20,7 @@ def main(): server = service.MessagingService( constants.TRANSFER_CRON_MAIN_MESSAGING_TOPIC, - [rpc_server.ReplicaCronServerEndpoint()], + [rpc_server.TransferCronServerEndpoint()], rpc_server.VERSION, worker_count=1) launcher = service.service.launch( CONF, server, workers=server.get_workers_count()) diff --git a/coriolis/deployments/api.py b/coriolis/deployments/api.py index dbb4fe95..8cc21eec 100644 --- a/coriolis/deployments/api.py +++ b/coriolis/deployments/api.py @@ -8,12 +8,12 @@ class API(object): def __init__(self): self._rpc_client = rpc_client.ConductorClient() - def deploy_replica_instances(self, ctxt, 
replica_id, - instance_osmorphing_minion_pool_mappings, - clone_disks=False, force=False, - skip_os_morphing=False, user_scripts=None): + def deploy_transfer_instances(self, ctxt, transfer_id, + instance_osmorphing_minion_pool_mappings, + clone_disks=False, force=False, + skip_os_morphing=False, user_scripts=None): return self._rpc_client.deploy_transfer_instances( - ctxt, replica_id, instance_osmorphing_minion_pool_mappings=( + ctxt, transfer_id, instance_osmorphing_minion_pool_mappings=( instance_osmorphing_minion_pool_mappings), clone_disks=clone_disks, force=force, skip_os_morphing=skip_os_morphing, diff --git a/coriolis/minion_manager/rpc/client.py b/coriolis/minion_manager/rpc/client.py index 1de89e2d..5253dc45 100644 --- a/coriolis/minion_manager/rpc/client.py +++ b/coriolis/minion_manager/rpc/client.py @@ -68,16 +68,16 @@ def validate_minion_pool_selections_for_action(self, ctxt, action): action=action) def allocate_minion_machines_for_transfer( - self, ctxt, replica): + self, ctxt, transfer): return self._cast( - ctxt, 'allocate_minion_machines_for_replica', replica=replica) + ctxt, 'allocate_minion_machines_for_transfer', transfer=transfer) def allocate_minion_machines_for_deployment( - self, ctxt, migration, include_transfer_minions=True, + self, ctxt, deployment, include_transfer_minions=True, include_osmorphing_minions=True): return self._cast( - ctxt, 'allocate_minion_machines_for_migration', - migration=migration, + ctxt, 'allocate_minion_machines_for_deployment', + deployment=deployment, include_transfer_minions=include_transfer_minions, include_osmorphing_minions=include_osmorphing_minions) diff --git a/coriolis/minion_manager/rpc/server.py b/coriolis/minion_manager/rpc/server.py index 761a10a1..3d74552f 100644 --- a/coriolis/minion_manager/rpc/server.py +++ b/coriolis/minion_manager/rpc/server.py @@ -67,7 +67,7 @@ def __init__(self): self._scheduler_client_instance = None self._worker_client_instance = None self._conductor_client_instance = None - 
self._replica_cron_client_instance = None + self._transfer_cron_client_instance = None self._minion_manager_client_instance = None try: self._cron = cron.Cron() @@ -510,53 +510,53 @@ def _check_pool_minion_count( "Successfully validated minion pool selections for action '%s' " "with properties: %s", action['id'], action) - def allocate_minion_machines_for_replica( - self, ctxt, replica): + def allocate_minion_machines_for_transfer( + self, ctxt, transfer): try: self._run_machine_allocation_subflow_for_action( - ctxt, replica, + ctxt, transfer, constants.TRANSFER_ACTION_TYPE_TRANSFER, include_transfer_minions=True, include_osmorphing_minions=False) except Exception as ex: LOG.warn( "Error occurred while allocating minion machines for " - "Replica with ID '%s'. Removing all allocations. " + "Transfer with ID '%s'. Removing all allocations. " "Error was: %s" % ( - replica['id'], utils.get_exception_details())) + transfer['id'], utils.get_exception_details())) self._cleanup_machines_with_statuses_for_action( - ctxt, replica['id'], + ctxt, transfer['id'], [constants.MINION_MACHINE_STATUS_UNINITIALIZED]) self.deallocate_minion_machines_for_action( - ctxt, replica['id']) + ctxt, transfer['id']) (self._rpc_conductor_client .report_transfer_minions_allocation_error( - ctxt, replica['id'], str(ex))) + ctxt, transfer['id'], str(ex))) raise - def allocate_minion_machines_for_migration( - self, ctxt, migration, include_transfer_minions=True, + def allocate_minion_machines_for_deployment( + self, ctxt, deployment, include_transfer_minions=True, include_osmorphing_minions=True): try: self._run_machine_allocation_subflow_for_action( - ctxt, migration, + ctxt, deployment, constants.TRANSFER_ACTION_TYPE_DEPLOYMENT, include_transfer_minions=include_transfer_minions, include_osmorphing_minions=include_osmorphing_minions) except Exception as ex: LOG.warn( "Error occurred while allocating minion machines for " - "Migration with ID '%s'. Removing all allocations. 
" + "Deployment with ID '%s'. Removing all allocations. " "Error was: %s" % ( - migration['id'], utils.get_exception_details())) + deployment['id'], utils.get_exception_details())) self._cleanup_machines_with_statuses_for_action( - ctxt, migration['id'], + ctxt, deployment['id'], [constants.MINION_MACHINE_STATUS_UNINITIALIZED]) self.deallocate_minion_machines_for_action( - ctxt, migration['id']) + ctxt, deployment['id']) (self._rpc_conductor_client .report_deployment_minions_allocation_error( - ctxt, migration['id'], str(ex))) + ctxt, deployment['id'], str(ex))) raise def _make_minion_machine_allocation_subflow_for_action( diff --git a/coriolis/policies/deployments.py b/coriolis/policies/deployments.py index 77e20bb0..a24c9d41 100644 --- a/coriolis/policies/deployments.py +++ b/coriolis/policies/deployments.py @@ -54,7 +54,7 @@ def get_deployments_policy_label(rule_label): policy.DocumentedRuleDefault( get_deployments_policy_label('cancel'), DEPLOYMENTS_POLICY_DEFAULT_RULE, - "Cancel a running Migration", + "Cancel a running Deployment", [ { "path": "/deployments/{deployment_id}/actions/", @@ -65,7 +65,7 @@ def get_deployments_policy_label(rule_label): policy.DocumentedRuleDefault( get_deployments_policy_label('delete'), DEPLOYMENTS_POLICY_DEFAULT_RULE, - "Delete Migration", + "Delete Deployment", [ { "path": "/deployment/{deployment_id}", diff --git a/coriolis/policies/migrations.py b/coriolis/policies/migrations.py deleted file mode 100644 index f1570b0b..00000000 --- a/coriolis/policies/migrations.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2018 Cloudbase Solutions Srl -# All Rights Reserved. 
- -from oslo_policy import policy - -from coriolis.policies import base - - -MIGRATIONS_POLICY_PREFIX = "%s:migrations" % base.CORIOLIS_POLICIES_PREFIX -MIGRATIONS_POLICY_DEFAULT_RULE = "rule:admin_or_owner" - - -def get_migrations_policy_label(rule_label): - return "%s:%s" % ( - MIGRATIONS_POLICY_PREFIX, rule_label) - - -MIGRATIONS_POLICY_DEFAULT_RULES = [ - policy.DocumentedRuleDefault( - get_migrations_policy_label('create'), - MIGRATIONS_POLICY_DEFAULT_RULE, - "Create a migration", - [ - { - "path": "/migrations", - "method": "POST" - } - ] - ), - policy.DocumentedRuleDefault( - get_migrations_policy_label('list'), - MIGRATIONS_POLICY_DEFAULT_RULE, - "List migrations", - [ - { - "path": "/migrations", - "method": "GET" - } - ] - ), - policy.DocumentedRuleDefault( - get_migrations_policy_label('show'), - MIGRATIONS_POLICY_DEFAULT_RULE, - "Show details for a migration", - [ - { - "path": "/migrations/{migration_id}", - "method": "GET" - } - ] - ), - # TODO(aznashwan): migration actions should ideally be - # declared in a separate module - policy.DocumentedRuleDefault( - get_migrations_policy_label('cancel'), - MIGRATIONS_POLICY_DEFAULT_RULE, - "Cancel a running Migration", - [ - { - "path": "/migrations/{migration_id}/actions", - "method": "POST" - } - ] - ), - policy.DocumentedRuleDefault( - get_migrations_policy_label('delete'), - MIGRATIONS_POLICY_DEFAULT_RULE, - "Delete Migration", - [ - { - "path": "/migrations/{migration_id}", - "method": "DELETE" - } - ] - ) -] - - -def list_rules(): - return MIGRATIONS_POLICY_DEFAULT_RULES diff --git a/coriolis/policies/replica_schedules.py b/coriolis/policies/replica_schedules.py deleted file mode 100644 index 43088383..00000000 --- a/coriolis/policies/replica_schedules.py +++ /dev/null @@ -1,80 +0,0 @@ -# Copyright 2018 Cloudbase Solutions Srl -# All Rights Reserved. 
- -from oslo_policy import policy - -from coriolis.policies import base - - -REPLICA_SCHEDULES_POLICY_PREFIX = "%s:replica_schedules" % ( - base.CORIOLIS_POLICIES_PREFIX) -REPLICA_SCHEDULES_POLICY_DEFAULT_RULE = "rule:admin_or_owner" - - -def get_replica_schedules_policy_label(rule_label): - return "%s:%s" % ( - REPLICA_SCHEDULES_POLICY_PREFIX, rule_label) - - -REPLICA_SCHEDULES_POLICY_DEFAULT_RULES = [ - policy.DocumentedRuleDefault( - get_replica_schedules_policy_label('create'), - REPLICA_SCHEDULES_POLICY_DEFAULT_RULE, - "Create a new execution schedule for a given Replica", - [ - { - "path": "/replicas/{replica_id}/schedules", - "method": "POST" - } - ] - ), - policy.DocumentedRuleDefault( - get_replica_schedules_policy_label('list'), - REPLICA_SCHEDULES_POLICY_DEFAULT_RULE, - "List execution schedules for a given Replica", - [ - { - "path": "/replicas/{replica_id}/schedules", - "method": "GET" - } - ] - ), - policy.DocumentedRuleDefault( - get_replica_schedules_policy_label('show'), - REPLICA_SCHEDULES_POLICY_DEFAULT_RULE, - "Show details for an execution schedule for a given Replica", - [ - { - "path": "/replicas/{replica_id}/schedules/{schedule_id}", - "method": "GET" - } - ] - ), - policy.DocumentedRuleDefault( - get_replica_schedules_policy_label('update'), - REPLICA_SCHEDULES_POLICY_DEFAULT_RULE, - "Update an existing execution schedule for a given Replica", - [ - { - "path": ( - "/replicas/{replica_id}/schedules/{schedule_id}"), - "method": "PUT" - } - ] - ), - policy.DocumentedRuleDefault( - get_replica_schedules_policy_label('delete'), - REPLICA_SCHEDULES_POLICY_DEFAULT_RULE, - "Delete an execution schedule for a given Replica", - [ - { - "path": "/replicas/{replica_id}/schedules/{schedule_id}", - "method": "DELETE" - } - ] - ) -] - - -def list_rules(): - return REPLICA_SCHEDULES_POLICY_DEFAULT_RULES diff --git a/coriolis/policies/replica_tasks_executions.py b/coriolis/policies/replica_tasks_executions.py deleted file mode 100644 index 
f30a299b..00000000 --- a/coriolis/policies/replica_tasks_executions.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2018 Cloudbase Solutions Srl -# All Rights Reserved. - -from oslo_policy import policy - -from coriolis.policies import base - - -REPLICA_EXECUTIONS_POLICY_PREFIX = "%s:replica_executions" % ( - base.CORIOLIS_POLICIES_PREFIX) -REPLICA_EXECUTIONS_POLICY_DEFAULT_RULE = "rule:admin_or_owner" - - -def get_replica_executions_policy_label(rule_label): - return "%s:%s" % ( - REPLICA_EXECUTIONS_POLICY_PREFIX, rule_label) - - -REPLICA_EXECUTIONS_POLICY_DEFAULT_RULES = [ - policy.DocumentedRuleDefault( - get_replica_executions_policy_label('create'), - REPLICA_EXECUTIONS_POLICY_DEFAULT_RULE, - "Create a new execution for a given Replica", - [ - { - "path": "/replicas/{replica_id}/executions", - "method": "POST" - } - ] - ), - policy.DocumentedRuleDefault( - get_replica_executions_policy_label('list'), - REPLICA_EXECUTIONS_POLICY_DEFAULT_RULE, - "List Executions for a given Replica", - [ - { - "path": "/replicas/{replica_id}/executions", - "method": "GET" - } - ] - ), - policy.DocumentedRuleDefault( - get_replica_executions_policy_label('show'), - REPLICA_EXECUTIONS_POLICY_DEFAULT_RULE, - "Show details for Replica execution", - [ - { - "path": "/replicas/{replica_id}/executions/{execution_id}", - "method": "GET" - } - ] - ), - # TODO(aznashwan): replica actions should ideally be - # declared in a separate module - policy.DocumentedRuleDefault( - get_replica_executions_policy_label('cancel'), - REPLICA_EXECUTIONS_POLICY_DEFAULT_RULE, - "Cancel a Replica execution", - [ - { - "path": ( - "/replicas/{replica_id}/executions/" - "{execution_id}/actions"), - "method": "POST" - } - ] - ), - policy.DocumentedRuleDefault( - get_replica_executions_policy_label('delete'), - REPLICA_EXECUTIONS_POLICY_DEFAULT_RULE, - "Delete an execution for a given Replica", - [ - { - "path": "/replicas/{replica_id}/executions/{execution_id}", - "method": "DELETE" - } - ] - ) -] - - -def 
list_rules(): - return REPLICA_EXECUTIONS_POLICY_DEFAULT_RULES diff --git a/coriolis/policies/replicas.py b/coriolis/policies/replicas.py deleted file mode 100644 index a48f517d..00000000 --- a/coriolis/policies/replicas.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2018 Cloudbase Solutions Srl -# All Rights Reserved. - -from oslo_policy import policy - -from coriolis.policies import base - - -REPLICAS_POLICY_PREFIX = "%s:replicas" % base.CORIOLIS_POLICIES_PREFIX -REPLICAS_POLICY_DEFAULT_RULE = "rule:admin_or_owner" - - -def get_replicas_policy_label(rule_label): - return "%s:%s" % ( - REPLICAS_POLICY_PREFIX, rule_label) - - -REPLICAS_POLICY_DEFAULT_RULES = [ - policy.DocumentedRuleDefault( - get_replicas_policy_label('create'), - REPLICAS_POLICY_DEFAULT_RULE, - "Create a Replica", - [ - { - "path": "/replicas", - "method": "POST" - } - ] - ), - policy.DocumentedRuleDefault( - get_replicas_policy_label('list'), - REPLICAS_POLICY_DEFAULT_RULE, - "List Replicas", - [ - { - "path": "/replicas", - "method": "GET" - } - ] - ), - policy.DocumentedRuleDefault( - get_replicas_policy_label('show'), - REPLICAS_POLICY_DEFAULT_RULE, - "Show details for Replica", - [ - { - "path": "/replicas/{replica_id}", - "method": "GET" - } - ] - ), - # TODO(aznashwan): replica actions should ideally be - # declared in a separate module - policy.DocumentedRuleDefault( - get_replicas_policy_label('delete_disks'), - REPLICAS_POLICY_DEFAULT_RULE, - "Delete Replica Disks", - [ - { - "path": "/replicas/{replica_id}/actions", - "method": "POST" - } - ] - ), - policy.DocumentedRuleDefault( - get_replicas_policy_label('delete'), - REPLICAS_POLICY_DEFAULT_RULE, - "Delete Replica", - [ - { - "path": "/replicas/{replica_id}", - "method": "DELETE" - } - ] - ), - policy.DocumentedRuleDefault( - get_replicas_policy_label('update'), - REPLICAS_POLICY_DEFAULT_RULE, - "Update Replica", - [ - { - "path": "/replicas/{replica_id}", - "method": "POST" - } - ] - ) - -] - - -def list_rules(): - return 
REPLICAS_POLICY_DEFAULT_RULES diff --git a/coriolis/policies/transfer_schedules.py b/coriolis/policies/transfer_schedules.py new file mode 100644 index 00000000..40951872 --- /dev/null +++ b/coriolis/policies/transfer_schedules.py @@ -0,0 +1,80 @@ +# Copyright 2018 Cloudbase Solutions Srl +# All Rights Reserved. + +from oslo_policy import policy + +from coriolis.policies import base + + +TRANSFER_SCHEDULES_POLICY_PREFIX = "%s:transfer_schedules" % ( + base.CORIOLIS_POLICIES_PREFIX) +TRANSFER_SCHEDULES_POLICY_DEFAULT_RULE = "rule:admin_or_owner" + + +def get_transfer_schedules_policy_label(rule_label): + return "%s:%s" % ( + TRANSFER_SCHEDULES_POLICY_PREFIX, rule_label) + + +TRANSFER_SCHEDULES_POLICY_DEFAULT_RULES = [ + policy.DocumentedRuleDefault( + get_transfer_schedules_policy_label('create'), + TRANSFER_SCHEDULES_POLICY_DEFAULT_RULE, + "Create a new execution schedule for a given Transfer", + [ + { + "path": "/transfers/{transfer_id}/schedules", + "method": "POST" + } + ] + ), + policy.DocumentedRuleDefault( + get_transfer_schedules_policy_label('list'), + TRANSFER_SCHEDULES_POLICY_DEFAULT_RULE, + "List execution schedules for a given Transfer", + [ + { + "path": "/transfers/{transfer_id}/schedules", + "method": "GET" + } + ] + ), + policy.DocumentedRuleDefault( + get_transfer_schedules_policy_label('show'), + TRANSFER_SCHEDULES_POLICY_DEFAULT_RULE, + "Show details for an execution schedule for a given Transfer", + [ + { + "path": "/transfers/{transfer_id}/schedules/{schedule_id}", + "method": "GET" + } + ] + ), + policy.DocumentedRuleDefault( + get_transfer_schedules_policy_label('update'), + TRANSFER_SCHEDULES_POLICY_DEFAULT_RULE, + "Update an existing execution schedule for a given Transfer", + [ + { + "path": ( + "/transfers/{transfer_id}/schedules/{schedule_id}"), + "method": "PUT" + } + ] + ), + policy.DocumentedRuleDefault( + get_transfer_schedules_policy_label('delete'), + TRANSFER_SCHEDULES_POLICY_DEFAULT_RULE, + "Delete an execution schedule for a 
given Transfer", + [ + { + "path": "/transfers/{transfer_id}/schedules/{schedule_id}", + "method": "DELETE" + } + ] + ) +] + + +def list_rules(): + return TRANSFER_SCHEDULES_POLICY_DEFAULT_RULES diff --git a/coriolis/policies/transfer_tasks_executions.py b/coriolis/policies/transfer_tasks_executions.py new file mode 100644 index 00000000..b653149b --- /dev/null +++ b/coriolis/policies/transfer_tasks_executions.py @@ -0,0 +1,83 @@ +# Copyright 2018 Cloudbase Solutions Srl +# All Rights Reserved. + +from oslo_policy import policy + +from coriolis.policies import base + + +TRANSFER_EXECUTIONS_POLICY_PREFIX = "%s:transfer_executions" % ( + base.CORIOLIS_POLICIES_PREFIX) +TRANSFER_EXECUTIONS_POLICY_DEFAULT_RULE = "rule:admin_or_owner" + + +def get_transfer_executions_policy_label(rule_label): + return "%s:%s" % ( + TRANSFER_EXECUTIONS_POLICY_PREFIX, rule_label) + + +TRANSFER_EXECUTIONS_POLICY_DEFAULT_RULES = [ + policy.DocumentedRuleDefault( + get_transfer_executions_policy_label('create'), + TRANSFER_EXECUTIONS_POLICY_DEFAULT_RULE, + "Create a new execution for a given Transfer", + [ + { + "path": "/transfers/{transfer_id}/executions", + "method": "POST" + } + ] + ), + policy.DocumentedRuleDefault( + get_transfer_executions_policy_label('list'), + TRANSFER_EXECUTIONS_POLICY_DEFAULT_RULE, + "List Executions for a given Transfer", + [ + { + "path": "/transfers/{transfer_id}/executions", + "method": "GET" + } + ] + ), + policy.DocumentedRuleDefault( + get_transfer_executions_policy_label('show'), + TRANSFER_EXECUTIONS_POLICY_DEFAULT_RULE, + "Show details for Transfer execution", + [ + { + "path": "/transfers/{transfer_id}/executions/{execution_id}", + "method": "GET" + } + ] + ), + # TODO(aznashwan): transfer actions should ideally be + # declared in a separate module + policy.DocumentedRuleDefault( + get_transfer_executions_policy_label('cancel'), + TRANSFER_EXECUTIONS_POLICY_DEFAULT_RULE, + "Cancel a Transfer execution", + [ + { + "path": ( + 
"/transfers/{transfer_id}/executions/" + "{execution_id}/actions"), + "method": "POST" + } + ] + ), + policy.DocumentedRuleDefault( + get_transfer_executions_policy_label('delete'), + TRANSFER_EXECUTIONS_POLICY_DEFAULT_RULE, + "Delete an execution for a given Transfer", + [ + { + "path": "/transfers/{transfer_id}/executions/{execution_id}", + "method": "DELETE" + } + ] + ) +] + + +def list_rules(): + return TRANSFER_EXECUTIONS_POLICY_DEFAULT_RULES diff --git a/coriolis/policies/transfers.py b/coriolis/policies/transfers.py new file mode 100644 index 00000000..85978d5a --- /dev/null +++ b/coriolis/policies/transfers.py @@ -0,0 +1,92 @@ +# Copyright 2018 Cloudbase Solutions Srl +# All Rights Reserved. + +from oslo_policy import policy + +from coriolis.policies import base + + +TRANSFERS_POLICY_PREFIX = "%s:transfers" % base.CORIOLIS_POLICIES_PREFIX +TRANSFERS_POLICY_DEFAULT_RULE = "rule:admin_or_owner" + + +def get_transfers_policy_label(rule_label): + return "%s:%s" % ( + TRANSFERS_POLICY_PREFIX, rule_label) + + +TRANSFERS_POLICY_DEFAULT_RULES = [ + policy.DocumentedRuleDefault( + get_transfers_policy_label('create'), + TRANSFERS_POLICY_DEFAULT_RULE, + "Create a Transfer", + [ + { + "path": "/transfers", + "method": "POST" + } + ] + ), + policy.DocumentedRuleDefault( + get_transfers_policy_label('list'), + TRANSFERS_POLICY_DEFAULT_RULE, + "List Transfers", + [ + { + "path": "/transfers", + "method": "GET" + } + ] + ), + policy.DocumentedRuleDefault( + get_transfers_policy_label('show'), + TRANSFERS_POLICY_DEFAULT_RULE, + "Show details for Transfer", + [ + { + "path": "/transfers/{transfer_id}", + "method": "GET" + } + ] + ), + # TODO(aznashwan): transfer actions should ideally be + # declared in a separate module + policy.DocumentedRuleDefault( + get_transfers_policy_label('delete_disks'), + TRANSFERS_POLICY_DEFAULT_RULE, + "Delete Transfer Disks", + [ + { + "path": "/transfers/{transfer_id}/actions", + "method": "POST" + } + ] + ), + policy.DocumentedRuleDefault( + 
get_transfers_policy_label('delete'), + TRANSFERS_POLICY_DEFAULT_RULE, + "Delete Transfer", + [ + { + "path": "/transfers/{transfer_id}", + "method": "DELETE" + } + ] + ), + policy.DocumentedRuleDefault( + get_transfers_policy_label('update'), + TRANSFERS_POLICY_DEFAULT_RULE, + "Update Transfer", + [ + { + "path": "/transfers/{transfer_id}", + "method": "POST" + } + ] + ) + +] + + +def list_rules(): + return TRANSFERS_POLICY_DEFAULT_RULES diff --git a/coriolis/policy.py b/coriolis/policy.py index 37ad18f3..297a4c92 100644 --- a/coriolis/policy.py +++ b/coriolis/policy.py @@ -13,13 +13,12 @@ from coriolis.policies import diagnostics from coriolis.policies import endpoints from coriolis.policies import general -from coriolis.policies import migrations from coriolis.policies import minion_pools from coriolis.policies import regions -from coriolis.policies import replica_schedules -from coriolis.policies import replica_tasks_executions -from coriolis.policies import replicas from coriolis.policies import services +from coriolis.policies import transfer_schedules +from coriolis.policies import transfer_tasks_executions +from coriolis.policies import transfers from coriolis import utils @@ -29,8 +28,8 @@ _ENFORCER = None DEFAULT_POLICIES_MODULES = [ - base, deployments, endpoints, general, migrations, replicas, - replica_schedules, replica_tasks_executions, diagnostics, regions, + base, deployments, endpoints, general, transfers, + transfer_schedules, transfer_tasks_executions, diagnostics, regions, services, minion_pools] diff --git a/coriolis/replica_tasks_executions/api.py b/coriolis/replica_tasks_executions/api.py deleted file mode 100644 index 234e785a..00000000 --- a/coriolis/replica_tasks_executions/api.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2016 Cloudbase Solutions Srl -# All Rights Reserved. 
- -from coriolis.conductor.rpc import client as rpc_client - - -class API(object): - def __init__(self): - self._rpc_client = rpc_client.ConductorClient() - - def create(self, ctxt, replica_id, shutdown_instances): - return self._rpc_client.execute_transfer_tasks( - ctxt, replica_id, shutdown_instances) - - def delete(self, ctxt, replica_id, execution_id): - self._rpc_client.delete_transfer_tasks_execution( - ctxt, replica_id, execution_id) - - def cancel(self, ctxt, replica_id, execution_id, force): - self._rpc_client.cancel_transfer_tasks_execution( - ctxt, replica_id, execution_id, force) - - def get_executions(self, ctxt, replica_id, include_tasks=False): - return self._rpc_client.get_transfer_tasks_executions( - ctxt, replica_id, include_tasks) - - def get_execution(self, ctxt, replica_id, execution_id): - return self._rpc_client.get_transfer_tasks_execution( - ctxt, replica_id, execution_id) diff --git a/coriolis/schemas.py b/coriolis/schemas.py index 278a25dd..3a84be73 100644 --- a/coriolis/schemas.py +++ b/coriolis/schemas.py @@ -31,7 +31,7 @@ _CORIOLIS_VM_INSTANCE_INFO_SCHEMA_NAME = "vm_instance_info_schema.json" _CORIOLIS_OS_MORPHING_RES_SCHEMA_NAME = "os_morphing_resources_schema.json" _CORIOLIS_VM_NETWORK_SCHEMA_NAME = "vm_network_schema.json" -_SCHEDULE_API_BODY_SCHEMA_NAME = "replica_schedule_schema.json" +_SCHEDULE_API_BODY_SCHEMA_NAME = "transfer_schedule_schema.json" _CORIOLIS_DESTINATION_OPTIONS_SCHEMA_NAME = "destination_options_schema.json" _CORIOLIS_SOURCE_OPTIONS_SCHEMA_NAME = "source_options_schema.json" _CORIOLIS_NETWORK_MAP_SCHEMA_NAME = "network_map_schema.json" diff --git a/coriolis/schemas/disk_sync_resources_info_schema.json b/coriolis/schemas/disk_sync_resources_info_schema.json index 0aa0becb..4a86c375 100644 --- a/coriolis/schemas/disk_sync_resources_info_schema.json +++ b/coriolis/schemas/disk_sync_resources_info_schema.json @@ -1,7 +1,7 @@ { "$schema": "http://cloudbase.it/coriolis/schemas/disk_sync_resources_info#", "type": 
"object", - "description": "Information returned after the 'DEPLOY_REPLICA_TARGET_RESOURCES' task and passed to 'REPLICATE_DISKS', as well as for 'DEPLOY_DISK_COPY_RESOURCES' and 'COPY_DISKS_DATA'. The only required property is the 'volumes_info', and the provider plugins may freely declare and use any other fields.", + "description": "Information returned after the 'DEPLOY_TRANSFER_DISKS' task and passed to 'DEPLOY_TRANSFER_TARGET_RESOURCES' and 'REPLICATE_DISKS'. The only required property is the 'volumes_info', and the provider plugins may freely declare and use any other fields.", "properties": { "volumes_info": { "type": "array", diff --git a/coriolis/schemas/replica_schedule_schema.json b/coriolis/schemas/transfer_schedule_schema.json similarity index 93% rename from coriolis/schemas/replica_schedule_schema.json rename to coriolis/schemas/transfer_schedule_schema.json index 89dba79a..b7c6285d 100644 --- a/coriolis/schemas/replica_schedule_schema.json +++ b/coriolis/schemas/transfer_schedule_schema.json @@ -1,5 +1,5 @@ { - "$schema": "http://cloudbase.it/coriolis/schemas/replica_schedule_schema#", + "$schema": "http://cloudbase.it/coriolis/schemas/transfer_schedule_schema#", "type": "object", "properties": { "schedule": { diff --git a/coriolis/tests/api/v1/data/migration_create.yml b/coriolis/tests/api/v1/data/migration_create.yml deleted file mode 100644 index dd220e23..00000000 --- a/coriolis/tests/api/v1/data/migration_create.yml +++ /dev/null @@ -1,28 +0,0 @@ - -- config: - migration: - user_scripts: - mock_user_scripts: null - instances: ["mock_instance1", "mock_instance2"] - replica_id: 'mock_replica_id' - clone_disks: True - force: False - skip_os_morphing: False - instance_osmorphing_minion_pool_mappings: - mock_mapping: "mock_value" - expected_api_method: "deploy_replica_instances" - validation_expected: False - -- config: - migration: - user_scripts: - mock_user_scripts: null - instances: ["mock_instance1", "mock_instance2"] - replica_id: null - 
clone_disks: True - force: False - skip_os_morphing: False - instance_osmorphing_minion_pool_mappings: - mock_mapping: "mock_value" - expected_api_method: "migrate_instances" - validation_expected: True diff --git a/coriolis/tests/api/v1/data/migration_validate_input.yml b/coriolis/tests/api/v1/data/migration_validate_input.yml deleted file mode 100644 index d8f7d680..00000000 --- a/coriolis/tests/api/v1/data/migration_validate_input.yml +++ /dev/null @@ -1,48 +0,0 @@ - -- config: - migration: - origin_endpoint_id: "mock_origin_endpoint_id" - destination_endpoint_id: "mock_destination_endpoint_id" - origin_minion_pool_id: "mock_origin_minion_pool_id" - destination_minion_pool_id: "mock_destination_minion_pool_id" - instance_osmorphing_minion_pool_mappings: - mock_instance_1: "mock_pool" - mock_instance_2: "mock_pool" - instances: ['mock_instance_1', 'mock_instance_2'] - notes: "mock_notes" - skip_os_morphing: false - shutdown_instances: false - replication_count: 2 - source_environment: {} - network_map: {} - destination_environment: - network_map: {} - storage_mappings: {} - storage_mappings: {} - raises_value_error: false - -- config: - migration: - origin_endpoint_id: "mock_origin_endpoint_id" - destination_endpoint_id: "mock_destination_endpoint_id" - origin_minion_pool_id: "mock_origin_minion_pool_id" - destination_minion_pool_id: "mock_destination_minion_pool_id" - instance_osmorphing_minion_pool_mappings: - mock_instance_1: "mock_pool" - mock_instance_2: "mock_pool" - instances: ['mock_instance_1', 'mock_instance_3'] - raises_value_error: true - - -- config: - migration: - origin_endpoint_id: "mock_origin_endpoint_id" - destination_endpoint_id: "mock_destination_endpoint_id" - origin_minion_pool_id: "mock_origin_minion_pool_id" - destination_minion_pool_id: "mock_destination_minion_pool_id" - instance_osmorphing_minion_pool_mappings: - mock_instance_1: "mock_pool" - mock_instance_2: "mock_pool" - instances: ['mock_instance_1', 'mock_instance_2'] - 
replication_count: 13 - raises_value_error: true \ No newline at end of file diff --git a/coriolis/tests/api/v1/data/replica_task_execution_actions_cancel.yml b/coriolis/tests/api/v1/data/transfer_task_execution_actions_cancel.yml similarity index 100% rename from coriolis/tests/api/v1/data/replica_task_execution_actions_cancel.yml rename to coriolis/tests/api/v1/data/transfer_task_execution_actions_cancel.yml diff --git a/coriolis/tests/api/v1/data/replicas_get_merged_replica_values.yml b/coriolis/tests/api/v1/data/transfers_get_merged_transfer_values.yml similarity index 98% rename from coriolis/tests/api/v1/data/replicas_get_merged_replica_values.yml rename to coriolis/tests/api/v1/data/transfers_get_merged_transfer_values.yml index 83d5a4a5..83f2abe2 100644 --- a/coriolis/tests/api/v1/data/replicas_get_merged_replica_values.yml +++ b/coriolis/tests/api/v1/data/transfers_get_merged_transfer_values.yml @@ -1,6 +1,6 @@ - config: - replica: + transfer: origin_endpoint_id: "mock_origin_endpoint_id" destination_endpoint_id: "mock_destination_endpoint_id" source_environment: {'mock_source_key': 'mock_source_value'} @@ -14,7 +14,7 @@ instance_osmorphing_minion_pool_mappings: mock_instance_1: "mock_pool_1" mock_instance_2: "mock_pool_2" - updated_values: + updated_values: source_environment: {'mock_updated_source_key': 'mock_updated_source_value'} destination_environment: storage_mappings: {'mock_updated_destination_key': 'mock_updated_destination_value'} @@ -47,7 +47,7 @@ mock_instance_2: "mock_updated_pool_2" - config: - replica: + transfer: origin_endpoint_id: "mock_origin_endpoint_id" destination_endpoint_id: "mock_destination_endpoint_id" source_environment: {'mock_source_key': 'mock_source_value'} @@ -75,7 +75,7 @@ notes: "mock_notes" - config: - replica: + transfer: origin_endpoint_id: "mock_origin_endpoint_id" destination_endpoint_id: "mock_destination_endpoint_id" user_scripts: {'mock_scripts_key': 'mock_scripts_value'} diff --git 
a/coriolis/tests/api/v1/data/replicas_update_storage_mappings.yml b/coriolis/tests/api/v1/data/transfers_update_storage_mappings.yml similarity index 100% rename from coriolis/tests/api/v1/data/replicas_update_storage_mappings.yml rename to coriolis/tests/api/v1/data/transfers_update_storage_mappings.yml diff --git a/coriolis/tests/api/v1/data/replicas_validate_create_body.yml b/coriolis/tests/api/v1/data/transfers_validate_create_body.yml similarity index 98% rename from coriolis/tests/api/v1/data/replicas_validate_create_body.yml rename to coriolis/tests/api/v1/data/transfers_validate_create_body.yml index dacd4d9c..9e229186 100644 --- a/coriolis/tests/api/v1/data/replicas_validate_create_body.yml +++ b/coriolis/tests/api/v1/data/transfers_validate_create_body.yml @@ -1,7 +1,7 @@ - config: body: - replica: + transfer: origin_endpoint_id: "mock_origin_endpoint_id" destination_endpoint_id: "mock_destination_endpoint_id" source_environment: "mock_source_environment" @@ -36,7 +36,7 @@ - config: body: - replica: + transfer: origin_endpoint_id: "mock_origin_endpoint_id" destination_endpoint_id: "mock_destination_endpoint_id" source_environment: "mock_source_environment" diff --git a/coriolis/tests/api/v1/data/replicas_validate_update_body.yml b/coriolis/tests/api/v1/data/transfers_validate_update_body.yml similarity index 98% rename from coriolis/tests/api/v1/data/replicas_validate_update_body.yml rename to coriolis/tests/api/v1/data/transfers_validate_update_body.yml index 5b763f1c..2448e517 100644 --- a/coriolis/tests/api/v1/data/replicas_validate_update_body.yml +++ b/coriolis/tests/api/v1/data/transfers_validate_update_body.yml @@ -1,7 +1,7 @@ - config: body: - replica: + transfer: source_environment: "mock_source_environment" destination_environment: "mock_destination_environment" storage_mappings: {'mock_updated_destination_key': 'mock_updated_destination_value'} @@ -13,7 +13,7 @@ instance_osmorphing_minion_pool_mappings: mock_instance_1: "mock_updated_pool_1" 
mock_instance_2: "mock_updated_pool_2" - replica: + transfer: destination_endpoint_id: "mock_destination_endpoint_id" origin_endpoint_id: "mock_origin_endpoint_id" instances: "mock_instances" diff --git a/coriolis/tests/api/v1/data/replicas_validate_update_body_raises.yml b/coriolis/tests/api/v1/data/transfers_validate_update_body_raises.yml similarity index 80% rename from coriolis/tests/api/v1/data/replicas_validate_update_body_raises.yml rename to coriolis/tests/api/v1/data/transfers_validate_update_body_raises.yml index 37dfd585..bd246ffb 100644 --- a/coriolis/tests/api/v1/data/replicas_validate_update_body_raises.yml +++ b/coriolis/tests/api/v1/data/transfers_validate_update_body_raises.yml @@ -1,13 +1,13 @@ - body: - replica: + transfer: origin_endpoint_id: "mock_origin_endpoint_id" - body: - replica: + transfer: destination_endpoint_id: "mock_destination_endpoint_id" - body: - replica: + transfer: instances: "instances" diff --git a/coriolis/tests/api/v1/test_router.py b/coriolis/tests/api/v1/test_router.py index c945ec8e..3dc14549 100644 --- a/coriolis/tests/api/v1/test_router.py +++ b/coriolis/tests/api/v1/test_router.py @@ -20,13 +20,13 @@ from coriolis.api.v1 import provider_schemas from coriolis.api.v1 import providers from coriolis.api.v1 import regions -from coriolis.api.v1 import replica_actions -from coriolis.api.v1 import replica_schedules -from coriolis.api.v1 import replica_tasks_execution_actions -from coriolis.api.v1 import replica_tasks_executions -from coriolis.api.v1 import replicas from coriolis.api.v1 import router from coriolis.api.v1 import services +from coriolis.api.v1 import transfer_actions +from coriolis.api.v1 import transfer_schedules +from coriolis.api.v1 import transfer_tasks_execution_actions +from coriolis.api.v1 import transfer_tasks_executions +from coriolis.api.v1 import transfers from coriolis.tests import test_base @@ -40,11 +40,11 @@ def setUp(self): @mock.patch.object(deployments, 'create_resource') 
@mock.patch.object(deployment_actions, 'create_resource') @mock.patch.object(diagnostics, 'create_resource') - @mock.patch.object(replica_schedules, 'create_resource') - @mock.patch.object(replica_tasks_execution_actions, 'create_resource') - @mock.patch.object(replica_tasks_executions, 'create_resource') - @mock.patch.object(replica_actions, 'create_resource') - @mock.patch.object(replicas, 'create_resource') + @mock.patch.object(transfer_schedules, 'create_resource') + @mock.patch.object(transfer_tasks_execution_actions, 'create_resource') + @mock.patch.object(transfer_tasks_executions, 'create_resource') + @mock.patch.object(transfer_actions, 'create_resource') + @mock.patch.object(transfers, 'create_resource') @mock.patch.object(provider_schemas, 'create_resource') @mock.patch.object(endpoint_source_options, 'create_resource') @mock.patch.object(endpoint_destination_options, 'create_resource') @@ -78,11 +78,11 @@ def test_setup_routes( mock_endpoint_destination_options_create_resource, mock_endpoint_source_options_create_resource, mock_provider_schemas_create_resource, - mock_replicas_create_resource, - mock_replica_actions_create_resource, - mock_replica_tasks_executions_create_resource, - mock_replica_tasks_execution_actions_create_resource, - mock_replica_schedules_create_resource, + mock_transfers_create_resource, + mock_transfer_actions_create_resource, + mock_transfer_tasks_executions_create_resource, + mock_transfer_tasks_execution_actions_create_resource, + mock_transfer_schedules_create_resource, mock_diagnostics_create_resource, mock_deployment_actions_create_resource, mock_deployments_create_resource @@ -161,24 +161,24 @@ def test_setup_routes( controller=mock_provider_schemas_create_resource.return_value, ), mock.call( - 'replica', 'replicas', - controller=mock_replicas_create_resource.return_value, + 'transfer', 'transfers', + controller=mock_transfers_create_resource.return_value, collection={'detail': 'GET'}, member={'action': 'POST'} ), 
mock.call( 'execution', - 'replicas/{replica_id}/executions', + 'transfers/{transfer_id}/executions', controller= - mock_replica_tasks_executions_create_resource.return_value, + mock_transfer_tasks_executions_create_resource.return_value, collection={'detail': 'GET'}, member={'action': 'POST'} ), mock.call( - 'replica_schedule', - 'replicas/{replica_id}/schedules', + 'transfer_schedule', + 'transfers/{transfer_id}/schedules', controller= - mock_replica_schedules_create_resource.return_value, + mock_transfer_schedules_create_resource.return_value, collection={'index': 'GET'}, member={'action': 'POST'} ), @@ -212,17 +212,18 @@ def test_setup_routes( conditions={'method': 'POST'} ), mock.call( - 'replica_actions', - '/{project_id}/replicas/{id}/actions', - controller=mock_replica_actions_create_resource.return_value, + 'transfer_actions', + '/{project_id}/transfers/{id}/actions', + controller=mock_transfer_actions_create_resource.return_value, action='action', conditions={'method': 'POST'} ), mock.call( - 'replica_tasks_execution_actions', - '/{project_id}/replicas/{replica_id}/executions/{id}/actions', + 'transfer_tasks_execution_actions', + '/{project_id}/transfers/{transfer_id}/' + 'executions/{id}/actions', controller= - mock_replica_tasks_execution_actions_create_resource. + mock_transfer_tasks_execution_actions_create_resource. 
return_value, action='action', conditions={'method': 'POST'} diff --git a/coriolis/tests/api/v1/test_replica_actions.py b/coriolis/tests/api/v1/test_transfer_actions.py similarity index 70% rename from coriolis/tests/api/v1/test_replica_actions.py rename to coriolis/tests/api/v1/test_transfer_actions.py index f4b3a889..bbfc4aaa 100644 --- a/coriolis/tests/api/v1/test_replica_actions.py +++ b/coriolis/tests/api/v1/test_transfer_actions.py @@ -5,22 +5,22 @@ from webob import exc -from coriolis.api.v1 import replica_actions -from coriolis.api.v1.views import replica_tasks_execution_view +from coriolis.api.v1 import transfer_actions +from coriolis.api.v1.views import transfer_tasks_execution_view from coriolis import exception -from coriolis.replicas import api from coriolis.tests import test_base from coriolis.tests import testutils +from coriolis.transfers import api -class ReplicaActionsControllerTestCase(test_base.CoriolisBaseTestCase): - """Test suite for the Coriolis Replica Actions v1 API""" +class TransferActionsControllerTestCase(test_base.CoriolisBaseTestCase): + """Test suite for the Coriolis Transfer Actions v1 API""" def setUp(self): - super(ReplicaActionsControllerTestCase, self).setUp() - self.replica_actions = replica_actions.ReplicaActionsController() + super(TransferActionsControllerTestCase, self).setUp() + self.transfer_actions = transfer_actions.TransferActionsController() - @mock.patch.object(replica_tasks_execution_view, 'single') + @mock.patch.object(transfer_tasks_execution_view, 'single') @mock.patch.object(api.API, 'delete_disks') def test_delete_disks( self, @@ -34,7 +34,7 @@ def test_delete_disks( body = mock.sentinel.body result = testutils.get_wrapped_function( - self.replica_actions._delete_disks)( + self.transfer_actions._delete_disks)( mock_req, id, body @@ -46,11 +46,11 @@ def test_delete_disks( ) mock_context.can.assert_called_once_with( - "migration:replicas:delete_disks") + "migration:transfers:delete_disks") 
mock_delete_disks.assert_called_once_with(mock_context, id) mock_single.assert_called_once_with(mock_delete_disks.return_value) - @mock.patch.object(replica_tasks_execution_view, 'single') + @mock.patch.object(transfer_tasks_execution_view, 'single') @mock.patch.object(api.API, 'delete_disks') def test_delete_disks_not_found( self, @@ -66,18 +66,19 @@ def test_delete_disks_not_found( self.assertRaises( exc.HTTPNotFound, - testutils.get_wrapped_function(self.replica_actions._delete_disks), + testutils.get_wrapped_function( + self.transfer_actions._delete_disks), req=mock_req, id=id, body=body ) mock_context.can.assert_called_once_with( - "migration:replicas:delete_disks") + "migration:transfers:delete_disks") mock_delete_disks.assert_called_once_with(mock_context, id) mock_single.assert_not_called() - @mock.patch.object(replica_tasks_execution_view, 'single') + @mock.patch.object(transfer_tasks_execution_view, 'single') @mock.patch.object(api.API, 'delete_disks') def test_delete_disks_invalid_parameter_value( self, @@ -93,13 +94,14 @@ def test_delete_disks_invalid_parameter_value( self.assertRaises( exc.HTTPNotFound, - testutils.get_wrapped_function(self.replica_actions._delete_disks), + testutils.get_wrapped_function( + self.transfer_actions._delete_disks), req=mock_req, id=id, body=body ) mock_context.can.assert_called_once_with( - "migration:replicas:delete_disks") + "migration:transfers:delete_disks") mock_delete_disks.assert_called_once_with(mock_context, id) mock_single.assert_not_called() diff --git a/coriolis/tests/api/v1/test_replica_schedules.py b/coriolis/tests/api/v1/test_transfer_schedules.py similarity index 73% rename from coriolis/tests/api/v1/test_replica_schedules.py rename to coriolis/tests/api/v1/test_transfer_schedules.py index 0fcaec7e..d143f7da 100644 --- a/coriolis/tests/api/v1/test_replica_schedules.py +++ b/coriolis/tests/api/v1/test_transfer_schedules.py @@ -7,22 +7,23 @@ import jsonschema from webob import exc -from coriolis.api.v1 import 
replica_schedules -from coriolis.api.v1.views import replica_schedule_view +from coriolis.api.v1 import transfer_schedules +from coriolis.api.v1.views import transfer_schedule_view from coriolis import exception from coriolis import schemas from coriolis.tests import test_base from coriolis.transfer_cron import api -class ReplicaScheduleControllerTestCase(test_base.CoriolisBaseTestCase): - """Test suite for the Coriolis Replica Schedule v1 API""" +class TransferScheduleControllerTestCase(test_base.CoriolisBaseTestCase): + """Test suite for the Coriolis Transfer Schedule v1 API""" def setUp(self): - super(ReplicaScheduleControllerTestCase, self).setUp() - self.replica_schedules = replica_schedules.ReplicaScheduleController() + super(TransferScheduleControllerTestCase, self).setUp() + self.transfer_schedules = ( + transfer_schedules.TransferScheduleController()) - @mock.patch.object(replica_schedule_view, 'single') + @mock.patch.object(transfer_schedule_view, 'single') @mock.patch.object(api.API, 'get_schedule') def test_show( self, @@ -33,9 +34,9 @@ def test_show( mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} id = mock.sentinel.id - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id - result = self.replica_schedules.show(mock_req, replica_id, id) + result = self.transfer_schedules.show(mock_req, transfer_id, id) self.assertEqual( mock_single.return_value, @@ -43,11 +44,12 @@ def test_show( ) mock_context.can.assert_called_once_with( - "migration:replica_schedules:show") - mock_get_schedule.assert_called_once_with(mock_context, replica_id, id) + "migration:transfer_schedules:show") + mock_get_schedule.assert_called_once_with( + mock_context, transfer_id, id) mock_single.assert_called_once_with(mock_get_schedule.return_value) - @mock.patch.object(replica_schedule_view, 'single') + @mock.patch.object(transfer_schedule_view, 'single') @mock.patch.object(api.API, 'get_schedule') def test_show_not_found( 
self, @@ -58,23 +60,24 @@ def test_show_not_found( mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} id = mock.sentinel.id - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id mock_get_schedule.return_value = None self.assertRaises( exc.HTTPNotFound, - self.replica_schedules.show, + self.transfer_schedules.show, mock_req, - replica_id, + transfer_id, id ) mock_context.can.assert_called_once_with( - "migration:replica_schedules:show") - mock_get_schedule.assert_called_once_with(mock_context, replica_id, id) + "migration:transfer_schedules:show") + mock_get_schedule.assert_called_once_with( + mock_context, transfer_id, id) mock_single.assert_not_called() - @mock.patch.object(replica_schedule_view, 'collection') + @mock.patch.object(transfer_schedule_view, 'collection') @mock.patch.object(api.API, 'get_schedules') def test_index( self, @@ -84,20 +87,20 @@ def test_index( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id mock_req.GET = {"show_expired": "False"} - result = self.replica_schedules.index(mock_req, replica_id) + result = self.transfer_schedules.index(mock_req, transfer_id) self.assertEqual( mock_collection.return_value, result ) mock_context.can.assert_called_once_with( - "migration:replica_schedules:list") + "migration:transfer_schedules:list") mock_get_schedules.assert_called_once_with( mock_context, - replica_id, + transfer_id, expired=False ) mock_collection.assert_called_once_with( @@ -112,7 +115,7 @@ def test_validate_schedule( ): schedule = mock.sentinel.schedule - result = self.replica_schedules._validate_schedule(schedule) + result = self.transfer_schedules._validate_schedule(schedule) self.assertEqual( schedule, @@ -127,7 +130,7 @@ def test_validate_expiration_date_is_none( ): expiration_date = None - result = 
self.replica_schedules._validate_expiration_date( + result = self.transfer_schedules._validate_expiration_date( expiration_date) self.assertEqual( @@ -142,7 +145,7 @@ def test_validate_expiration_date_past( self.assertRaises( exception.InvalidInput, - self.replica_schedules._validate_expiration_date, + self.transfer_schedules._validate_expiration_date, expiration_date ) @@ -151,7 +154,7 @@ def test_validate_expiration_date( ): expiration_date = '9999-12-31' - result = self.replica_schedules._validate_expiration_date( + result = self.transfer_schedules._validate_expiration_date( expiration_date) self.assertEqual( @@ -159,11 +162,11 @@ def test_validate_expiration_date( result ) - @mock.patch.object(replica_schedules.ReplicaScheduleController, + @mock.patch.object(transfer_schedules.TransferScheduleController, '_validate_expiration_date') @mock.patch.object(schemas, 'validate_value') @mock.patch.object(jsonschema, 'FormatChecker') - @mock.patch.object(replica_schedules.ReplicaScheduleController, + @mock.patch.object(transfer_schedules.TransferScheduleController, '_validate_schedule') def test_validate_create_body( self, @@ -187,7 +190,7 @@ def test_validate_create_body( True ) - result = self.replica_schedules._validate_create_body(mock_body) + result = self.transfer_schedules._validate_create_body(mock_body) self.assertEqual( expected_result, @@ -201,11 +204,11 @@ def test_validate_create_body( ) mock_validate_expiration_date.assert_called_once_with(date) - @mock.patch.object(replica_schedules.ReplicaScheduleController, + @mock.patch.object(transfer_schedules.TransferScheduleController, '_validate_expiration_date') @mock.patch.object(schemas, 'validate_value') @mock.patch.object(jsonschema, 'FormatChecker') - @mock.patch.object(replica_schedules.ReplicaScheduleController, + @mock.patch.object(transfer_schedules.TransferScheduleController, '_validate_schedule') def test_validate_create_body_no_expiration_date( self, @@ -227,7 +230,7 @@ def 
test_validate_create_body_no_expiration_date( True ) - result = self.replica_schedules._validate_create_body(mock_body) + result = self.transfer_schedules._validate_create_body(mock_body) self.assertEqual( expected_result, @@ -249,15 +252,15 @@ def test_validate_create_body_no_schedule( self.assertRaises( exception.InvalidInput, - self.replica_schedules._validate_create_body, + self.transfer_schedules._validate_create_body, mock_body ) - @mock.patch.object(replica_schedules.ReplicaScheduleController, + @mock.patch.object(transfer_schedules.TransferScheduleController, '_validate_expiration_date') @mock.patch.object(schemas, 'validate_value') @mock.patch.object(jsonschema, 'FormatChecker') - @mock.patch.object(replica_schedules.ReplicaScheduleController, + @mock.patch.object(transfer_schedules.TransferScheduleController, '_validate_schedule') def test_validate_update_body( self, @@ -281,7 +284,8 @@ def test_validate_update_body( "shutdown_instance": True } - result = self.replica_schedules._validate_update_body(mock_update_body) + result = self.transfer_schedules._validate_update_body( + mock_update_body) self.assertEqual( expected_result, @@ -295,11 +299,11 @@ def test_validate_update_body( ) mock_validate_expiration_date.assert_called_once_with(date) - @mock.patch.object(replica_schedules.ReplicaScheduleController, + @mock.patch.object(transfer_schedules.TransferScheduleController, '_validate_expiration_date') @mock.patch.object(schemas, 'validate_value') @mock.patch.object(jsonschema, 'FormatChecker') - @mock.patch.object(replica_schedules.ReplicaScheduleController, + @mock.patch.object(transfer_schedules.TransferScheduleController, '_validate_schedule') def test_validate_update_body_none( self, @@ -311,7 +315,8 @@ def test_validate_update_body_none( mock_update_body = {} expected_result = {} - result = self.replica_schedules._validate_update_body(mock_update_body) + result = self.transfer_schedules._validate_update_body( + mock_update_body) self.assertEqual( 
expected_result, @@ -325,9 +330,9 @@ def test_validate_update_body_none( ) mock_validate_expiration_date.assert_not_called() - @mock.patch.object(replica_schedule_view, 'single') + @mock.patch.object(transfer_schedule_view, 'single') @mock.patch.object(api.API, 'create') - @mock.patch.object(replica_schedules.ReplicaScheduleController, + @mock.patch.object(transfer_schedules.TransferScheduleController, '_validate_create_body') def test_create( self, @@ -338,14 +343,14 @@ def test_create( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id body = mock.sentinel.body schedule = mock.sentinel.schedule exp_date = mock.sentinel.exp_date mock_validate_create_body.return_value = ( schedule, False, exp_date, True) - result = self.replica_schedules.create(mock_req, replica_id, body) + result = self.transfer_schedules.create(mock_req, transfer_id, body) self.assertEqual( mock_single.return_value, @@ -353,13 +358,13 @@ def test_create( ) mock_context.can.assert_called_once_with( - "migration:replica_schedules:create") + "migration:transfer_schedules:create") mock_validate_create_body.assert_called_once_with(body) mock_create.assert_called_once_with( - mock_context, replica_id, schedule, False, exp_date, True) + mock_context, transfer_id, schedule, False, exp_date, True) mock_single.assert_called_once_with(mock_create.return_value) - @mock.patch.object(replica_schedules.ReplicaScheduleController, + @mock.patch.object(transfer_schedules.TransferScheduleController, '_validate_create_body') def test_create_except( self, @@ -368,25 +373,25 @@ def test_create_except( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id body = mock.sentinel.body mock_validate_create_body.side_effect = Exception("err") self.assertRaises( 
exception.InvalidInput, - self.replica_schedules.create, + self.transfer_schedules.create, mock_req, - replica_id, + transfer_id, body ) mock_context.can.assert_called_once_with( - "migration:replica_schedules:create") + "migration:transfer_schedules:create") mock_validate_create_body.assert_called_once_with(body) - @mock.patch.object(replica_schedule_view, 'single') + @mock.patch.object(transfer_schedule_view, 'single') @mock.patch.object(api.API, 'update') - @mock.patch.object(replica_schedules.ReplicaScheduleController, + @mock.patch.object(transfer_schedules.TransferScheduleController, '_validate_update_body') def test_update( self, @@ -397,11 +402,12 @@ def test_update( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id id = mock.sentinel.id body = mock.sentinel.body - result = self.replica_schedules.update(mock_req, replica_id, id, body) + result = self.transfer_schedules.update( + mock_req, transfer_id, id, body) self.assertEqual( mock_single.return_value, @@ -409,14 +415,14 @@ def test_update( ) mock_context.can.assert_called_once_with( - "migration:replica_schedules:update") + "migration:transfer_schedules:update") mock_validate_update_body.assert_called_once_with(body) mock_update.assert_called_once_with( - mock_context, replica_id, id, + mock_context, transfer_id, id, mock_validate_update_body.return_value) mock_single.assert_called_once_with(mock_update.return_value) - @mock.patch.object(replica_schedules.ReplicaScheduleController, + @mock.patch.object(transfer_schedules.TransferScheduleController, '_validate_update_body') def test_update_except( self, @@ -425,22 +431,22 @@ def test_update_except( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id id = mock.sentinel.id body = 
mock.sentinel.body mock_validate_update_body.side_effect = Exception("err") self.assertRaises( exception.InvalidInput, - self.replica_schedules.update, + self.transfer_schedules.update, mock_req, - replica_id, + transfer_id, id, body ) mock_context.can.assert_called_once_with( - "migration:replica_schedules:update") + "migration:transfer_schedules:update") mock_validate_update_body.assert_called_once_with(body) @mock.patch.object(api.API, 'delete') @@ -451,17 +457,17 @@ def test_delete( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id id = mock.sentinel.id self.assertRaises( exc.HTTPNoContent, - self.replica_schedules.delete, + self.transfer_schedules.delete, mock_req, - replica_id, + transfer_id, id ) mock_context.can.assert_called_once_with( - "migration:replica_schedules:delete") - mock_delete.assert_called_once_with(mock_context, replica_id, id) + "migration:transfer_schedules:delete") + mock_delete.assert_called_once_with(mock_context, transfer_id, id) diff --git a/coriolis/tests/api/v1/test_replica_tasks_execution_actions.py b/coriolis/tests/api/v1/test_transfer_tasks_execution_actions.py similarity index 57% rename from coriolis/tests/api/v1/test_replica_tasks_execution_actions.py rename to coriolis/tests/api/v1/test_transfer_tasks_execution_actions.py index 2ed8e2fd..bddfb363 100644 --- a/coriolis/tests/api/v1/test_replica_tasks_execution_actions.py +++ b/coriolis/tests/api/v1/test_transfer_tasks_execution_actions.py @@ -6,25 +6,26 @@ import ddt from webob import exc -from coriolis.api.v1 import replica_tasks_execution_actions as replica_api +from coriolis.api.v1 import transfer_tasks_execution_actions as transfer_api from coriolis import exception -from coriolis.replica_tasks_executions import api from coriolis.tests import test_base from coriolis.tests import testutils +from coriolis.transfer_tasks_executions import api 
@ddt.ddt -class ReplicaTasksExecutionActionsControllerTestCase( +class TransferTasksExecutionActionsControllerTestCase( test_base.CoriolisBaseTestCase ): - """Test suite for the Coriolis Replica Tasks Execution Actions v1 API""" + """Test suite for the Coriolis Transfer Tasks Execution Actions v1 API""" def setUp(self): - super(ReplicaTasksExecutionActionsControllerTestCase, self).setUp() - self.replica_api = replica_api.ReplicaTasksExecutionActionsController() + super(TransferTasksExecutionActionsControllerTestCase, self).setUp() + self.transfer_api = ( + transfer_api.TransferTasksExecutionActionsController()) @mock.patch.object(api.API, 'cancel') - @ddt.file_data('data/replica_task_execution_actions_cancel.yml') + @ddt.file_data('data/transfer_task_execution_actions_cancel.yml') def test_cancel( self, mock_cancel, @@ -37,7 +38,7 @@ def test_cancel( mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} id = mock.sentinel.id - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id body = config["body"] if exception_raised: mock_cancel.side_effect = getattr(exception, exception_raised)( @@ -45,14 +46,14 @@ def test_cancel( self.assertRaises( getattr(exc, expected_result), - testutils.get_wrapped_function(self.replica_api._cancel), + testutils.get_wrapped_function(self.transfer_api._cancel), mock_req, - replica_id, + transfer_id, id, body ) mock_context.can.assert_called_once_with( - "migration:replica_executions:cancel") + "migration:transfer_executions:cancel") mock_cancel.assert_called_once_with( - mock_context, replica_id, id, expected_force) + mock_context, transfer_id, id, expected_force) diff --git a/coriolis/tests/api/v1/test_replica_tasks_executions.py b/coriolis/tests/api/v1/test_transfer_tasks_executions.py similarity index 63% rename from coriolis/tests/api/v1/test_replica_tasks_executions.py rename to coriolis/tests/api/v1/test_transfer_tasks_executions.py index b1b2b11b..de607b2e 100644 --- 
a/coriolis/tests/api/v1/test_replica_tasks_executions.py +++ b/coriolis/tests/api/v1/test_transfer_tasks_executions.py @@ -5,21 +5,21 @@ from webob import exc -from coriolis.api.v1 import replica_tasks_executions as replica_api -from coriolis.api.v1.views import replica_tasks_execution_view +from coriolis.api.v1 import transfer_tasks_executions as transfer_api +from coriolis.api.v1.views import transfer_tasks_execution_view from coriolis import exception -from coriolis.replica_tasks_executions import api from coriolis.tests import test_base +from coriolis.transfer_tasks_executions import api -class ReplicaTasksExecutionControllerTestCase(test_base.CoriolisBaseTestCase): - """Test suite for the Coriolis Replica Tasks Execution v1 API""" +class TransferTasksExecutionControllerTestCase(test_base.CoriolisBaseTestCase): + """Test suite for the Coriolis Transfer Tasks Execution v1 API""" def setUp(self): - super(ReplicaTasksExecutionControllerTestCase, self).setUp() - self.replica_api = replica_api.ReplicaTasksExecutionController() + super(TransferTasksExecutionControllerTestCase, self).setUp() + self.transfer_api = transfer_api.TransferTasksExecutionController() - @mock.patch.object(replica_tasks_execution_view, 'single') + @mock.patch.object(transfer_tasks_execution_view, 'single') @mock.patch.object(api.API, 'get_execution') def test_show( self, @@ -29,10 +29,10 @@ def test_show( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id id = mock.sentinel.id - result = self.replica_api.show(mock_req, replica_id, id) + result = self.transfer_api.show(mock_req, transfer_id, id) self.assertEqual( mock_single.return_value, @@ -40,12 +40,12 @@ def test_show( ) mock_context.can.assert_called_once_with( - "migration:replica_executions:show") + "migration:transfer_executions:show") mock_get_execution.assert_called_once_with( - mock_context, 
replica_id, id) + mock_context, transfer_id, id) mock_single.assert_called_once_with(mock_get_execution.return_value) - @mock.patch.object(replica_tasks_execution_view, 'single') + @mock.patch.object(transfer_tasks_execution_view, 'single') @mock.patch.object(api.API, 'get_execution') def test_show_not_found( self, @@ -55,25 +55,25 @@ def test_show_not_found( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id id = mock.sentinel.id mock_get_execution.return_value = None self.assertRaises( exc.HTTPNotFound, - self.replica_api.show, + self.transfer_api.show, mock_req, - replica_id, + transfer_id, id ) mock_context.can.assert_called_once_with( - "migration:replica_executions:show") + "migration:transfer_executions:show") mock_get_execution.assert_called_once_with( - mock_context, replica_id, id) + mock_context, transfer_id, id) mock_single.assert_not_called() - @mock.patch.object(replica_tasks_execution_view, 'collection') + @mock.patch.object(transfer_tasks_execution_view, 'collection') @mock.patch.object(api.API, 'get_executions') def test_index( self, @@ -83,9 +83,9 @@ def test_index( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id - result = self.replica_api.index(mock_req, replica_id) + result = self.transfer_api.index(mock_req, transfer_id) self.assertEqual( mock_collection.return_value, @@ -93,13 +93,13 @@ def test_index( ) mock_context.can.assert_called_once_with( - "migration:replica_executions:list") + "migration:transfer_executions:list") mock_get_executions.assert_called_once_with( - mock_context, replica_id, include_tasks=False) + mock_context, transfer_id, include_tasks=False) mock_collection.assert_called_once_with( mock_get_executions.return_value) - 
@mock.patch.object(replica_tasks_execution_view, 'collection') + @mock.patch.object(transfer_tasks_execution_view, 'collection') @mock.patch.object(api.API, 'get_executions') def test_detail( self, @@ -109,9 +109,9 @@ def test_detail( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id - result = self.replica_api.detail(mock_req, replica_id) + result = self.transfer_api.detail(mock_req, transfer_id) self.assertEqual( mock_collection.return_value, @@ -119,13 +119,13 @@ def test_detail( ) mock_context.can.assert_called_once_with( - "migration:replica_executions:show") + "migration:transfer_executions:show") mock_get_executions.assert_called_once_with( - mock_context, replica_id, include_tasks=True) + mock_context, transfer_id, include_tasks=True) mock_collection.assert_called_once_with( mock_get_executions.return_value) - @mock.patch.object(replica_tasks_execution_view, 'single') + @mock.patch.object(transfer_tasks_execution_view, 'single') @mock.patch.object(api.API, 'create') def test_create( self, @@ -135,11 +135,11 @@ def test_create( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id execution = {"shutdown_instances": True} mock_body = {"execution": execution} - result = self.replica_api.create(mock_req, replica_id, mock_body) + result = self.transfer_api.create(mock_req, transfer_id, mock_body) self.assertEqual( mock_single.return_value, @@ -147,12 +147,12 @@ def test_create( ) mock_context.can.assert_called_once_with( - "migration:replica_executions:create") + "migration:transfer_executions:create") mock_create.assert_called_once_with( - mock_context, replica_id, True) + mock_context, transfer_id, True) mock_single.assert_called_once_with(mock_create.return_value) - 
@mock.patch.object(replica_tasks_execution_view, 'single') + @mock.patch.object(transfer_tasks_execution_view, 'single') @mock.patch.object(api.API, 'create') def test_create_no_executions( self, @@ -162,10 +162,10 @@ def test_create_no_executions( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id mock_body = {} - result = self.replica_api.create(mock_req, replica_id, mock_body) + result = self.transfer_api.create(mock_req, transfer_id, mock_body) self.assertEqual( mock_single.return_value, @@ -173,9 +173,9 @@ def test_create_no_executions( ) mock_context.can.assert_called_once_with( - "migration:replica_executions:create") + "migration:transfer_executions:create") mock_create.assert_called_once_with( - mock_context, replica_id, False) + mock_context, transfer_id, False) mock_single.assert_called_once_with(mock_create.return_value) @mock.patch.object(api.API, 'delete') @@ -186,20 +186,20 @@ def test_delete( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id id = mock.sentinel.id self.assertRaises( exc.HTTPNoContent, - self.replica_api.delete, + self.transfer_api.delete, mock_req, - replica_id, + transfer_id, id ) mock_context.can.assert_called_once_with( - "migration:replica_executions:delete") - mock_delete.assert_called_once_with(mock_context, replica_id, id) + "migration:transfer_executions:delete") + mock_delete.assert_called_once_with(mock_context, transfer_id, id) @mock.patch.object(api.API, 'delete') def test_delete_not_found( @@ -209,18 +209,18 @@ def test_delete_not_found( mock_req = mock.Mock() mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - replica_id = mock.sentinel.transfer_id + transfer_id = mock.sentinel.transfer_id id = mock.sentinel.id 
mock_delete.side_effect = exception.NotFound() self.assertRaises( exc.HTTPNotFound, - self.replica_api.delete, + self.transfer_api.delete, mock_req, - replica_id, + transfer_id, id ) mock_context.can.assert_called_once_with( - "migration:replica_executions:delete") - mock_delete.assert_called_once_with(mock_context, replica_id, id) + "migration:transfer_executions:delete") + mock_delete.assert_called_once_with(mock_context, transfer_id, id) diff --git a/coriolis/tests/api/v1/test_replicas.py b/coriolis/tests/api/v1/test_transfers.py similarity index 67% rename from coriolis/tests/api/v1/test_replicas.py rename to coriolis/tests/api/v1/test_transfers.py index 4f9bdbe0..f2fa6081 100644 --- a/coriolis/tests/api/v1/test_replicas.py +++ b/coriolis/tests/api/v1/test_transfers.py @@ -6,31 +6,31 @@ import ddt from webob import exc -from coriolis.api.v1 import replicas +from coriolis.api.v1 import transfers from coriolis.api.v1 import utils as api_utils -from coriolis.api.v1.views import replica_tasks_execution_view -from coriolis.api.v1.views import replica_view +from coriolis.api.v1.views import transfer_tasks_execution_view +from coriolis.api.v1.views import transfer_view from coriolis.endpoints import api as endpoints_api from coriolis import exception -from coriolis.replicas import api from coriolis.tests import test_base from coriolis.tests import testutils +from coriolis.transfers import api @ddt.ddt -class ReplicaControllerTestCase(test_base.CoriolisBaseTestCase): - """Test suite for the Coriolis Replica Controller v1 API""" +class TransferControllerTestCase(test_base.CoriolisBaseTestCase): + """Test suite for the Coriolis Transfer Controller v1 API""" def setUp(self): - super(ReplicaControllerTestCase, self).setUp() - self.replicas = replicas.ReplicaController() + super(TransferControllerTestCase, self).setUp() + self.transfers = transfers.TransferController() - @mock.patch('coriolis.api.v1.replicas.CONF') - @mock.patch.object(replica_view, 'single') - 
@mock.patch.object(api.API, 'get_replica') + @mock.patch('coriolis.api.v1.transfers.CONF') + @mock.patch.object(transfer_view, 'single') + @mock.patch.object(api.API, 'get_transfer') def test_show( self, - mock_get_replica, + mock_get_transfer, mock_single, mock_conf ): @@ -38,26 +38,26 @@ def test_show( mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} id = mock.sentinel.id - mock_conf.api.include_task_info_in_replicas_api = True + mock_conf.api.include_task_info_in_transfers_api = True - result = self.replicas.show(mock_req, id) + result = self.transfers.show(mock_req, id) self.assertEqual( mock_single.return_value, result ) - mock_context.can.assert_called_once_with("migration:replicas:show") - mock_get_replica.assert_called_once_with( + mock_context.can.assert_called_once_with("migration:transfers:show") + mock_get_transfer.assert_called_once_with( mock_context, id, include_task_info=True) - mock_single.assert_called_once_with(mock_get_replica.return_value) + mock_single.assert_called_once_with(mock_get_transfer.return_value) - @mock.patch('coriolis.api.v1.replicas.CONF') - @mock.patch.object(replica_view, 'single') - @mock.patch.object(api.API, 'get_replica') - def test_show_no_replica( + @mock.patch('coriolis.api.v1.transfers.CONF') + @mock.patch.object(transfer_view, 'single') + @mock.patch.object(api.API, 'get_transfer') + def test_show_no_transfer( self, - mock_get_replica, + mock_get_transfer, mock_single, mock_conf ): @@ -65,29 +65,29 @@ def test_show_no_replica( mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} id = mock.sentinel.id - mock_conf.api.include_task_info_in_replicas_api = True - mock_get_replica.return_value = None + mock_conf.api.include_task_info_in_transfers_api = True + mock_get_transfer.return_value = None self.assertRaises( exc.HTTPNotFound, - self.replicas.show, + self.transfers.show, mock_req, id ) - mock_context.can.assert_called_once_with("migration:replicas:show") - 
mock_get_replica.assert_called_once_with( + mock_context.can.assert_called_once_with("migration:transfers:show") + mock_get_transfer.assert_called_once_with( mock_context, id, include_task_info=True) mock_single.assert_not_called() - @mock.patch('coriolis.api.v1.replicas.CONF') - @mock.patch.object(replica_view, 'collection') - @mock.patch.object(api.API, 'get_replicas') + @mock.patch('coriolis.api.v1.transfers.CONF') + @mock.patch.object(transfer_view, 'collection') + @mock.patch.object(api.API, 'get_transfers') @mock.patch.object(api_utils, '_get_show_deleted') def test_list( self, mock_get_show_deleted, - mock_get_replicas, + mock_get_transfers, mock_collection, mock_conf ): @@ -95,7 +95,7 @@ def test_list( mock_context = mock.Mock() mock_req.environ = {'coriolis.context': mock_context} - result = self.replicas._list(mock_req) + result = self.transfers._list(mock_req) self.assertEqual( mock_collection.return_value, @@ -104,14 +104,15 @@ def test_list( mock_get_show_deleted.assert_called_once_with( mock_req.GET.get.return_value) - mock_context.can.assert_called_once_with("migration:replicas:list") - mock_get_replicas.assert_called_once_with( + mock_context.can.assert_called_once_with("migration:transfers:list") + mock_get_transfers.assert_called_once_with( mock_context, include_tasks_executions= - mock_conf.api.include_task_info_in_replicas_api, - include_task_info=mock_conf.api.include_task_info_in_replicas_api + mock_conf.api.include_task_info_in_transfers_api, + include_task_info=mock_conf.api.include_task_info_in_transfers_api ) - mock_collection.assert_called_once_with(mock_get_replicas.return_value) + mock_collection.assert_called_once_with( + mock_get_transfers.return_value) @mock.patch.object(api_utils, 'validate_instances_list_for_transfer') @mock.patch.object(endpoints_api.API, 'validate_source_environment') @@ -120,7 +121,7 @@ def test_list( @mock.patch.object(api_utils, 'validate_user_scripts') @mock.patch.object(api_utils, 'normalize_user_scripts') 
@mock.patch.object(api_utils, 'validate_storage_mappings') - @ddt.file_data('data/replicas_validate_create_body.yml') + @ddt.file_data('data/transfers_validate_create_body.yml') def test_validate_create_body( self, mock_validate_storage_mappings, @@ -136,15 +137,15 @@ def test_validate_create_body( ): ctxt = {} body = config["body"] - replica = body["replica"] - origin_endpoint_id = replica.get('origin_endpoint_id') - source_environment = replica.get('source_environment') - network_map = replica.get('network_map') - destination_endpoint_id = replica.get('destination_endpoint_id') - destination_environment = replica.get('destination_environment') - user_scripts = replica.get('user_scripts') - instances = replica.get('instances') - storage_mappings = replica.get('storage_mappings') + transfer = body["transfer"] + origin_endpoint_id = transfer.get('origin_endpoint_id') + source_environment = transfer.get('source_environment') + network_map = transfer.get('network_map') + destination_endpoint_id = transfer.get('destination_endpoint_id') + destination_environment = transfer.get('destination_environment') + user_scripts = transfer.get('user_scripts') + instances = transfer.get('instances') + storage_mappings = transfer.get('storage_mappings') mock_validate_instances_list_for_transfer.return_value = instances mock_normalize_user_scripts.return_value = user_scripts @@ -153,8 +154,8 @@ def test_validate_create_body( Exception, exception_raised, testutils.get_wrapped_function( - self.replicas._validate_create_body), - self.replicas, + self.transfers._validate_create_body), + self.transfers, ctxt, body ) @@ -162,8 +163,8 @@ def test_validate_create_body( mock_validate_network_map.assert_not_called() else: result = testutils.get_wrapped_function( - self.replicas._validate_create_body)( - self.replicas, + self.transfers._validate_create_body)( + self.transfers, ctxt, body, ) @@ -187,9 +188,9 @@ def test_validate_create_body( 
mock_validate_instances_list_for_transfer.assert_called_once_with( instances) - @mock.patch.object(replica_view, 'single') + @mock.patch.object(transfer_view, 'single') @mock.patch.object(api.API, 'create') - @mock.patch.object(replicas.ReplicaController, '_validate_create_body') + @mock.patch.object(transfers.TransferController, '_validate_create_body') def test_create( self, mock_validate_create_body, @@ -202,7 +203,7 @@ def test_create( mock_body = {} mock_validate_create_body.return_value = (mock.sentinel.value,) * 13 - result = self.replicas.create(mock_req, mock_body) + result = self.transfers.create(mock_req, mock_body) self.assertEqual( mock_single.return_value, @@ -210,7 +211,7 @@ def test_create( ) mock_context.can.assert_called_once_with( - "migration:replicas:create") + "migration:transfers:create") mock_validate_create_body.assert_called_once_with( mock_context, mock_body) mock_create.assert_called_once() @@ -228,7 +229,7 @@ def test_delete( self.assertRaises( exc.HTTPNoContent, - self.replicas.delete, + self.transfers.delete, mock_req, id ) @@ -248,15 +249,15 @@ def test_delete_not_found( self.assertRaises( exc.HTTPNotFound, - self.replicas.delete, + self.transfers.delete, mock_req, id ) - mock_context.can.assert_called_once_with("migration:replicas:delete") + mock_context.can.assert_called_once_with("migration:transfers:delete") mock_delete.assert_called_once_with(mock_context, id) - @ddt.file_data('data/replicas_update_storage_mappings.yml') + @ddt.file_data('data/transfers_update_storage_mappings.yml') def test_update_storage_mappings( self, config, @@ -267,11 +268,11 @@ def test_update_storage_mappings( new_storage_mappings = config['new_storage_mappings'] if logs_expected: - with self.assertLogs('coriolis.api.v1.replicas', level='INFO'): - result = self.replicas._update_storage_mappings( + with self.assertLogs('coriolis.api.v1.transfers', level='INFO'): + result = self.transfers._update_storage_mappings( original_storage_mappings, 
new_storage_mappings) else: - result = self.replicas._update_storage_mappings( + result = self.transfers._update_storage_mappings( original_storage_mappings, new_storage_mappings) self.assertEqual( @@ -296,7 +297,7 @@ def test_get_updated_user_scripts( "mock_global_scripts_2": "mock_value"}, 'instances': {"mock_instance_scripts": "mock_new_value"} } - result = self.replicas._get_updated_user_scripts( + result = self.transfers._get_updated_user_scripts( original_user_scripts, new_user_scripts) self.assertEqual( @@ -314,7 +315,7 @@ def test_get_updated_user_scripts_new_user_scripts_empty( } new_user_scripts = {} - result = self.replicas._get_updated_user_scripts( + result = self.transfers._get_updated_user_scripts( original_user_scripts, new_user_scripts) self.assertEqual( @@ -322,11 +323,13 @@ def test_get_updated_user_scripts_new_user_scripts_empty( result ) - @mock.patch.object(replicas.ReplicaController, '_get_updated_user_scripts') + @mock.patch.object(transfers.TransferController, + '_get_updated_user_scripts') @mock.patch.object(api_utils, 'validate_user_scripts') - @mock.patch.object(replicas.ReplicaController, '_update_storage_mappings') - @ddt.file_data('data/replicas_get_merged_replica_values.yml') - def test_get_merged_replica_values( + @mock.patch.object(transfers.TransferController, + '_update_storage_mappings') + @ddt.file_data('data/transfers_get_merged_transfer_values.yml') + def test_get_merged_transfer_values( self, mock_update_storage_mappings, mock_validate_user_scripts, @@ -334,10 +337,10 @@ def test_get_merged_replica_values( config, expected_result ): - replica = config['replica'] + transfer = config['transfer'] updated_values = config['updated_values'] - original_storage_mapping = replica.get('storage_mappings', {}) - replica_user_scripts = replica.get('user_scripts', {}) + original_storage_mapping = transfer.get('storage_mappings', {}) + transfer_user_scripts = transfer.get('user_scripts', {}) updated_user_scripts = 
updated_values.get('user_scripts', {}) new_storage_mappings = updated_values.get('storage_mappings', {}) expected_result['storage_mappings'] = \ @@ -349,8 +352,8 @@ def test_get_merged_replica_values( mock_validate_user_scripts.side_effect = ["mock_scripts", "mock_new_scripts"] - result = self.replicas._get_merged_replica_values( - replica, updated_values) + result = self.transfers._get_merged_transfer_values( + transfer, updated_values) self.assertEqual( expected_result, @@ -360,7 +363,8 @@ def test_get_merged_replica_values( mock_update_storage_mappings.assert_called_once_with( original_storage_mapping, new_storage_mappings) mock_validate_user_scripts.assert_has_calls( - [mock.call(replica_user_scripts), mock.call(updated_user_scripts)]) + [mock.call(transfer_user_scripts), + mock.call(updated_user_scripts)]) mock_get_updated_user_scripts.assert_called_once_with( "mock_scripts", "mock_new_scripts") @@ -370,14 +374,14 @@ def test_get_merged_replica_values( @mock.patch.object(api_utils, 'validate_network_map') @mock.patch.object(endpoints_api.API, 'validate_target_environment') @mock.patch.object(endpoints_api.API, 'validate_source_environment') - @mock.patch.object(replicas.ReplicaController, - '_get_merged_replica_values') - @mock.patch.object(api.API, 'get_replica') - @ddt.file_data('data/replicas_validate_update_body.yml') + @mock.patch.object(transfers.TransferController, + '_get_merged_transfer_values') + @mock.patch.object(api.API, 'get_transfer') + @ddt.file_data('data/transfers_validate_update_body.yml') def test_validate_update_body( self, - mock_get_replica, - mock_get_merged_replica_values, + mock_get_transfer, + mock_get_merged_transfer_values, mock_validate_source_environment, mock_validate_target_environment, mock_validate_network_map, @@ -388,17 +392,18 @@ def test_validate_update_body( expected_result ): body = config['body'] - replica = config['replica'] - replica_body = body['replica'] + transfer = config['transfer'] + transfer_body = 
body['transfer'] context = mock.sentinel.context id = mock.sentinel.id - mock_get_replica.return_value = replica - mock_get_merged_replica_values.return_value = replica_body - mock_normalize_user_scripts.return_value = replica_body['user_scripts'] + mock_get_transfer.return_value = transfer + mock_get_merged_transfer_values.return_value = transfer_body + mock_normalize_user_scripts.return_value = transfer_body[ + 'user_scripts'] result = testutils.get_wrapped_function( - self.replicas._validate_update_body)( - self.replicas, + self.transfers._validate_update_body)( + self.transfers, id, context, body @@ -409,29 +414,29 @@ def test_validate_update_body( result ) - mock_get_replica.assert_called_once_with(context, id) - mock_get_merged_replica_values.assert_called_once_with( - replica, replica_body) + mock_get_transfer.assert_called_once_with(context, id) + mock_get_merged_transfer_values.assert_called_once_with( + transfer, transfer_body) mock_validate_source_environment.assert_called_once_with( - context, replica['origin_endpoint_id'], - replica_body['source_environment']) + context, transfer['origin_endpoint_id'], + transfer_body['source_environment']) mock_validate_target_environment.assert_called_once_with( - context, replica['destination_endpoint_id'], - replica_body['destination_environment']) + context, transfer['destination_endpoint_id'], + transfer_body['destination_environment']) mock_validate_network_map.assert_called_once_with( - replica_body['network_map']) + transfer_body['network_map']) mock_validate_storage_mappings.assert_called_once_with( - replica_body['storage_mappings']) + transfer_body['storage_mappings']) mock_validate_user_scripts.assert_called_once_with( - replica_body['user_scripts']) + transfer_body['user_scripts']) mock_normalize_user_scripts.assert_called_once_with( - replica_body['user_scripts'], replica['instances']) + transfer_body['user_scripts'], transfer['instances']) - @mock.patch.object(api.API, 'get_replica') - 
@ddt.file_data('data/replicas_validate_update_body_raises.yml') + @mock.patch.object(api.API, 'get_transfer') + @ddt.file_data('data/transfers_validate_update_body_raises.yml') def test_validate_update_body_raises( self, - mock_get_replica, + mock_get_transfer, body, ): context = mock.sentinel.context @@ -440,18 +445,18 @@ def test_validate_update_body_raises( self.assertRaises( exc.HTTPBadRequest, testutils.get_wrapped_function( - self.replicas._validate_update_body), - self.replicas, + self.transfers._validate_update_body), + self.transfers, id, context, body ) - mock_get_replica.assert_called_once_with(context, id) + mock_get_transfer.assert_called_once_with(context, id) - @mock.patch.object(replica_tasks_execution_view, 'single') + @mock.patch.object(transfer_tasks_execution_view, 'single') @mock.patch.object(api.API, 'update') - @mock.patch.object(replicas.ReplicaController, '_validate_update_body') + @mock.patch.object(transfers.TransferController, '_validate_update_body') def test_update( self, mock_validate_update_body, @@ -464,7 +469,7 @@ def test_update( id = mock.sentinel.id body = mock.sentinel.body - result = self.replicas.update(mock_req, id, body) + result = self.transfers.update(mock_req, id, body) self.assertEqual( mock_single.return_value, @@ -472,7 +477,7 @@ def test_update( ) mock_context.can.assert_called_once_with( - "migration:replicas:update") + "migration:transfers:update") mock_validate_update_body.assert_called_once_with( id, mock_context, body) mock_update.assert_called_once_with( @@ -481,7 +486,7 @@ def test_update( mock_single.assert_called_once_with(mock_update.return_value) @mock.patch.object(api.API, 'update') - @mock.patch.object(replicas.ReplicaController, '_validate_update_body') + @mock.patch.object(transfers.TransferController, '_validate_update_body') def test_update_not_found( self, mock_validate_update_body, @@ -496,14 +501,14 @@ def test_update_not_found( self.assertRaises( exc.HTTPNotFound, - self.replicas.update, + 
self.transfers.update, mock_req, id, body ) mock_context.can.assert_called_once_with( - "migration:replicas:update") + "migration:transfers:update") mock_validate_update_body.assert_called_once_with( id, mock_context, body) mock_update.assert_called_once_with( @@ -511,7 +516,7 @@ def test_update_not_found( mock_validate_update_body.return_value) @mock.patch.object(api.API, 'update') - @mock.patch.object(replicas.ReplicaController, '_validate_update_body') + @mock.patch.object(transfers.TransferController, '_validate_update_body') def test_update_not_invalid_parameter_value( self, mock_validate_update_body, @@ -526,14 +531,14 @@ def test_update_not_invalid_parameter_value( self.assertRaises( exc.HTTPNotFound, - self.replicas.update, + self.transfers.update, mock_req, id, body ) mock_context.can.assert_called_once_with( - "migration:replicas:update") + "migration:transfers:update") mock_validate_update_body.assert_called_once_with( id, mock_context, body) mock_update.assert_called_once_with( diff --git a/coriolis/tests/api/v1/views/__init__py b/coriolis/tests/api/v1/views/__init__py deleted file mode 100644 index e69de29b..00000000 diff --git a/coriolis/tests/api/v1/views/test_migration_view.py b/coriolis/tests/api/v1/views/test_migration_view.py deleted file mode 100644 index a803aac4..00000000 --- a/coriolis/tests/api/v1/views/test_migration_view.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2023 Cloudbase Solutions Srl -# All Rights Reserved. 
- -from unittest import mock - -from coriolis.api.v1.views import migration_view -from coriolis.api.v1.views import replica_tasks_execution_view as view -from coriolis.api.v1.views import utils as view_utils -from coriolis.tests import test_base - - -class MigrationViewTestCase(test_base.CoriolisApiViewsTestCase): - """Test suite for the Coriolis api v1 views.""" - - @mock.patch.object(view, 'format_replica_tasks_execution') - @mock.patch.object(view_utils, 'format_opt') - def test_format_migration( - self, - mock_format_opt, - mock_format_replica_tasks_execution - ): - mock_execution = {'tasks': 'mock_id1'} - mock_format_opt.return_value = { - "executions": [mock_execution], - 'tasks': 'mock_id2', - 'mock_key': 'mock_value' - } - mock_format_replica_tasks_execution.return_value = mock_execution - - expected_result = { - 'tasks': 'mock_id1', - 'mock_key': 'mock_value' - } - - endpoint = mock.sentinel.endpoint - keys = mock.sentinel.keys - result = migration_view._format_migration(endpoint, keys) - - mock_format_replica_tasks_execution.assert_called_once_with( - mock_execution, keys - ) - mock_format_opt.assert_called_once_with(endpoint, keys) - - self.assertEqual( - expected_result, - result - ) - - @mock.patch.object(view_utils, 'format_opt') - def test_format_migration_no_tasks( - self, - mock_format_opt, - ): - mock_format_opt.return_value = { - 'mock_key': 'mock_value' - } - - endpoint = mock.sentinel.endpoint - keys = mock.sentinel.keys - result = migration_view._format_migration(endpoint, keys) - - mock_format_opt.assert_called_once_with(endpoint, keys) - - self.assertEqual( - mock_format_opt.return_value, - result - ) - - @mock.patch.object(view_utils, 'format_opt') - def test_format_migration_migration_dict_has_tasks( - self, - mock_format_opt, - ): - mock_format_opt.return_value = { - 'tasks': 'mock_id1', - 'mock_key': 'mock_value' - } - - endpoint = mock.sentinel.endpoint - keys = mock.sentinel.keys - result = migration_view._format_migration(endpoint, 
keys) - - mock_format_opt.assert_called_once_with(endpoint, keys) - - self.assertEqual( - mock_format_opt.return_value, - result - ) - - def test_single(self): - fun = getattr(migration_view, 'single') - self._single_view_test(fun, 'migration') - - def test_collection(self): - fun = getattr(migration_view, 'collection') - self._collection_view_test(fun, 'migrations') diff --git a/coriolis/tests/api/v1/views/test_replica_view.py b/coriolis/tests/api/v1/views/test_replica_view.py deleted file mode 100644 index ec5ffe3e..00000000 --- a/coriolis/tests/api/v1/views/test_replica_view.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2023 Cloudbase Solutions Srl -# All Rights Reserved. - -from unittest import mock - -from coriolis.api.v1.views import replica_tasks_execution_view as view -from coriolis.api.v1.views import replica_view -from coriolis.api.v1.views import utils as view_utils -from coriolis.tests import test_base - - -class ReplicaViewTestCase(test_base.CoriolisApiViewsTestCase): - """Test suite for the Coriolis api v1 views.""" - - def setUp(self): - super(ReplicaViewTestCase, self).setUp() - self._format_fun = replica_view._format_replica - - @mock.patch.object(view, 'format_replica_tasks_execution') - @mock.patch.object(view_utils, 'format_opt') - def test_format_replica(self, mock_format_opt, - mock_format_replica_tasks_execution): - mock_format_opt.return_value = { - "executions": [{'id': 'mock_id1'}, {'id': 'mock_id2'}], - "mock_key": "mock_value" - } - - expected_calls = [ - mock.call.mock_format_replica_tasks_execution( - {'id': 'mock_id1'}), - mock.call.mock_format_replica_tasks_execution( - {'id': 'mock_id2'})] - expected_result = { - "executions": - [mock_format_replica_tasks_execution.return_value, - mock_format_replica_tasks_execution.return_value], - 'mock_key': 'mock_value' - } - - replica = mock.sentinel.replica - keys = mock.sentinel.keys - result = replica_view._format_replica(replica, keys) - - mock_format_opt.assert_called_once_with(replica, 
keys) - mock_format_replica_tasks_execution.assert_has_calls( - expected_calls - ) - self.assertEqual( - expected_result, - result - ) - - @mock.patch.object(view, 'format_replica_tasks_execution') - @mock.patch.object(view_utils, 'format_opt') - def test_format_replica_no_keys(self, mock_format_opt, - mock_format_replica_tasks_execution): - mock_format_opt.return_value = { - "executions": [{'id': 'mock_id1'}, {'id': 'mock_id2'}], - } - - expected_calls = [ - mock.call.mock_format_replica_tasks_execution( - {'id': 'mock_id1'}), - mock.call.mock_format_replica_tasks_execution( - {'id': 'mock_id2'})] - expected_result = { - "executions": - [mock_format_replica_tasks_execution.return_value, - mock_format_replica_tasks_execution.return_value], - } - - replica = mock.sentinel.replica - keys = mock.sentinel.keys - result = replica_view._format_replica(replica, keys) - - mock_format_opt.assert_called_once_with(replica, keys) - mock_format_replica_tasks_execution.assert_has_calls( - expected_calls - ) - self.assertEqual( - expected_result, - result - ) - - @mock.patch.object(view_utils, 'format_opt') - def test_format_replica_no_executions(self, mock_format_opt): - mock_format_opt.return_value = { - "mock_key": "mock_value" - } - - expected_result = { - 'executions': [], - 'mock_key': 'mock_value' - } - - replica = mock.sentinel.replica - keys = mock.sentinel.keys - result = replica_view._format_replica(replica, keys) - - mock_format_opt.assert_called_once_with(replica, keys) - self.assertEqual( - expected_result, - result - ) - - def test_single(self): - fun = getattr(replica_view, 'single') - self._single_view_test(fun, 'replica') - - def test_collection(self): - fun = getattr(replica_view, 'collection') - self._collection_view_test(fun, 'replicas') diff --git a/coriolis/tests/api/v1/views/test_replica_schedule_view.py b/coriolis/tests/api/v1/views/test_transfer_schedule_view.py similarity index 57% rename from coriolis/tests/api/v1/views/test_replica_schedule_view.py 
rename to coriolis/tests/api/v1/views/test_transfer_schedule_view.py index 3baffaad..6f3e2944 100644 --- a/coriolis/tests/api/v1/views/test_replica_schedule_view.py +++ b/coriolis/tests/api/v1/views/test_transfer_schedule_view.py @@ -1,17 +1,17 @@ # Copyright 2023 Cloudbase Solutions Srl # All Rights Reserved. -from coriolis.api.v1.views import replica_schedule_view +from coriolis.api.v1.views import transfer_schedule_view from coriolis.tests import test_base -class ReplicaViewTestCase(test_base.CoriolisApiViewsTestCase): +class TransferViewTestCase(test_base.CoriolisApiViewsTestCase): """Test suite for the Coriolis api v1 views.""" def test_single(self): - fun = getattr(replica_schedule_view, 'single') + fun = getattr(transfer_schedule_view, 'single') self._single_view_test(fun, 'schedule') def test_collection(self): - fun = getattr(replica_schedule_view, 'collection') + fun = getattr(transfer_schedule_view, 'collection') self._collection_view_test(fun, 'schedules') diff --git a/coriolis/tests/api/v1/views/test_replica_task_execution_view.py b/coriolis/tests/api/v1/views/test_transfer_task_execution_view.py similarity index 88% rename from coriolis/tests/api/v1/views/test_replica_task_execution_view.py rename to coriolis/tests/api/v1/views/test_transfer_task_execution_view.py index 9e9dbfd9..4cf58ff5 100644 --- a/coriolis/tests/api/v1/views/test_replica_task_execution_view.py +++ b/coriolis/tests/api/v1/views/test_transfer_task_execution_view.py @@ -3,18 +3,18 @@ from unittest import mock -from coriolis.api.v1.views import replica_tasks_execution_view as view +from coriolis.api.v1.views import transfer_tasks_execution_view as view from coriolis.api.v1.views import utils as view_utils from coriolis import constants from coriolis.tests import test_base -class ReplicaTaskExecutionViewTestCase(test_base.CoriolisApiViewsTestCase): +class TransferTaskExecutionViewTestCase(test_base.CoriolisApiViewsTestCase): """Test suite for the Coriolis api v1 views.""" 
@mock.patch.object(view, '_sort_tasks') @mock.patch.object(view_utils, 'format_opt') - def test_format_replica_tasks_execution( + def test_format_transfer_tasks_execution( self, mock_format_opt, mock_sort_tasks @@ -27,7 +27,7 @@ def test_format_replica_tasks_execution( mock_sort_tasks.return_value = mock_execution keys = mock.sentinel.keys - result = view.format_replica_tasks_execution(mock_execution, keys) + result = view.format_transfer_tasks_execution(mock_execution, keys) mock_sort_tasks.assert_called_once_with(mock_tasks) mock_format_opt.assert_called_once_with(mock_execution["tasks"], keys) @@ -38,7 +38,7 @@ def test_format_replica_tasks_execution( @mock.patch.object(view, '_sort_tasks') @mock.patch.object(view_utils, 'format_opt') - def test_format_replica_tasks_execution_no_tasks( + def test_format_transfer_tasks_execution_no_tasks( self, mock_format_opt, mock_sort_tasks @@ -48,7 +48,7 @@ def test_format_replica_tasks_execution_no_tasks( } keys = mock.sentinel.keys - result = view.format_replica_tasks_execution(mock_execution, keys) + result = view.format_transfer_tasks_execution(mock_execution, keys) mock_sort_tasks.assert_not_called() mock_format_opt.assert_called_once_with(mock_execution, keys) diff --git a/coriolis/tests/api/v1/views/test_transfer_view.py b/coriolis/tests/api/v1/views/test_transfer_view.py new file mode 100644 index 00000000..cbcdc957 --- /dev/null +++ b/coriolis/tests/api/v1/views/test_transfer_view.py @@ -0,0 +1,112 @@ +# Copyright 2023 Cloudbase Solutions Srl +# All Rights Reserved. 
+ +from unittest import mock + +from coriolis.api.v1.views import transfer_tasks_execution_view as view +from coriolis.api.v1.views import transfer_view +from coriolis.api.v1.views import utils as view_utils +from coriolis.tests import test_base + + +class TransferViewTestCase(test_base.CoriolisApiViewsTestCase): + """Test suite for the Coriolis api v1 views.""" + + def setUp(self): + super(TransferViewTestCase, self).setUp() + self._format_fun = transfer_view._format_transfer + + @mock.patch.object(view, 'format_transfer_tasks_execution') + @mock.patch.object(view_utils, 'format_opt') + def test_format_transfer(self, mock_format_opt, + mock_format_transfer_tasks_execution): + mock_format_opt.return_value = { + "executions": [{'id': 'mock_id1'}, {'id': 'mock_id2'}], + "mock_key": "mock_value" + } + + expected_calls = [ + mock.call.mock_format_transfer_tasks_execution( + {'id': 'mock_id1'}), + mock.call.mock_format_transfer_tasks_execution( + {'id': 'mock_id2'})] + expected_result = { + "executions": + [mock_format_transfer_tasks_execution.return_value, + mock_format_transfer_tasks_execution.return_value], + 'mock_key': 'mock_value' + } + + transfer = mock.sentinel.transfer + keys = mock.sentinel.keys + result = transfer_view._format_transfer(transfer, keys) + + mock_format_opt.assert_called_once_with(transfer, keys) + mock_format_transfer_tasks_execution.assert_has_calls( + expected_calls + ) + self.assertEqual( + expected_result, + result + ) + + @mock.patch.object(view, 'format_transfer_tasks_execution') + @mock.patch.object(view_utils, 'format_opt') + def test_format_transfer_no_keys(self, mock_format_opt, + mock_format_transfer_tasks_execution): + mock_format_opt.return_value = { + "executions": [{'id': 'mock_id1'}, {'id': 'mock_id2'}], + } + + expected_calls = [ + mock.call.mock_format_transfer_tasks_execution( + {'id': 'mock_id1'}), + mock.call.mock_format_transfer_tasks_execution( + {'id': 'mock_id2'})] + expected_result = { + "executions": + 
[mock_format_transfer_tasks_execution.return_value, + mock_format_transfer_tasks_execution.return_value], + } + + transfer = mock.sentinel.transfer + keys = mock.sentinel.keys + result = transfer_view._format_transfer(transfer, keys) + + mock_format_opt.assert_called_once_with(transfer, keys) + mock_format_transfer_tasks_execution.assert_has_calls( + expected_calls + ) + self.assertEqual( + expected_result, + result + ) + + @mock.patch.object(view_utils, 'format_opt') + def test_format_transfer_no_executions(self, mock_format_opt): + mock_format_opt.return_value = { + "mock_key": "mock_value" + } + + expected_result = { + 'executions': [], + 'mock_key': 'mock_value' + } + + transfer = mock.sentinel.transfer + keys = mock.sentinel.keys + result = transfer_view._format_transfer(transfer, keys) + + mock_format_opt.assert_called_once_with(transfer, keys) + self.assertEqual( + expected_result, + result + ) + + def test_single(self): + fun = getattr(transfer_view, 'single') + self._single_view_test(fun, 'transfer') + + def test_collection(self): + fun = getattr(transfer_view, 'collection') + self._collection_view_test(fun, 'transfers') diff --git a/coriolis/tests/cmd/test_replica_cron.py b/coriolis/tests/cmd/test_replica_cron.py index 131ee075..03097e27 100644 --- a/coriolis/tests/cmd/test_replica_cron.py +++ b/coriolis/tests/cmd/test_replica_cron.py @@ -4,7 +4,7 @@ import sys from unittest import mock -from coriolis.cmd import replica_cron +from coriolis.cmd import transfer_cron from coriolis import constants from coriolis import service from coriolis.tests import test_base @@ -12,33 +12,33 @@ from coriolis import utils -class ReplicaCronTestCase(test_base.CoriolisBaseTestCase): +class TransferCronTestCase(test_base.CoriolisBaseTestCase): """Test suite for the Coriolis transfer_cron CMD""" @mock.patch.object(service, 'service') @mock.patch.object(service, 'MessagingService') - @mock.patch.object(rpc_server, 'ReplicaCronServerEndpoint') + @mock.patch.object(rpc_server, 
'TransferCronServerEndpoint') @mock.patch.object(utils, 'setup_logging') - @mock.patch('coriolis.cmd.replica_cron.CONF') + @mock.patch('coriolis.cmd.transfer_cron.CONF') @mock.patch.object(sys, 'argv') def test_main( self, mock_argv, mock_conf, mock_setup_logging, - mock_ReplicaCronServerEndpoint, + mock_TransferCronServerEndpoint, mock_MessagingService, mock_service ): - replica_cron.main() + transfer_cron.main() mock_conf.assert_called_once_with( mock_argv[1:], project='coriolis', version="1.0.0") mock_setup_logging.assert_called_once() - mock_ReplicaCronServerEndpoint.assert_called_once() + mock_TransferCronServerEndpoint.assert_called_once() mock_MessagingService.assert_called_once_with( constants.TRANSFER_CRON_MAIN_MESSAGING_TOPIC, - [mock_ReplicaCronServerEndpoint.return_value], + [mock_TransferCronServerEndpoint.return_value], rpc_server.VERSION, worker_count=1) mock_service.launch.assert_called_once_with( diff --git a/coriolis/tests/minion_manager/rpc/test_client.py b/coriolis/tests/minion_manager/rpc/test_client.py index 6ed7e3b3..ac7c9159 100644 --- a/coriolis/tests/minion_manager/rpc/test_client.py +++ b/coriolis/tests/minion_manager/rpc/test_client.py @@ -122,24 +122,24 @@ def test_validate_minion_pool_selections_for_action(self): self.client.validate_minion_pool_selections_for_action, args ) - def test_allocate_minion_machines_for_replica(self): - args = {"replica": "test_replica"} + def test_allocate_minion_machines_for_transfer(self): + args = {"transfer": "test_transfer"} self._test( self.client.allocate_minion_machines_for_transfer, args, rpc_op='_cast', - server_fun_name='allocate_minion_machines_for_replica' + server_fun_name='allocate_minion_machines_for_transfer' ) - def test_allocate_minion_machines_for_migration(self): + def test_allocate_minion_machines_for_deployment(self): args = { - "migration": "test_migration", + "deployment": "test_deployment", "include_transfer_minions": True, "include_osmorphing_minions": True } self._test( 
self.client.allocate_minion_machines_for_deployment, args, rpc_op='_cast', - server_fun_name='allocate_minion_machines_for_migration' + server_fun_name='allocate_minion_machines_for_deployment' ) def test_deallocate_minion_machine(self): diff --git a/coriolis/tests/transfer_cron/rpc/test_server.py b/coriolis/tests/transfer_cron/rpc/test_server.py index a0270b46..9481f6e5 100644 --- a/coriolis/tests/transfer_cron/rpc/test_server.py +++ b/coriolis/tests/transfer_cron/rpc/test_server.py @@ -13,10 +13,10 @@ from coriolis.transfer_cron.rpc import server -class TriggerReplicaTestCase(test_base.CoriolisBaseTestCase): - """Test suite for the Coriolis _trigger_replica function.""" +class TriggerTransferTestCase(test_base.CoriolisBaseTestCase): + """Test suite for the Coriolis _trigger_transfer function.""" - def test__trigger_replica(self): + def test__trigger_transfer(self): mock_conductor_client = mock.MagicMock() mock_conductor_client.execute_transfer_tasks.return_value = { @@ -24,7 +24,7 @@ def test__trigger_replica(self): 'action_id': mock.sentinel.action_id } - result = server._trigger_replica( + result = server._trigger_transfer( mock.sentinel.ctxt, mock_conductor_client, mock.sentinel.transfer_id, False) @@ -33,10 +33,10 @@ def test__trigger_replica(self): mock.sentinel.ctxt, mock.sentinel.transfer_id, False) self.assertEqual( - result, 'Execution %s for Replica %s' % ( + result, 'Execution %s for Transfer %s' % ( mock.sentinel.id, mock.sentinel.action_id)) - def test__trigger_transfer_invalid_replica_state(self): + def test__trigger_transfer_invalid_transfer_state(self): mock_conductor_client = mock.MagicMock() mock_conductor_client.execute_transfer_tasks.side_effect = ( @@ -44,20 +44,20 @@ def test__trigger_transfer_invalid_replica_state(self): with self.assertLogs('coriolis.transfer_cron.rpc.server', level=logging.INFO): - server._trigger_replica( + server._trigger_transfer( mock.sentinel.ctxt, mock_conductor_client, mock.sentinel.action_id, False) @ddt.ddt 
-class ReplicaCronServerEndpointTestCase(test_base.CoriolisBaseTestCase): - """Test suite for the Coriolis ReplicaCronServerEndpoint class.""" +class TransferCronServerEndpointTestCase(test_base.CoriolisBaseTestCase): + """Test suite for the Coriolis TransferCronServerEndpoint class.""" - @mock.patch.object(server.ReplicaCronServerEndpoint, '_init_cron') + @mock.patch.object(server.TransferCronServerEndpoint, '_init_cron') def setUp(self, _): - super(ReplicaCronServerEndpointTestCase, self).setUp() - self.server = server.ReplicaCronServerEndpoint() + super(TransferCronServerEndpointTestCase, self).setUp() + self.server = server.TransferCronServerEndpoint() @ddt.data( { @@ -75,16 +75,16 @@ def test__deserialize_schedule(self, data): result = self.server._deserialize_schedule(data['input']) self.assertEqual(result, data['expected']) - @mock.patch.object(server.ReplicaCronServerEndpoint, + @mock.patch.object(server.TransferCronServerEndpoint, '_deserialize_schedule') - @mock.patch.object(server, '_trigger_replica') + @mock.patch.object(server, '_trigger_transfer') @mock.patch.object(server.timeutils, 'utcnow') @mock.patch.object(server.context, 'get_admin_context') @mock.patch.object(server.cron, 'CronJob') @mock.patch.object(server.cron.Cron, 'register') def test__register_schedule(self, mock_register, mock_cron_job, mock_get_admin_context, mock_utcnow, - mock_trigger_replica, + mock_trigger_transfer, mock_deserialize_schedule): mock_get_admin_context.return_value = 'test_admin_context' mock_utcnow.return_value = datetime.datetime(2022, 1, 1) @@ -96,7 +96,7 @@ def test__register_schedule(self, mock_register, mock_cron_job, } test_schedule = { 'trust_id': 'test_schedule_trust_id', - 'replica_id': 'test_schedule_replica_id', + 'transfer_id': 'test_schedule_transfer_id', 'shutdown_instance': 'test_schedule_shutdown_instance' } @@ -108,12 +108,12 @@ def test__register_schedule(self, mock_register, mock_cron_job, mock_cron_job.assert_called_once_with( 'test_id', 
'Scheduled job for test_id', 'test_schedule', True, datetime.datetime(2022, 12, 31), None, None, - mock_trigger_replica, 'test_admin_context', - self.server._rpc_client, 'test_schedule_replica_id', + mock_trigger_transfer, 'test_admin_context', + self.server._rpc_client, 'test_schedule_transfer_id', 'test_schedule_shutdown_instance') mock_register.assert_called_once() - @mock.patch.object(server.ReplicaCronServerEndpoint, + @mock.patch.object(server.TransferCronServerEndpoint, '_deserialize_schedule') @mock.patch.object(server.timeutils, 'utcnow') def test__register_schedule_expired(self, mock_utcnow, @@ -127,7 +127,7 @@ def test__register_schedule_expired(self, mock_utcnow, } test_schedule = { 'trust_id': 'test_schedule_trust_id', - 'replica_id': 'test_schedule_replica_id', + 'transfer_id': 'test_schedule_transfer_id', 'shutdown_instance': 'test_schedule_shutdown_instance' } @@ -138,8 +138,8 @@ def test__register_schedule_expired(self, mock_utcnow, mock_deserialize_schedule.assert_called_once_with(test_schedule) @mock.patch.object(server.timeutils, 'utcnow') - @mock.patch.object(server.ReplicaCronServerEndpoint, '_get_all_schedules') - @mock.patch.object(server.ReplicaCronServerEndpoint, '_register_schedule') + @mock.patch.object(server.TransferCronServerEndpoint, '_get_all_schedules') + @mock.patch.object(server.TransferCronServerEndpoint, '_register_schedule') @mock.patch.object(server.cron.Cron, 'start') def test__init_cron(self, mock_cron_start, mock_register_schedule, mock_get_all_schedules, mock_utcnow): @@ -160,8 +160,8 @@ def test__init_cron(self, mock_cron_start, mock_register_schedule, ]) mock_cron_start.assert_called_once() - @mock.patch.object(server.ReplicaCronServerEndpoint, '_get_all_schedules') - @mock.patch.object(server.ReplicaCronServerEndpoint, '_register_schedule') + @mock.patch.object(server.TransferCronServerEndpoint, '_get_all_schedules') + @mock.patch.object(server.TransferCronServerEndpoint, '_register_schedule') def 
test__init_cron_with_exception(self, mock_register_schedule, mock_get_all_schedules): mock_get_all_schedules.return_value = [ @@ -189,7 +189,7 @@ def test__get_all_schedules(self, mock_get_transfer_schedules): self.assertEqual(result, mock_get_transfer_schedules.return_value) - @mock.patch.object(server.ReplicaCronServerEndpoint, '_register_schedule') + @mock.patch.object(server.TransferCronServerEndpoint, '_register_schedule') @mock.patch.object(server.timeutils, 'utcnow') def test_register(self, mock_utcnow, mock_register_schedule): mock_utcnow.return_value = datetime.datetime(2022, 1, 1) diff --git a/coriolis/replica_tasks_executions/__init__.py b/coriolis/tests/transfer_tasks_executions/__init__.py similarity index 100% rename from coriolis/replica_tasks_executions/__init__.py rename to coriolis/tests/transfer_tasks_executions/__init__.py diff --git a/coriolis/tests/replica_tasks_executions/test_api.py b/coriolis/tests/transfer_tasks_executions/test_api.py similarity index 95% rename from coriolis/tests/replica_tasks_executions/test_api.py rename to coriolis/tests/transfer_tasks_executions/test_api.py index 1a216295..614dad7c 100644 --- a/coriolis/tests/replica_tasks_executions/test_api.py +++ b/coriolis/tests/transfer_tasks_executions/test_api.py @@ -3,8 +3,8 @@ from unittest import mock -from coriolis.replica_tasks_executions import api as replicas_module from coriolis.tests import test_base +from coriolis.transfer_tasks_executions import api as transfers_module class APITestCase(test_base.CoriolisBaseTestCase): @@ -12,7 +12,7 @@ class APITestCase(test_base.CoriolisBaseTestCase): def setUp(self): super(APITestCase, self).setUp() - self.api = replicas_module.API() + self.api = transfers_module.API() self.rpc_client = mock.MagicMock() self.api._rpc_client = self.rpc_client self.ctxt = mock.sentinel.ctxt diff --git a/coriolis/replicas/__init__.py b/coriolis/tests/transfers/__init__.py similarity index 100% rename from coriolis/replicas/__init__.py rename to 
coriolis/tests/transfers/__init__.py diff --git a/coriolis/tests/replicas/test_api.py b/coriolis/tests/transfers/test_api.py similarity index 92% rename from coriolis/tests/replicas/test_api.py rename to coriolis/tests/transfers/test_api.py index 69d1fede..8e415dea 100644 --- a/coriolis/tests/replicas/test_api.py +++ b/coriolis/tests/transfers/test_api.py @@ -3,8 +3,8 @@ from unittest import mock -from coriolis.replicas import api as replicas_module from coriolis.tests import test_base +from coriolis.transfers import api as transfers_module class APITestCase(test_base.CoriolisBaseTestCase): @@ -12,7 +12,7 @@ class APITestCase(test_base.CoriolisBaseTestCase): def setUp(self): super(APITestCase, self).setUp() - self.api = replicas_module.API() + self.api = transfers_module.API() self.rpc_client = mock.MagicMock() self.api._rpc_client = self.rpc_client self.ctxt = mock.sentinel.ctxt @@ -64,16 +64,16 @@ def test_delete(self): self.rpc_client.delete_transfer.assert_called_once_with( self.ctxt, self.transfer_id) - def test_get_replicas(self): - result = self.api.get_replicas( + def test_get_transfers(self): + result = self.api.get_transfers( self.ctxt, include_tasks_executions=False, include_task_info=False) self.rpc_client.get_transfers.assert_called_once_with( self.ctxt, False, include_task_info=False) self.assertEqual(result, self.rpc_client.get_transfers.return_value) - def test_get_replica(self): - result = self.api.get_replica(self.ctxt, self.transfer_id) + def test_get_transfer(self): + result = self.api.get_transfer(self.ctxt, self.transfer_id) self.rpc_client.get_transfer.assert_called_once_with( self.ctxt, self.transfer_id, include_task_info=False) diff --git a/coriolis/transfer_cron/api.py b/coriolis/transfer_cron/api.py index 6fc03730..101c0eb4 100644 --- a/coriolis/transfer_cron/api.py +++ b/coriolis/transfer_cron/api.py @@ -8,24 +8,24 @@ class API(object): def __init__(self): self._rpc_client = rpc_client.ConductorClient() - def create(self, ctxt, 
replica_id, schedule, enabled, + def create(self, ctxt, transfer_id, schedule, enabled, exp_date, shutdown_instance): return self._rpc_client.create_transfer_schedule( - ctxt, replica_id, schedule, enabled, exp_date, + ctxt, transfer_id, schedule, enabled, exp_date, shutdown_instance) - def get_schedules(self, ctxt, replica_id, expired=True): + def get_schedules(self, ctxt, transfer_id, expired=True): return self._rpc_client.get_transfer_schedules( - ctxt, replica_id, expired=expired) + ctxt, transfer_id, expired=expired) - def get_schedule(self, ctxt, replica_id, schedule_id, expired=True): + def get_schedule(self, ctxt, transfer_id, schedule_id, expired=True): return self._rpc_client.get_transfer_schedule( - ctxt, replica_id, schedule_id, expired=expired) + ctxt, transfer_id, schedule_id, expired=expired) - def update(self, ctxt, replica_id, schedule_id, update_values): + def update(self, ctxt, transfer_id, schedule_id, update_values): return self._rpc_client.update_transfer_schedule( - ctxt, replica_id, schedule_id, update_values) + ctxt, transfer_id, schedule_id, update_values) - def delete(self, ctxt, replica_id, schedule_id): + def delete(self, ctxt, transfer_id, schedule_id): self._rpc_client.delete_transfer_schedule( - ctxt, replica_id, schedule_id) + ctxt, transfer_id, schedule_id) diff --git a/coriolis/transfer_cron/rpc/server.py b/coriolis/transfer_cron/rpc/server.py index 9b13a6ae..7ea9aab1 100644 --- a/coriolis/transfer_cron/rpc/server.py +++ b/coriolis/transfer_cron/rpc/server.py @@ -17,11 +17,11 @@ VERSION = "1.0" -def _trigger_replica(ctxt, conductor_client, replica_id, shutdown_instance): +def _trigger_transfer(ctxt, conductor_client, transfer_id, shutdown_instance): try: execution = conductor_client.execute_transfer_tasks( - ctxt, replica_id, shutdown_instance) - result_msg = 'Execution %s for Replica %s' % ( + ctxt, transfer_id, shutdown_instance) + result_msg = 'Execution %s for Transfer %s' % ( execution.get('id'), execution.get('action_id')) 
return result_msg except (exception.InvalidTransferState, @@ -29,7 +29,7 @@ def _trigger_replica(ctxt, conductor_client, replica_id, shutdown_instance): LOG.info("A replica or migration already running") -class ReplicaCronServerEndpoint(object): +class TransferCronServerEndpoint(object): def __init__(self): self._rpc_client = rpc_client.ConductorClient() @@ -61,8 +61,8 @@ def _register_schedule(self, schedule, date=None): job = cron.CronJob( sched["id"], description, sched["schedule"], sched["enabled"], sched["expiration_date"], - None, None, _trigger_replica, trust_ctxt, - self._rpc_client, schedule["replica_id"], + None, None, _trigger_transfer, trust_ctxt, + self._rpc_client, schedule["transfer_id"], schedule["shutdown_instance"]) self._cron.register(job) diff --git a/coriolis/tests/replica_tasks_executions/__init__.py b/coriolis/transfer_tasks_executions/__init__.py similarity index 100% rename from coriolis/tests/replica_tasks_executions/__init__.py rename to coriolis/transfer_tasks_executions/__init__.py diff --git a/coriolis/transfer_tasks_executions/api.py b/coriolis/transfer_tasks_executions/api.py new file mode 100644 index 00000000..ab293e03 --- /dev/null +++ b/coriolis/transfer_tasks_executions/api.py @@ -0,0 +1,29 @@ +# Copyright 2016 Cloudbase Solutions Srl +# All Rights Reserved. 
+ +from coriolis.conductor.rpc import client as rpc_client + + +class API(object): + def __init__(self): + self._rpc_client = rpc_client.ConductorClient() + + def create(self, ctxt, transfer_id, shutdown_instances): + return self._rpc_client.execute_transfer_tasks( + ctxt, transfer_id, shutdown_instances) + + def delete(self, ctxt, transfer_id, execution_id): + self._rpc_client.delete_transfer_tasks_execution( + ctxt, transfer_id, execution_id) + + def cancel(self, ctxt, transfer_id, execution_id, force): + self._rpc_client.cancel_transfer_tasks_execution( + ctxt, transfer_id, execution_id, force) + + def get_executions(self, ctxt, transfer_id, include_tasks=False): + return self._rpc_client.get_transfer_tasks_executions( + ctxt, transfer_id, include_tasks) + + def get_execution(self, ctxt, transfer_id, execution_id): + return self._rpc_client.get_transfer_tasks_execution( + ctxt, transfer_id, execution_id) diff --git a/coriolis/tests/replicas/__init__.py b/coriolis/transfers/__init__.py similarity index 100% rename from coriolis/tests/replicas/__init__.py rename to coriolis/transfers/__init__.py diff --git a/coriolis/replicas/api.py b/coriolis/transfers/api.py similarity index 63% rename from coriolis/replicas/api.py rename to coriolis/transfers/api.py index f6642202..119e73a6 100644 --- a/coriolis/replicas/api.py +++ b/coriolis/transfers/api.py @@ -8,36 +8,36 @@ class API(object): def __init__(self): self._rpc_client = rpc_client.ConductorClient() - def create(self, ctxt, replica_scenario, + def create(self, ctxt, transfer_scenario, origin_endpoint_id, destination_endpoint_id, origin_minion_pool_id, destination_minion_pool_id, instance_osmorphing_minion_pool_mappings, source_environment, destination_environment, instances, network_map, storage_mappings, notes=None, user_scripts=None): return self._rpc_client.create_instances_transfer( - ctxt, replica_scenario, + ctxt, transfer_scenario, origin_endpoint_id, destination_endpoint_id, origin_minion_pool_id, 
destination_minion_pool_id, instance_osmorphing_minion_pool_mappings, source_environment, destination_environment, instances, network_map, storage_mappings, notes, user_scripts) - def update(self, ctxt, replica_id, updated_properties): + def update(self, ctxt, transfer_id, updated_properties): return self._rpc_client.update_transfer( - ctxt, replica_id, updated_properties) + ctxt, transfer_id, updated_properties) - def delete(self, ctxt, replica_id): - self._rpc_client.delete_transfer(ctxt, replica_id) + def delete(self, ctxt, transfer_id): + self._rpc_client.delete_transfer(ctxt, transfer_id) - def get_replicas(self, ctxt, include_tasks_executions=False, - include_task_info=False): + def get_transfers(self, ctxt, include_tasks_executions=False, + include_task_info=False): return self._rpc_client.get_transfers( ctxt, include_tasks_executions, include_task_info=include_task_info) - def get_replica(self, ctxt, replica_id, include_task_info=False): + def get_transfer(self, ctxt, transfer_id, include_task_info=False): return self._rpc_client.get_transfer( - ctxt, replica_id, include_task_info=include_task_info) + ctxt, transfer_id, include_task_info=include_task_info) - def delete_disks(self, ctxt, replica_id): - return self._rpc_client.delete_transfer_disks(ctxt, replica_id) + def delete_disks(self, ctxt, transfer_id): + return self._rpc_client.delete_transfer_disks(ctxt, transfer_id) diff --git a/setup.cfg b/setup.cfg index d1d76d9a..700f776c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -28,7 +28,7 @@ console_scripts = coriolis-api = coriolis.cmd.api:main coriolis-conductor = coriolis.cmd.conductor:main coriolis-worker = coriolis.cmd.worker:main - coriolis-replica-cron = coriolis.cmd.replica_cron:main + coriolis-transfer-cron = coriolis.cmd.transfer_cron:main coriolis-scheduler= coriolis.cmd.scheduler:main coriolis-minion-manager= coriolis.cmd.minion_manager:main coriolis-dbsync = coriolis.cmd.db_sync:main