From a44bf7a2f3d28af6dbef1f12363fceb8579d02f1 Mon Sep 17 00:00:00 2001
From: Jeffrey Devloo
Date: Wed, 31 May 2017 17:52:15 +0200
Subject: [PATCH] vdisk remove via api

Fixups for mds_regression
---
 ci/scenarios/edge/reroute/main.py                  |  2 +-
 ci/scenarios/hypervisor/automated_ha/main.py       |  6 +-
 .../live_migrate_vm_test-exclude/main.py           |  3 +-
 .../vDisk/advanced_dtl_vdisk_test/main.py          |  4 +-
 .../vDisk/basic_dtl_vdisk_test/main.py             | 27 ++------
 .../vDisk/data_corruption_reg_test/main.py         |  4 +-
 .../vDisk/deployment_vdisk_test/main.py            |  6 +-
 .../vDisk/offline_migrate_test/main.py             |  2 +-
 .../vDisk/rapid_creation_same_device/main.py       | 11 ++--
 .../regress_template_memleak_test/main.py          |  4 +-
 .../vDisk/rollback_vdisk_test/main.py              | 11 ++--
 .../vDisk/validate_clone_disk_test/main.py         |  2 +-
 .../vDisk/validate_template_disk_test/main.py      |  6 +-
 .../vPool/add_remove_alba_vpool_test/main.py       |  2 +-
 ci/scenarios/vPool/mds_regression/main.py          | 61 +++++++++++--------
 .../vmachine/check_scrubbing_test/main.py          |  2 +-
 .../vmachine/perform_fio_test/main.py              |  6 +-
 17 files changed, 72 insertions(+), 87 deletions(-)

diff --git a/ci/scenarios/edge/reroute/main.py b/ci/scenarios/edge/reroute/main.py
index 620c182..ede2d03 100644
--- a/ci/scenarios/edge/reroute/main.py
+++ b/ci/scenarios/edge/reroute/main.py
@@ -245,7 +245,7 @@ def test_reroute_fio(cls, fio_bin_path, cluster_info, disk_amount=1, timeout=CIC
         for thread_category, thread_collection in threads['evented'].iteritems():
             ThreadHelper.stop_evented_threads(thread_collection['pairs'], thread_collection['r_semaphore'])
         for vdisk in vdisk_info.values():
-            VDiskRemover.remove_vdisk(vdisk.guid)
+            VDiskRemover.remove_vdisk(vdisk.guid, api)
         assert len(failed_configurations) == 0, 'Certain configuration failed: {0}'.format(failed_configurations)

     @staticmethod
diff --git a/ci/scenarios/hypervisor/automated_ha/main.py b/ci/scenarios/hypervisor/automated_ha/main.py
index 5502f6d..d3c7f59 100644
--- a/ci/scenarios/hypervisor/automated_ha/main.py
+++ b/ci/scenarios/hypervisor/automated_ha/main.py
@@ -163,9 +163,7 @@ def start_test(cls, vm_amount=1, hypervisor_info=CIConstants.HYPERVISOR_INFO):
             cls.run_test(cluster_info=cluster_info, vm_info=vm_info)
         finally:
             for vm_name, vm_object in vm_info.iteritems():
-                for vdisk in vm_object['vdisks']:
-                    VDiskRemover.remove_vdisk(vdisk.guid)
-            for vm_name in vm_info.keys():
+                VDiskRemover.remove_vdisks_with_structure(vm_object['vdisks'], api)
                 computenode_hypervisor.sdk.destroy(vm_name)
                 computenode_hypervisor.sdk.undefine(vm_name)
         # cls.test_ha_fio(fio_bin_path, cluster_info, is_ee, api)
@@ -384,7 +382,7 @@ def test_ha_fio(cls, fio_bin_path, cluster_info, is_ee, api, disk_amount=1, tim
         for thread_category, thread_collection in threads['evented'].iteritems():
             ThreadHelper.stop_evented_threads(thread_collection['pairs'], thread_collection['r_semaphore'])
         for vdisk in vdisk_info.values():
-            VDiskRemover.remove_vdisk(vdisk.guid)
+            VDiskRemover.remove_vdisk(vdisk.guid, api)
         assert len(failed_configurations) == 0, 'Certain configuration failed: {0}'.format(' '.join(failed_configurations))

     @staticmethod
diff --git a/ci/scenarios/hypervisor/live_migrate_vm_test-exclude/main.py b/ci/scenarios/hypervisor/live_migrate_vm_test-exclude/main.py
index 752efb3..6c7b1ee 100644
--- a/ci/scenarios/hypervisor/live_migrate_vm_test-exclude/main.py
+++ b/ci/scenarios/hypervisor/live_migrate_vm_test-exclude/main.py
@@ -108,8 +108,7 @@ def start_test(cls, vm_amount=1, hypervisor_info=CIConstants.HYPERVISOR_INFO):
             cls.live_migrate(vm_info, cluster_info, volume_amount, hypervisor_info)
         finally:
             for vm_name, vm_object in vm_info.iteritems():
-                for vdisk in vm_object['vdisks']:
-                    VDiskRemover.remove_vdisk(vdisk.guid)
+                VDiskRemover.remove_vdisks_with_structure(vm_object['vdisks'], api)
             for vm_name in vm_info.keys():
                 source_hypervisor.sdk.destroy(vm_name)
                 source_hypervisor.sdk.undefine(vm_name)
diff --git a/ci/scenarios/vDisk/advanced_dtl_vdisk_test/main.py b/ci/scenarios/vDisk/advanced_dtl_vdisk_test/main.py
index af9253b..cb41fab 100644
--- a/ci/scenarios/vDisk/advanced_dtl_vdisk_test/main.py
+++ b/ci/scenarios/vDisk/advanced_dtl_vdisk_test/main.py
@@ -113,9 +113,7 @@ def start_test(cls, vm_amount=1, hypervisor_info=CIConstants.HYPERVISOR_INFO):
             cls.run_test(vm_info=vm_info, cluster_info=cluster_info)
         finally:
             for vm_name, vm_object in vm_info.iteritems():
-                for vdisk in vm_object['vdisks']:
-                    VDiskRemover.remove_vdisk(vdisk.guid)
-            for vm_name in vm_info.keys():
+                VDiskRemover.remove_vdisks_with_structure(vm_object['vdisks'], api)
                 computenode_hypervisor.sdk.destroy(vm_name)
                 computenode_hypervisor.sdk.undefine(vm_name)

diff --git a/ci/scenarios/vDisk/basic_dtl_vdisk_test/main.py b/ci/scenarios/vDisk/basic_dtl_vdisk_test/main.py
index 5e5f710..3c0de52 100644
--- a/ci/scenarios/vDisk/basic_dtl_vdisk_test/main.py
+++ b/ci/scenarios/vDisk/basic_dtl_vdisk_test/main.py
@@ -13,11 +13,9 @@
 #
 # Open vStorage is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY of any kind.
-import json
 import time
 import random
-from ci.main import CONFIG_LOC
-from ci.api_lib.helpers.api import OVSClient, TimeOutError
+from ci.api_lib.helpers.api import TimeOutError
 from ci.api_lib.helpers.domain import DomainHelper
 from ci.api_lib.helpers.storagedriver import StoragedriverHelper
 from ci.api_lib.helpers.vdisk import VDiskHelper
@@ -45,7 +43,6 @@ def __init__(self):
     def main(blocked):
         """
         Run all required methods for the test
-
         :param blocked: was the test blocked by other test?
         :type blocked: bool
         :return: results of test
@@ -54,36 +51,22 @@ def main(blocked):
         _ = blocked
         return DTLChecks._execute_test()

-    @staticmethod
-    def _execute_test():
+    @classmethod
+    def _execute_test(cls):
         """
         Validate if DTL is configured as desired
-
         REQUIREMENTS:
         * 1 vPool should be available with 1 storagedriver
         * 1 vPool should be available with 2 or more storagedrivers in 2 separate domains
-
         OPTIONAL:
         * 1 vPool with 1 storagedriver with disabled DTL
-
         :return:
         """
-        DTLChecks.LOGGER.info("Starting to validate the basic DTL")
-
-        with open(CONFIG_LOC, "r") as JSON_CONFIG:
-            config = json.load(JSON_CONFIG)
-
-        api = OVSClient(
-            config['ci']['grid_ip'],
-            config['ci']['user']['api']['username'],
-            config['ci']['user']['api']['password']
-        )
-
+        api = cls.get_api_instance()
         ##########################
         # get deployment details #
         ##########################
-
         vpools = VPoolHelper.get_vpools()
         assert len(vpools) >= 1, "Not enough vPools to test"
@@ -255,7 +238,7 @@ def _execute_test():

         DTLChecks.LOGGER.info("Changing config to a same domain with only 1 storagedriver was successful!")
         DTLChecks.LOGGER.info("Removing vDisk {0}".format(vdisk.name))
-        VDiskRemover.remove_vdisk(vdisk_guid=vdisk.guid)
+        VDiskRemover.remove_vdisk(vdisk_guid=vdisk.guid, api=api)
         DTLChecks.LOGGER.info("Finished removing vDisk {0}".format(vdisk.name))

         end = time.time()
diff --git a/ci/scenarios/vDisk/data_corruption_reg_test/main.py b/ci/scenarios/vDisk/data_corruption_reg_test/main.py
index 9120d0b..175a9c1 100644
--- a/ci/scenarios/vDisk/data_corruption_reg_test/main.py
+++ b/ci/scenarios/vDisk/data_corruption_reg_test/main.py
@@ -112,9 +112,7 @@ def start_test(cls, vm_amount=1, hypervisor_info=CIConstants.HYPERVISOR_INFO):
             cls.run_test(storagedriver=storagedriver, vm_info=vm_info)
         finally:
             for vm_name, vm_object in vm_info.iteritems():
-                for vdisk in vm_object['vdisks']:
-                    VDiskRemover.remove_vdisk(vdisk.guid)
-            for vm_name in vm_info.keys():
+                VDiskRemover.remove_vdisks_with_structure(vm_object['vdisks'], api)
                 computenode_hypervisor.sdk.destroy(vm_name)
                 computenode_hypervisor.sdk.undefine(vm_name)

diff --git a/ci/scenarios/vDisk/deployment_vdisk_test/main.py b/ci/scenarios/vDisk/deployment_vdisk_test/main.py
index 9c26931..f4ba983 100644
--- a/ci/scenarios/vDisk/deployment_vdisk_test/main.py
+++ b/ci/scenarios/vDisk/deployment_vdisk_test/main.py
@@ -105,7 +105,7 @@ def validate_vdisk_deployment(cls):
             VDiskDeploymentChecks.LOGGER.info("Finished creating vdisk `{0}`".format(api_disk_name))
             VDiskDeploymentChecks._check_vdisk(vdisk_name=api_disk_name, vpool_name=vpool.name)
             VDiskDeploymentChecks.LOGGER.info("Starting to delete vdisk `{0}`".format(api_disk_name))
-            VDiskRemover.remove_vdisk_by_name(api_disk_name+'.raw', vpool.name)
+            VDiskRemover.remove_vdisk_by_name(api_disk_name, vpool.name, api)
             VDiskDeploymentChecks.LOGGER.info("Finished deleting vdisk `{0}`".format(api_disk_name))

             # ========
@@ -123,7 +123,7 @@ def validate_vdisk_deployment(cls):
             VDiskDeploymentChecks.LOGGER.info("Finished creating vdisk `{0}`".format(qemu_disk_name))
             VDiskDeploymentChecks._check_vdisk(vdisk_name=qemu_disk_name, vpool_name=vpool.name)
             VDiskDeploymentChecks.LOGGER.info("Starting to delete vdisk `{0}`".format(qemu_disk_name))
-            VDiskRemover.remove_vdisk_by_name(qemu_disk_name+'.raw', vpool.name)
+            VDiskRemover.remove_vdisk_by_name(qemu_disk_name, vpool.name, api)
             VDiskDeploymentChecks.LOGGER.info("Finished deleting vdisk `{0}`".format(qemu_disk_name))

             # ============
@@ -137,7 +137,7 @@ def validate_vdisk_deployment(cls):
             VDiskDeploymentChecks.LOGGER.info("Finished creating vdisk `{0}`".format(truncate_disk_name))
             VDiskDeploymentChecks._check_vdisk(vdisk_name=truncate_disk_name, vpool_name=vpool.name)
             VDiskDeploymentChecks.LOGGER.info("Starting to delete vdisk `{0}`".format(truncate_disk_name))
-            VDiskRemover.remove_vdisk_by_name(truncate_disk_name+'.raw', vpool.name)
+            VDiskRemover.remove_vdisk_by_name(truncate_disk_name, vpool.name, api)
             VDiskDeploymentChecks.LOGGER.info("Finished deleting vdisk `{0}`".format(truncate_disk_name))

         VDiskDeploymentChecks.LOGGER.info("Finished to validate the vdisk deployment")
diff --git a/ci/scenarios/vDisk/offline_migrate_test/main.py b/ci/scenarios/vDisk/offline_migrate_test/main.py
index 21e4cfe..88b71c8 100644
--- a/ci/scenarios/vDisk/offline_migrate_test/main.py
+++ b/ci/scenarios/vDisk/offline_migrate_test/main.py
@@ -118,7 +118,7 @@ def _execute_test(cls, amount_vdisks=AMOUNT_VDISKS):
             raise
         finally:
             for vdisk in created_vdisks:
-                VDiskRemover.remove_vdisk(vdisk.guid)
+                VDiskRemover.remove_vdisk(vdisk.guid, api)
         MigrateTester.LOGGER.info("Finished offline migrate test.")

     @staticmethod
diff --git a/ci/scenarios/vDisk/rapid_creation_same_device/main.py b/ci/scenarios/vDisk/rapid_creation_same_device/main.py
index 1559591..dd43978 100644
--- a/ci/scenarios/vDisk/rapid_creation_same_device/main.py
+++ b/ci/scenarios/vDisk/rapid_creation_same_device/main.py
@@ -46,14 +46,15 @@ def main(blocked):
         _ = blocked
         return VDiskControllerTester._execute_test()

-    @staticmethod
-    def _execute_test():
+    @classmethod
+    def _execute_test(cls):
         """
         Mimics the healthcheck creating and deleting disks with the same name/devicename back to back
         :return: None
         """
         local_sr = SystemHelper.get_local_storagerouter()
         VDiskControllerTester.LOGGER.info("Starting creation/deletion test.")
+        api = cls.get_api_instance()
         # Elect vpool
         assert len(local_sr.storagedrivers) > 0, 'Node {0} has no storagedriver. Cannot test {1}'.format(local_sr.ip, VDiskControllerTester.TEST_NAME)
         random_storagedriver = local_sr.storagedrivers[random.randint(0, len(local_sr.storagedrivers) - 1)]
@@ -89,7 +90,7 @@ def _execute_test():
             VDiskControllerTester.LOGGER.error('Unexpected exception occurred during the the loop. Got {0}.'.format(str(ex)))
         finally:
             try:
-                VDiskControllerTester._cleanup_vdisk(disk_name, vpool.name, not test_passed)
+                VDiskControllerTester._cleanup_vdisk(disk_name, vpool.name, api, not test_passed)
             except Exception as ex:
                 VDiskControllerTester.LOGGER.error("Auto cleanup failed with {0}.".format(str(ex)))
                 exceptions.append('Auto cleanup failed. Got {0}'.format(str(ex)))
@@ -99,7 +100,7 @@ def _execute_test():
         VDiskControllerTester.LOGGER.info("Finished create/delete test.")

     @staticmethod
-    def _cleanup_vdisk(vdisk_name, vpool_name, fail=True):
+    def _cleanup_vdisk(vdisk_name, vpool_name, api, fail=True):
         """
         Attempt to cleanup vdisk
         :param vdisk_name: name of the vdisk
@@ -109,7 +110,7 @@ def _cleanup_vdisk(vdisk_name, vpool_name, fail=True):
         """
         # Cleanup vdisk using the controller
         try:
-            VDiskRemover.remove_vdisk_by_name('{0}.raw'.format(vdisk_name), vpool_name)
+            VDiskRemover.remove_vdisk_by_name(vdisk_name, vpool_name, api)
         except Exception as ex:
             VDiskControllerTester.LOGGER.error(str(ex))
             if fail is True:
diff --git a/ci/scenarios/vDisk/regress_template_memleak_test/main.py b/ci/scenarios/vDisk/regress_template_memleak_test/main.py
index 93e8184..18ca539 100644
--- a/ci/scenarios/vDisk/regress_template_memleak_test/main.py
+++ b/ci/scenarios/vDisk/regress_template_memleak_test/main.py
@@ -136,14 +136,14 @@ def validate_vdisk_clone(amount_vdisks=AMOUNT_VDISKS, amount_to_write=AMOUNT_TO_
                                 "--output={0}.json".format(vdisk_name)])
             # delete vdisk
             time.sleep(VDiskTemplateChecks.TEMPLATE_SLEEP_BEFORE_DELETE)
-            VDiskRemover.remove_vdisk_by_name(vdisk_name=clone_vdisk_name + '.raw', vpool_name=vpool.name)
+            VDiskRemover.remove_vdisk_by_name(vdisk_name=clone_vdisk_name, vpool_name=vpool.name, api=api)

         ###################
         # remove template #
         ###################
         time.sleep(VDiskTemplateChecks.TEMPLATE_SLEEP_BEFORE_DELETE)
-        VDiskRemover.remove_vtemplate_by_name(vdisk_name=vdisk_name + '.raw', vpool_name=vpool.name, api=api)
+        VDiskRemover.remove_vtemplate_by_name(vdisk_name=vdisk_name, vpool_name=vpool.name, api=api)

         ######################
         # log current memory #
diff --git a/ci/scenarios/vDisk/rollback_vdisk_test/main.py b/ci/scenarios/vDisk/rollback_vdisk_test/main.py
index 0779ad1..d061193 100644
--- a/ci/scenarios/vDisk/rollback_vdisk_test/main.py
+++ b/ci/scenarios/vDisk/rollback_vdisk_test/main.py
@@ -101,7 +101,7 @@ def validate_rollback():
         end = time.time()
         # clean base disks from clones
         if cloned:
-            RollbackChecks._delete_remaining_vdisks(base_vdisks=deployed_vdisks[1])
+            RollbackChecks._delete_remaining_vdisks(base_vdisks=deployed_vdisks[1], api=api)
             RollbackChecks.LOGGER.info("Finished deleting base vdisks")
         else:
             RollbackChecks.LOGGER.info("Skipped deleting base vdisks")
@@ -112,25 +112,24 @@
         RollbackChecks.LOGGER.info("Finished to validate the rollback")

     @staticmethod
-    def _delete_remaining_vdisks(base_vdisks):
+    def _delete_remaining_vdisks(base_vdisks, api):
         """
         Delete remaining base vdisks (when performing cloned=True)
-
         :param base_vdisks: vdisk_guids of a base_vdisks ['a15908c0-f7f0-402e-ad20-2be97e401cd3', ...]
         :type: list
+        :param api: api instance
         :return: None
         """
         for vdisk_guid in base_vdisks:
             RollbackChecks.LOGGER.info("Starting to remove base vDisk `{0}`".format(vdisk_guid))
-            VDiskRemover.remove_vdisk(vdisk_guid)
+            VDiskRemover.remove_vdisk(vdisk_guid, api)
             RollbackChecks.LOGGER.info("Finished to remove base vDisk `{0}`".format(vdisk_guid))

     @staticmethod
     def _deploy_vdisks(vpool, storagedriver, api, size=SIZE_VDISK, amount_vdisks=AMOUNT_VDISKS, cloned=False):
         """
         Deploy X amount of vdisks, write some data to it & snapshot
-
         :param vpool: a valid vpool object
         :type vpool: ovs.model.hybrids.vpool
         :param storagedriver: a valid storagedriver object
@@ -290,7 +289,7 @@ def _rollback_vdisks(stored_vdisks, vpool, api, amount_checks=MAX_ROLLBACK_CHECK

             # commencing deleting volumes
             RollbackChecks.LOGGER.info("Starting to remove VDisk `{0}`".format(vdisk.name))
-            VDiskRemover.remove_vdisk(stored_vdisk['vdisk_guid'])
+            VDiskRemover.remove_vdisk(stored_vdisk['vdisk_guid'], api)
             RollbackChecks.LOGGER.info("Finished removing VDisk `{0}`".format(vdisk.name))

diff --git a/ci/scenarios/vDisk/validate_clone_disk_test/main.py b/ci/scenarios/vDisk/validate_clone_disk_test/main.py
index bbcfd69..9437ab7 100644
--- a/ci/scenarios/vDisk/validate_clone_disk_test/main.py
+++ b/ci/scenarios/vDisk/validate_clone_disk_test/main.py
@@ -103,7 +103,7 @@ def validate_vdisk_clone(cls):
                                                                 snapshot_id=snapshot_id)['vdisk_guid'])
                 vdisks.append(cloned_vdisk)
         finally:
-            VDiskRemover.remove_vdisks_with_structure(vdisks)
+            VDiskRemover.remove_vdisks_with_structure(vdisks, api)

         cls.LOGGER.info("Finished validating clone vdisks")

diff --git a/ci/scenarios/vDisk/validate_template_disk_test/main.py b/ci/scenarios/vDisk/validate_template_disk_test/main.py
index 588adf0..a48838a 100644
--- a/ci/scenarios/vDisk/validate_template_disk_test/main.py
+++ b/ci/scenarios/vDisk/validate_template_disk_test/main.py
@@ -104,7 +104,7 @@ def validate_vdisk_clone(cls):
         finally:
             while len(vdisks) > 0:
                 vdisk = vdisks.pop()
-                VDiskRemover.remove_vdisk(vdisk.guid)
+                VDiskRemover.remove_vdisk(vdisk.guid, api)
         try:
             # template vdisk from clone (should fail)
             # parent_vdisk = VDiskHelper.get_vdisk_by_guid(
@@ -134,9 +134,9 @@ def validate_vdisk_clone(cls):
                 if vdisk.parent_vdisk_guid is None:
                     parent_vdisks.append(vdisk)
                     continue
-                VDiskRemover.remove_vdisk(vdisk.guid)
+                VDiskRemover.remove_vdisk(vdisk.guid, api)
             for parent_vdisk in parent_vdisks:
-                VDiskRemover.remove_vdisk(parent_vdisk.guid)
+                VDiskRemover.remove_vdisk(parent_vdisk.guid, api)

         cls.LOGGER.info("Finished to validate template vdisks")
diff --git a/ci/scenarios/vPool/add_remove_alba_vpool_test/main.py b/ci/scenarios/vPool/add_remove_alba_vpool_test/main.py
index 249d19d..0a3d93b 100644
--- a/ci/scenarios/vPool/add_remove_alba_vpool_test/main.py
+++ b/ci/scenarios/vPool/add_remove_alba_vpool_test/main.py
@@ -193,7 +193,7 @@ def validate_add_extend_remove_vpool(timeout=ADD_EXTEND_REMOVE_VPOOL_TIMEOUT):
                                      timeout=AddRemoveVPool.VDISK_CREATE_TIMEOUT)
             AddRemoveVPool.LOGGER.info("Finished creating vdisk `{0}`".format(vdisk_name))
             AddRemoveVPool.LOGGER.info("Starting to delete vdisk `{0}`".format(vdisk_name))
-            VDiskRemover.remove_vdisk_by_name(vdisk_name + '.raw', AddRemoveVPool.VPOOL_NAME)
+            VDiskRemover.remove_vdisk_by_name(vdisk_name, AddRemoveVPool.VPOOL_NAME, api)
             AddRemoveVPool.LOGGER.info("Finished deleting vdisk `{0}`".format(vdisk_name))

         # Delete vpool
diff --git a/ci/scenarios/vPool/mds_regression/main.py b/ci/scenarios/vPool/mds_regression/main.py
index f200343..882ae5e 100644
--- a/ci/scenarios/vPool/mds_regression/main.py
+++ b/ci/scenarios/vPool/mds_regression/main.py
@@ -16,10 +16,12 @@
 import json
 import random
 import time
+from multiprocessing.pool import ThreadPool
 from ci.api_lib.helpers.api import OVSClient
 from ci.api_lib.helpers.hypervisor.hypervisor import HypervisorFactory
 from ci.api_lib.helpers.network import NetworkHelper
 from ci.api_lib.helpers.storagerouter import StoragerouterHelper
+from ci.api_lib.helpers.vdisk import VDiskHelper
 from ci.api_lib.helpers.system import SystemHelper
 from ci.api_lib.helpers.thread import ThreadHelper
 from ci.api_lib.helpers.vpool import VPoolHelper
@@ -37,7 +39,6 @@
 from ovs.extensions.generic.remote import remote
 from ovs.extensions.generic.sshclient import SSHClient
 from ovs.extensions.services.service import ServiceManager
-from ovs.lib.generic import GenericController
 from ovs.lib.mdsservice import MDSServiceController
 from ovs.log.log_handler import LogHandler

@@ -54,9 +55,6 @@ class RegressionTester(CIConstants):

     VM_NAME = 'mds-regression'

-    with open(CONFIG_LOC, 'r') as JSON_CONFIG:
-        SETUP_CFG = json.load(JSON_CONFIG)
-
     @classmethod
     @gather_results(CASE_TYPE, LOGGER, TEST_NAME)
     def main(cls, blocked):
@@ -108,14 +106,11 @@ def start_test(cls, vm_amount=1, hypervisor_info=CIConstants.HYPERVISOR_INFO):
         try:
             cls.run_test(cluster_info=cluster_info,
                          compute_client=compute_client,
-                         disk_amount=volume_amount,
                          vm_info=vm_info,
                          api=api)
         finally:
             for vm_name, vm_object in vm_info.iteritems():
-                for vdisk in vm_object['vdisks']:
-                    VDiskRemover.remove_vdisk(vdisk.guid)
-            for vm_name in vm_info.keys():
+                VDiskRemover.remove_vdisks_with_structure(vm_object['vdisks'], api)
                 computenode_hypervisor.sdk.destroy(vm_name)
                 computenode_hypervisor.sdk.undefine(vm_name)

@@ -194,7 +189,7 @@ def setup(cls, cloud_init_info=CIConstants.CLOUD_INIT_DATA, logger=LOGGER):
         return api, cluster_info, compute_client, to_be_downed_client, is_ee, image_path, cloud_init_loc

     @classmethod
-    def run_test(cls, cluster_info, compute_client, vm_info, disk_amount, api, vm_username=CIConstants.VM_USERNAME, vm_password=CIConstants.VM_PASSWORD,
+    def run_test(cls, cluster_info, compute_client, vm_info, api, vm_username=CIConstants.VM_USERNAME, vm_password=CIConstants.VM_PASSWORD,
                  timeout=TEST_TIMEOUT, data_test_cases=CIConstants.DATA_TEST_CASES, logger=LOGGER):
         """
         Runs the test as described in https://github.com/openvstorage/dev_ops/issues/64
@@ -202,7 +197,6 @@ def run_test(cls, cluster_info, compute_client, vm_info, disk_amount, api, vm_us
         :param compute_client: SSHclient of the computenode
         :param vm_info: vm information
         :param api: api instance
-        :param disk_amount: amount of disks
         :param vm_username: username to login on all vms
         :param vm_password: password to login on all vms
         :param timeout: timeout in seconds
@@ -223,10 +217,12 @@
         failed_configurations = []
         # Extract vdisk info from vm_info - only get the data ones
         vdisk_info = {}
+        disk_amount = 0
         for vm_name, vm_object in vm_info.iteritems():
             for vdisk in vm_object['vdisks']:
                 if 'vdisk_data' in vdisk.name:
                     vdisk_info.update({vdisk.name: vdisk})
+                    disk_amount += 1
         try:
             cls._adjust_automatic_scrubbing(disable=True)
             with remote(compute_str.ip, [SSHClient]) as rem:
@@ -235,7 +231,6 @@
                                        'snapshots': {'pairs': [], 'r_semaphore': None}}}
                 output_files = []
                 safety_set = False
-                mds_triggered = False
                 try:
                     logger.info('Starting the following configuration: {0}'.format(configuration))
                     for vm_name, vm_data in vm_info.iteritems():
@@ -264,9 +259,9 @@
                     ThreadHelper.stop_evented_threads(threads['evented']['snapshots']['pairs'], threads['evented']['snapshots']['r_semaphore'])  # Stop snapshotting
                     cls._delete_snapshots(volume_bundle=vdisk_info, api=api)
-                    scrubbing_result = cls._start_scrubbing(volume_bundle=vdisk_info)  # Starting to scrub, offloaded to celery
+                    # Start scrubbing thread
+                    async_scrubbing = cls.start_scrubbing(volume_bundle=vdisk_info, api=api)  # Starting to scrub
                     cls._trigger_mds_issue(vdisk_info, destination_storagedriver.storagerouter.guid, api)  # Trigger mds failover while scrubber is busy
-                    mds_triggered = True
                     # Do some monitoring further for 60s
                     ThreadingHandler.keep_threads_running(r_semaphore=io_r_semaphore,
                                                           threads=io_thread_pairs,
@@ -283,7 +278,8 @@
                                                          output_files=output_files,
                                                          client=compute_client,
                                                          disk_amount=disk_amount)
-                    api.wait_for_task(task_id=scrubbing_result.id)  # Wait for scrubbing to finish
+                    possible_scrub_errors = async_scrubbing.get()  # Wait until scrubbing calls have given a result
+                    assert len(possible_scrub_errors) == 0, 'Scrubbing has encountered some errors: {0}'.format(', '.join(possible_scrub_errors))
                     cls._validate(values_to_check, monitoring_data)
                 except Exception as ex:
                     logger.error('Running the test for configuration {0} has failed because {1}'.format(configuration, str(ex)))
@@ -299,11 +295,6 @@
                         vm_data['screen_names'] = []
                 if safety_set is True:
                     cls._set_mds_safety(len(StorageRouterList.get_masters()), checkup=True)
-                if mds_triggered is True:  # Vdisks got moved at this point
-                    for vdisk_name, vdisk_object in vdisk_info.iteritems():
-                        VDiskSetup.move_vdisk(vdisk_guid=vdisk_object.guid,
-                                              target_storagerouter_guid=source_storagedriver.storagerouter.guid,
-                                              api=api)
         finally:
             cls._adjust_automatic_scrubbing(disable=False)
         assert len(failed_configurations) == 0, 'Certain configuration failed: {0}'.format(' '.join(failed_configurations))
@@ -409,22 +400,38 @@ def _delete_snapshots(volume_bundle, api, amount_to_delete=3, logger=LOGGER):
                     VDiskRemover.remove_snapshot(snapshot['guid'], vdisk_object.name, vdisk_object.vpool.name, api)
                     amount_to_delete -= 1

-    @staticmethod
-    def _run_pg_bench():
-        pass
+    @classmethod
+    def start_scrubbing(cls, volume_bundle, api, logger=LOGGER):
+        """
+        Start the scrubbing and wait in a separate thread until done
+        :param volume_bundle: volume information
+        :param api: api instance
+        :param logger: logging instance
+        :return:
+        """
+        pool = ThreadPool(processes=1)
+        return pool.apply_async(cls._start_scrubbing, args=(volume_bundle, api, logger))

     @staticmethod
-    def _start_scrubbing(volume_bundle):
+    def _start_scrubbing(volume_bundle, api, logger):
         """
-        Starts scrubbing and offloads it to celery
+        Starts scrubbing and will be offloaded into a separate thread
         :param volume_bundle: volume information
         :return: Asynchronous result of a CeleryTask
         :rtype: celery.result.AsyncResult
         """
-        vdisk_guids = []
+        vdisk_task_mapping = {}
+        error_msgs = []
+        for vdisk_name, vdisk_object in volume_bundle.iteritems():
+            vdisk_task_mapping[vdisk_object.guid] = VDiskHelper.scrub_vdisk(vdisk_object.guid, api, wait=False)  # Tasks are launched but not checked upon
         for vdisk_name, vdisk_object in volume_bundle.iteritems():
-            vdisk_guids.append(vdisk_object.guid)
-        return GenericController.execute_scrub.delay(vdisk_guids=vdisk_guids)
+            logger.debug("Waiting for vdisk {0}'s task to finish scrubbing.".format(vdisk_name))
+            task_result = api.wait_for_task(vdisk_task_mapping[vdisk_object.guid])
+            if not task_result[0]:
+                error_msg = "Scrubbing vDisk `{0}` has failed with error {1}".format(vdisk_name, task_result[1])
+                logger.error(error_msg)
+                error_msgs.append(error_msg)
+        return error_msgs

     @staticmethod
     def _validate(dal_values, monitoring_data):
diff --git a/ci/scenarios/vmachine/check_scrubbing_test/main.py b/ci/scenarios/vmachine/check_scrubbing_test/main.py
index 37af4fc..2e5363d 100644
--- a/ci/scenarios/vmachine/check_scrubbing_test/main.py
+++ b/ci/scenarios/vmachine/check_scrubbing_test/main.py
@@ -90,7 +90,7 @@ def start_test(cls):
             cls._validate_scrubbing(stored_map)
         finally:
             for vdisk_type, vdisk_list in created_vdisks.iteritems():
-                VDiskRemover.remove_vdisks_with_structure(vdisk_list)
+                VDiskRemover.remove_vdisks_with_structure(vdisk_list, api)

     @staticmethod
     def _validate_scrubbing(vdisk_stored_mapper, amount_checks=MAX_SCRUBBING_CHECKS, timeout=SCRUBBING_TIMEOUT):
diff --git a/ci/scenarios/vmachine/perform_fio_test/main.py b/ci/scenarios/vmachine/perform_fio_test/main.py
index a92c1ba..507f151 100644
--- a/ci/scenarios/vmachine/perform_fio_test/main.py
+++ b/ci/scenarios/vmachine/perform_fio_test/main.py
@@ -110,6 +110,7 @@ def run_test_fuse(cls, storagedriver, disk_amount, write_amount, logger=LOGGER):
         :param logger: logging instance
         :return:
         """
+        api = cls.get_api_instance()
         vpool = storagedriver.vpool
         client = SSHClient(storagedriver.storagerouter, username='root')
         vdisk_info = {}
@@ -130,7 +131,7 @@ def run_test_fuse(cls, storagedriver, disk_amount, write_amount, logger=LOGGER):
             raise
         finally:
             for vdisk in vdisk_info.values():
-                VDiskRemover.remove_vdisk_by_name(vdisk.devicename, vdisk.vpool.name)
+                VDiskRemover.remove_vdisk_by_name(vdisk.devicename, vdisk.vpool.name, api)

     @staticmethod
     def _get_vdisk(vdisk_name, vpool_name, timeout=60, logger=LOGGER):
@@ -168,6 +169,7 @@ def run_test_edge_blktap(cls, storagedriver, image_path, disk_amount, write_amou
         :param logger: logging instance
         :return: None
         """
+        api = cls.get_api_instance()
         client = SSHClient(storagedriver.storagerouter, username='root')
         vpool = storagedriver.vpool
         edge_info = {'port': storagedriver.ports['edge'],
@@ -207,7 +209,7 @@ def run_test_edge_blktap(cls, storagedriver, image_path, disk_amount, write_amou
                 raise ValueError('Unable to destroy the blocktap connection because its output format has changed.')
             client.run(["tap-ctl", "destroy", "-p", tap_conn_pid, "-m", tap_conn_minor])
             for vdisk_name in vdisk_info.keys():
-                VDiskRemover.remove_vdisk_by_name('{0}.raw'.format(vdisk_name), vpool.name)
+                VDiskRemover.remove_vdisk_by_name(vdisk_name, vpool.name, api)


 def run(blocked=False):
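
The core change in ci/scenarios/vPool/mds_regression/main.py is the scrubbing strategy: instead of one fire-and-forget Celery call (GenericController.execute_scrub.delay), a scrub task is launched per vDisk via VDiskHelper.scrub_vdisk(..., wait=False) and the results are awaited from a one-worker ThreadPool, so the MDS failover can be triggered while scrubbing is still in flight and any scrub error is collected afterwards through AsyncResult.get(). The snippet below is a minimal, self-contained sketch of that pattern, not the test framework's actual code: launch_scrub and wait_for_task are hypothetical stand-ins for VDiskHelper.scrub_vdisk and api.wait_for_task, and the __main__ block uses dummy callables so the sketch runs on its own.

from multiprocessing.pool import ThreadPool


def start_scrubbing(volume_bundle, launch_scrub, wait_for_task):
    """Launch one scrub task per volume, then await them from a worker thread.

    Returns an AsyncResult; .get() blocks until every task has been checked
    and yields the list of error messages (empty when all scrubs succeeded).
    """
    pool = ThreadPool(processes=1)
    return pool.apply_async(_await_scrub_results, args=(volume_bundle, launch_scrub, wait_for_task))


def _await_scrub_results(volume_bundle, launch_scrub, wait_for_task):
    # Fire off all scrub tasks first so they run concurrently ...
    task_ids = dict((name, launch_scrub(guid)) for name, guid in volume_bundle.items())
    errors = []
    # ... then wait for each task and collect failures instead of raising.
    for name, task_id in task_ids.items():
        success, result = wait_for_task(task_id)
        if not success:
            errors.append('Scrubbing {0} failed: {1}'.format(name, result))
    return errors


if __name__ == '__main__':
    # Dummy stand-ins so the sketch runs without the Open vStorage test suite.
    async_scrubbing = start_scrubbing({'vdisk_data_0': 'guid-0'},
                                      launch_scrub=lambda guid: guid,
                                      wait_for_task=lambda task_id: (True, None))
    print(async_scrubbing.get())  # -> [] (no scrub errors)

In the patch itself the collected errors are asserted on right after the fio monitoring loop finishes, which lets a failed scrub surface as a test failure instead of being lost by the previous fire-and-forget call.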