From f89f48482eae9fba8cde894c8a64f1c1a5e6c6b8 Mon Sep 17 00:00:00 2001
From: Jan Pokorny
Date: Tue, 22 Sep 2020 14:36:04 +0200
Subject: [PATCH] fix: raid volume pre cleanup

Cause: Existing data were not removed from member disks before RAID volume creation.

Fix: RAID volumes now remove existing data from member disks as needed before creation.

Signed-off-by: Jan Pokorny
---
 library/blivet.py                   |   9 ++-
 tests/tests_raid_volume_cleanup.yml | 105 ++++++++++++++++++++++++++++
 2 files changed, 112 insertions(+), 2 deletions(-)
 create mode 100644 tests/tests_raid_volume_cleanup.yml

diff --git a/library/blivet.py b/library/blivet.py
index 79eb105c..5e03a9d8 100644
--- a/library/blivet.py
+++ b/library/blivet.py
@@ -1002,8 +1002,13 @@ def _create(self):
         if self._device:
             return
 
-        if safe_mode:
-            raise BlivetAnsibleError("cannot create new RAID in safe mode")
+        for spec in self._volume["disks"]:
+            disk = self._blivet.devicetree.resolve_device(spec)
+            if not disk.isleaf or disk.format.type is not None:
+                if safe_mode and (disk.format.type is not None or disk.format.name != get_format(None).name):
+                    raise BlivetAnsibleError("cannot remove existing formatting and/or devices on disk '%s' in safe mode" % disk.name)
+                else:
+                    self._blivet.devicetree.recursive_remove(disk)
 
         # begin creating the devices
         members = self._create_raid_members(self._volume["disks"])
diff --git a/tests/tests_raid_volume_cleanup.yml b/tests/tests_raid_volume_cleanup.yml
new file mode 100644
index 00000000..e85a503e
--- /dev/null
+++ b/tests/tests_raid_volume_cleanup.yml
@@ -0,0 +1,105 @@
+---
+- hosts: all
+  become: true
+  vars:
+    storage_safe_mode: false
+    storage_use_partitions: true
+    mount_location1: '/opt/test1'
+    mount_location2: '/opt/test2'
+    volume1_size: '5g'
+    volume2_size: '4g'
+
+  tasks:
+    - include_role:
+        name: linux-system-roles.storage
+
+    - name: Mark tasks to be skipped
+      set_fact:
+        storage_skip_checks:
+          - blivet_available
+          - packages_installed
+          - service_facts
+
+    - include_tasks: get_unused_disk.yml
+      vars:
+        max_return: 3
+        disks_needed: 3
+
+    - name: Create two LVM logical volumes under volume group 'foo'
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_pools:
+          - name: foo
+            disks: "{{ unused_disks }}"
+            volumes:
+              - name: test1
+                size: "{{ volume1_size }}"
+                mount_point: "{{ mount_location1 }}"
+              - name: test2
+                size: "{{ volume2_size }}"
+                mount_point: "{{ mount_location2 }}"
+
+    - name: Enable safe mode
+      set_fact:
+        storage_safe_mode: true
+
+    - name: Try to overwrite existing device with raid volume and safe mode on (expect failure)
+      block:
+        - name: Create a RAID1 device mounted on "{{ mount_location1 }}"
+          include_role:
+            name: linux-system-roles.storage
+          vars:
+            storage_volumes:
+              - name: test1
+                type: raid
+                raid_level: "raid1"
+                raid_device_count: 2
+                raid_spare_count: 1
+                disks: "{{ unused_disks }}"
+                mount_point: "{{ mount_location1 }}"
+                state: present
+
+        - name: unreachable task
+          fail:
+            msg: UNREACH
+
+      rescue:
+        - name: Check that we failed in the role
+          assert:
+            that:
+              - ansible_failed_result.msg != 'UNREACH'
+            msg: "Role has not failed when it should have"
+
+    - name: Disable safe mode
+      set_fact:
+        storage_safe_mode: false
+
+    - name: Create a RAID1 device mounted on "{{ mount_location1 }}"
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_volumes:
+          - name: test1
+            type: raid
+            raid_level: "raid1"
+            raid_device_count: 2
+            raid_spare_count: 1
+            disks: "{{ unused_disks }}"
+            mount_point: "{{ mount_location1 }}"
+            state: present
+
+    - name: Cleanup - remove the disk device created above
+      include_role:
+        name: linux-system-roles.storage
+      vars:
+        storage_volumes:
+          - name: test1
+            type: raid
+            raid_level: "raid1"
+            raid_device_count: 2
+            raid_spare_count: 1
+            disks: "{{ unused_disks }}"
+            mount_point: "{{ mount_location1 }}"
+            state: absent
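
Note for reviewers (not part of the patch): the sketch below restates the pre-cleanup decision added to `_create()` in the blivet.py hunk above, using a hypothetical `Disk` stub rather than the real blivet device objects. In the actual change the check runs on devices obtained via `resolve_device()` and uses `isleaf`, `format.type`, `get_format(None)` and `recursive_remove()`; the names below are illustrative only.

```python
# Standalone sketch of the safe-mode pre-cleanup logic, assuming a toy Disk
# stand-in (not the blivet DiskDevice API).
from dataclasses import dataclass


@dataclass
class Disk:
    name: str
    isleaf: bool           # True when nothing (partitions, LVM, ...) sits on top of the disk
    format_type: str       # e.g. "ext4", "lvmpv"; None when the disk carries no formatting
    format_is_blank: bool  # True when the format is only the blank placeholder format


def precleanup_action(disk: Disk, safe_mode: bool) -> str:
    """Return what the new pre-cleanup step would do with one member disk."""
    if disk.isleaf and disk.format_type is None:
        return "keep"              # nothing on the disk, nothing to remove
    if safe_mode and (disk.format_type is not None or not disk.format_is_blank):
        return "error"             # refuse to destroy existing data in safe mode
    return "recursive_remove"      # wipe the disk so it can become a RAID member


if __name__ == "__main__":
    clean = Disk("sda", isleaf=True, format_type=None, format_is_blank=True)
    used = Disk("sdb", isleaf=False, format_type="lvmpv", format_is_blank=False)
    print(precleanup_action(clean, safe_mode=True))   # keep
    print(precleanup_action(used, safe_mode=True))    # error
    print(precleanup_action(used, safe_mode=False))   # recursive_remove
```

This mirrors what the new test exercises: with `storage_safe_mode: true` the role must fail on disks that still hold the LVM volumes, and with safe mode off it wipes them and builds the RAID volume.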