feat: Added support for creating shared LVM setups
- feature requested for GFS2 support
- adds support for creating shared VGs
- a shared LVM setup requires the lvmlockd service with the dlm lock manager to be running
- to test this change, the ha_cluster system role is used to set up a degenerate single-node cluster on localhost
- the test is skipped when run locally due to an issue with the underlying services
- requires a blivet version with shared LVM setup support (storaged-project/blivet#1123)
japokorn committed Nov 28, 2023
1 parent c4147d2 commit 79b1520
Showing 6 changed files with 184 additions and 1 deletion.
11 changes: 11 additions & 0 deletions README.md
@@ -53,6 +53,17 @@ keys:
  This specifies the type of pool to manage.
  Valid values for `type`: `lvm`.

- `shared`

  If set to `true`, the role creates or manages a shared volume group.
  Requires the lvmlockd and dlm services to be configured and running.

  Default: `false`

  __WARNING__: Modifying the `shared` value on an existing pool is a
  destructive operation. The pool itself will be removed as part of the
  process.
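
  For example, a shared pool can be requested like this (an illustrative
  snippet; the VG name and the backing disk are placeholders):

  ```yaml
  storage_pools:
    - name: vg1            # placeholder VG name
      type: lvm
      disks:
        - sdb              # placeholder backing disk
      shared: true         # needs lvmlockd and dlm running on the node
      state: present
  ```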

- `disks`

  A list which specifies the set of disks to use as backing storage for the pool.
2 changes: 2 additions & 0 deletions defaults/main.yml
@@ -27,6 +27,8 @@ storage_pool_defaults:
  raid_chunk_size: null
  raid_metadata_version: null

  shared: false

storage_volume_defaults:
  state: "present"
  type: lvm
3 changes: 2 additions & 1 deletion library/blivet.py
@@ -1527,7 +1527,7 @@ def _create(self):
        if not self._device:
            members = self._manage_encryption(self._create_members())
            try:
-               pool_device = self._blivet.new_vg(name=self._pool['name'], parents=members)
+               pool_device = self._blivet.new_vg(name=self._pool['name'], parents=members, shared=self._pool['shared'])
            except Exception as e:
                raise BlivetAnsibleError("failed to set up pool '%s': %s" % (self._pool['name'], str(e)))

@@ -1823,6 +1823,7 @@ def run_module():
            raid_spare_count=dict(type='int'),
            raid_metadata_version=dict(type='str'),
            raid_chunk_size=dict(type='str'),
            shared=dict(type='bool'),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            type=dict(type='str'),
            volumes=dict(type='list', elements='dict', default=list(),
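
Taken together, the two hunks above thread the new per-pool `shared` flag from
the module arguments through to blivet's `new_vg()`. A minimal standalone
sketch of the equivalent blivet calls follows (illustrative only; the device
and VG names are placeholders, and it assumes a blivet build with shared-VG
support, storaged-project/blivet#1123):

```python
# Sketch only: the role's module wraps these calls with member setup,
# encryption handling and proper error reporting.
import blivet

b = blivet.Blivet()
b.reset()  # scan the current storage configuration

disk = b.devicetree.get_device_by_name("sdb")  # placeholder disk
# schedule formatting the disk as an LVM physical volume
b.format_device(disk, blivet.formats.get_format("lvmpv", device=disk.path))

# shared=True creates the VG with shared locking; lvmlockd and the dlm
# lock manager must already be running for the commit below to succeed
vg = b.new_vg(name="vg1", parents=[disk], shared=True)
b.create_device(vg)

b.do_it()  # commit the scheduled actions to disk
```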
3 changes: 3 additions & 0 deletions tests/collection-requirements.yml
@@ -0,0 +1,3 @@
---
collections:
  - fedora.linux_system_roles
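
The collection listed above can be installed for the tests with, for example:

```bash
ansible-galaxy collection install -r tests/collection-requirements.yml
```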
14 changes: 14 additions & 0 deletions tests/test-verify-pool.yml
@@ -15,6 +15,20 @@
# compression
# deduplication

- name: Get VG shared value status
  command: vgs --noheadings --binary -o shared {{ storage_test_pool.name }}
  register: vgs_dump
  when: storage_test_pool.type == 'lvm' and storage_test_pool.state == 'present'
  changed_when: false

- name: Verify that VG shared value checks out
  assert:
    that: (storage_test_pool.shared | bool) == ('1' in vgs_dump.stdout)
    msg: >-
      Shared VG status ({{ '1' in vgs_dump.stdout }})
      does not match the requested state ({{ storage_test_pool.shared }})
  when: storage_test_pool.type == 'lvm' and storage_test_pool.state == 'present'
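
For reference, `vgs --noheadings --binary -o shared` prints `1` for a shared
VG and `0` otherwise, which is what the assertion above compares against; for
example (hypothetical output for a shared VG named vg1):

```
# vgs --noheadings --binary -o shared vg1
        1
```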

- name: Verify pool subset
  include_tasks: "test-verify-pool-{{ storage_test_pool_subset }}.yml"
  loop: "{{ _storage_pool_tests }}"
152 changes: 152 additions & 0 deletions tests/tests_lvm_pool_shared.yml
@@ -0,0 +1,152 @@
---
- name: Test LVM shared pools
  hosts: all
  become: true
  vars:
    storage_safe_mode: false
    storage_use_partitions: true
    mount_location1: '/opt/test1'
    volume1_size: '4g'

  tasks:
    - name: Change node from 'localhost' to '127.0.0.1'
      set_fact:
        inventory_hostname: "127.0.0.1"  # noqa: var-naming
      when: inventory_hostname == "localhost"

    - name: Run the role to install blivet
      include_role:
        name: linux-system-roles.storage

    - name: Gather package facts
      package_facts:

    - name: Set blivet package name
      set_fact:
        blivet_pkg_name: "{{ ansible_facts.packages |
          select('search', 'blivet') | select('search', 'python') | list }}"

    - name: Set blivet package version
      set_fact:
        blivet_pkg_version: "{{
          ansible_facts.packages[blivet_pkg_name[0]][0]['version'] +
          '-' + ansible_facts.packages[blivet_pkg_name[0]][0]['release'] }}"

    - name: Set distribution version
      set_fact:
        is_rhel9: "{{ (ansible_facts.distribution == 'CentOS' or
                       ansible_facts.distribution == 'Enterprise Linux' or
                       ansible_facts.distribution == 'RedHat') and
                      ansible_facts.distribution_major_version == '9' }}"
        is_rhel8: "{{ (ansible_facts.distribution == 'CentOS' or
                       ansible_facts.distribution == 'Enterprise Linux' or
                       ansible_facts.distribution == 'RedHat') and
                      ansible_facts.distribution_major_version == '8' }}"
        is_fedora: "{{ ansible_facts.distribution == 'Fedora' }}"

    - name: Skip test if the blivet version does not support shared VGs
      meta: end_host
      when: ((is_fedora and blivet_pkg_version is version("3.8.2-1", "<")) or
             (is_rhel8 and blivet_pkg_version is version("3.6.0-8", "<")) or
             (is_rhel9 and blivet_pkg_version is version("3.6.0-11", "<")))

    - name: Create cluster
      ansible.builtin.include_role:
        name: fedora.linux_system_roles.ha_cluster
      vars:
        ha_cluster_cluster_name: rhel9-1node
        # Users should vault-encrypt the password
        ha_cluster_hacluster_password: hapasswd
        ha_cluster_extra_packages:
          - dlm
          - lvm2-lockd
        ha_cluster_cluster_properties:
          - attrs:
              # Don't do this in production
              - name: stonith-enabled
                value: 'false'
        ha_cluster_resource_primitives:
          - id: dlm
            agent: 'ocf:pacemaker:controld'
            instance_attrs:
              - attrs:
                  # Don't do this in production
                  - name: allow_stonith_disabled
                    value: 'true'
          - id: lvmlockd
            agent: 'ocf:heartbeat:lvmlockd'
        ha_cluster_resource_groups:
          - id: locking
            resource_ids:
              - dlm
              - lvmlockd

    - name: Get unused disks
      include_tasks: get_unused_disk.yml
      vars:
        max_return: 1

    - name: Create a disk device mounted on {{ mount_location1 }}
      include_role:
        name: linux-system-roles.storage
      vars:
        storage_pools:
          - name: vg1
            disks: "{{ unused_disks }}"
            type: lvm
            shared: true
            state: present
            volumes:
              - name: lv1
                size: "{{ volume1_size }}"
                mount_point: "{{ mount_location1 }}"

    - name: Verify role results
      include_tasks: verify-role-results.yml

    - name: Repeat the previous step to verify idempotence
      include_role:
        name: linux-system-roles.storage
      vars:
        storage_pools:
          - name: vg1
            disks: "{{ unused_disks }}"
            type: lvm
            shared: true
            state: present
            volumes:
              - name: lv1
                size: "{{ volume1_size }}"
                mount_point: "{{ mount_location1 }}"

    - name: Verify role results
      include_tasks: verify-role-results.yml

    - name: Remove the device created above, mounted on {{ mount_location1 }}
      include_role:
        name: linux-system-roles.storage
      vars:
        storage_pools:
          - name: vg1
            disks: "{{ unused_disks }}"
            type: lvm
            shared: true
            state: absent
            volumes:
              - name: lv1
                size: "{{ volume1_size }}"
                mount_point: "{{ mount_location1 }}"

    - name: Verify role results
      include_tasks: verify-role-results.yml

    - name: Remove cluster
      ansible.builtin.include_role:
        name: fedora.linux_system_roles.ha_cluster
      vars:
        ha_cluster_cluster_name: rhel9-1node
        ha_cluster_cluster_present: false