chore: better lvm2 tests
Use LVM2 tests that rely on module loading by lvm.

Fixes: #9300

Signed-off-by: Noel Georgi <git@frezbo.dev>
(cherry picked from commit 76318bd)
frezbo authored and smira committed Sep 23, 2024
1 parent 882582a commit 4c7948b
Showing 12 changed files with 48 additions and 22 deletions.
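For local reproduction, a minimal sketch of how the affected e2e run is invoked with the new patch chained in. The values are taken from the workflow steps below; WITH_CONFIG_PATCH_WORKER chains machine-config patches with ':'. Everything about the local environment (checkout, QEMU, registry access) is assumed:

    # registry used by CI; adjust or drop for a local run
    export IMAGE_REGISTRY=registry.dev.siderolabs.io
    export QEMU_EXTRA_DISKS="3"
    export QEMU_EXTRA_DISKS_DRIVERS=ide,nvme
    export QEMU_EXTRA_DISKS_SIZE="10240"
    # chain the existing worker patch with the new dm-raid module patch
    export WITH_CONFIG_PATCH_WORKER='@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml'
    sudo -E make e2e-qemu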
14 changes: 9 additions & 5 deletions .github/workflows/ci.yaml
@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
-# Generated on 2024-09-20T00:40:37Z by kres 8be5fa7.
+# Generated on 2024-09-21T05:02:59Z by kres 8be5fa7.

name: default
concurrency:
@@ -1464,7 +1464,7 @@ jobs:
QEMU_EXTRA_DISKS: "3"
QEMU_MEMORY_WORKERS: "4096"
SHORT_INTEGRATION_TEST: "yes"
-WITH_CONFIG_PATCH_WORKER: '@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml'
+WITH_CONFIG_PATCH_WORKER: '@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml:@hack/test/patches/dm-raid-module.yaml'
run: |
sudo -E make e2e-qemu
- name: save artifacts
@@ -2774,7 +2774,7 @@ jobs:
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_DRIVERS: ide,nvme
QEMU_EXTRA_DISKS_SIZE: "10240"
-WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml'
+WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml'
run: |
sudo -E make e2e-qemu
- name: save artifacts
@@ -3086,10 +3086,10 @@ jobs:
- name: e2e-qemu
env:
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_EXTRA_DISKS: "2"
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_DRIVERS: ide,nvme
QEMU_EXTRA_DISKS_SIZE: "10240"
-WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml'
+WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml'
WITH_DISK_ENCRYPTION: "true"
WITH_KUBESPAN: "true"
WITH_VIRTUAL_IP: "true"
@@ -3193,7 +3193,11 @@ jobs:
- name: e2e-qemu-race
env:
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_DRIVERS: ide,nvme
QEMU_EXTRA_DISKS_SIZE: "10240"
TAG_SUFFIX: -race
+WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml'
run: |
sudo -E make e2e-qemu
- name: save artifacts
4 changes: 2 additions & 2 deletions .github/workflows/integration-extensions-cron.yaml
@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
-# Generated on 2024-09-21T09:26:00Z by kres 8be5fa7.
+# Generated on 2024-09-23T09:06:18Z by kres 8be5fa7.

name: integration-extensions-cron
concurrency:
@@ -113,7 +113,7 @@ jobs:
QEMU_EXTRA_DISKS: "3"
QEMU_MEMORY_WORKERS: "4096"
SHORT_INTEGRATION_TEST: "yes"
-WITH_CONFIG_PATCH_WORKER: '@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml'
+WITH_CONFIG_PATCH_WORKER: '@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml:@hack/test/patches/dm-raid-module.yaml'
run: |
sudo -E make e2e-qemu
- name: save artifacts
4 changes: 2 additions & 2 deletions .github/workflows/integration-qemu-cron.yaml
@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
-# Generated on 2024-09-21T09:26:00Z by kres 8be5fa7.
+# Generated on 2024-09-23T09:06:18Z by kres 8be5fa7.

name: integration-qemu-cron
concurrency:
@@ -84,7 +84,7 @@ jobs:
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_DRIVERS: ide,nvme
QEMU_EXTRA_DISKS_SIZE: "10240"
-WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml'
+WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml'
run: |
sudo -E make e2e-qemu
- name: save artifacts
6 changes: 3 additions & 3 deletions .github/workflows/integration-qemu-encrypted-vip-cron.yaml
@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
-# Generated on 2024-09-09T12:40:13Z by kres 8be5fa7.
+# Generated on 2024-09-23T09:06:18Z by kres 8be5fa7.

name: integration-qemu-encrypted-vip-cron
concurrency:
@@ -81,10 +81,10 @@ jobs:
- name: e2e-qemu
env:
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_EXTRA_DISKS: "2"
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_DRIVERS: ide,nvme
QEMU_EXTRA_DISKS_SIZE: "10240"
-WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml'
+WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml'
WITH_DISK_ENCRYPTION: "true"
WITH_KUBESPAN: "true"
WITH_VIRTUAL_IP: "true"
6 changes: 5 additions & 1 deletion .github/workflows/integration-qemu-race-cron.yaml
@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
-# Generated on 2024-09-09T12:40:13Z by kres 8be5fa7.
+# Generated on 2024-09-23T09:06:18Z by kres 8be5fa7.

name: integration-qemu-race-cron
concurrency:
@@ -90,7 +90,11 @@ jobs:
- name: e2e-qemu-race
env:
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_DRIVERS: ide,nvme
QEMU_EXTRA_DISKS_SIZE: "10240"
TAG_SUFFIX: -race
+WITH_CONFIG_PATCH_WORKER: '@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml'
run: |
sudo -E make e2e-qemu
- name: save artifacts
12 changes: 8 additions & 4 deletions .kres.yaml
@@ -332,7 +332,7 @@ spec:
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_SIZE: "10240"
QEMU_EXTRA_DISKS_DRIVERS: "ide,nvme"
WITH_CONFIG_PATCH_WORKER: "@hack/test/patches/ephemeral-nvme.yaml"
WITH_CONFIG_PATCH_WORKER: "@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml"
- name: save-talos-logs
conditions:
- always
@@ -1017,7 +1017,7 @@ spec:
withSudo: true
environment:
QEMU_MEMORY_WORKERS: 4096
WITH_CONFIG_PATCH_WORKER: "@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml"
WITH_CONFIG_PATCH_WORKER: "@_out/installer-extensions-patch.yaml:@hack/test/patches/extensions.yaml:@hack/test/patches/dm-raid-module.yaml"
QEMU_EXTRA_DISKS: 3
SHORT_INTEGRATION_TEST: yes
EXTRA_TEST_ARGS: -talos.extensions.qemu
@@ -1153,10 +1153,10 @@ spec:
WITH_VIRTUAL_IP: true
WITH_KUBESPAN: true
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_EXTRA_DISKS: "2"
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_SIZE: "10240"
QEMU_EXTRA_DISKS_DRIVERS: "ide,nvme"
WITH_CONFIG_PATCH_WORKER: "@hack/test/patches/ephemeral-nvme.yaml"
WITH_CONFIG_PATCH_WORKER: "@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml"
- name: save-talos-logs
conditions:
- always
@@ -1213,6 +1213,10 @@ spec:
command: e2e-qemu
withSudo: true
environment:
QEMU_EXTRA_DISKS: "3"
QEMU_EXTRA_DISKS_SIZE: "10240"
QEMU_EXTRA_DISKS_DRIVERS: "ide,nvme"
WITH_CONFIG_PATCH_WORKER: "@hack/test/patches/ephemeral-nvme.yaml:@hack/test/patches/dm-raid-module.yaml"
TAG_SUFFIX: -race
IMAGE_REGISTRY: registry.dev.siderolabs.io
- name: save-talos-logs
4 changes: 4 additions & 0 deletions hack/test/patches/dm-raid-module.yaml
@@ -0,0 +1,4 @@
+machine:
+  kernel:
+    modules:
+      - name: dm_raid
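The four added lines above are a Talos machine-config patch that loads the dm_raid kernel module on the worker nodes; the raid1 logical volume created in the updated LVM test further down needs it. One way to confirm the module is present on a node, assuming talosctl access to the cluster (the node address is a placeholder):

    # dm_raid should appear in the node's loaded-module list
    talosctl -n 10.5.0.3 read /proc/modules | grep dm_raid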
2 changes: 1 addition & 1 deletion internal/integration/api/common.go
@@ -42,7 +42,7 @@ func (suite *CommonSuite) TearDownTest() {

// TestVirtioModulesLoaded verifies that the virtio modules are loaded.
func (suite *CommonSuite) TestVirtioModulesLoaded() {
-if suite.Cluster == nil || suite.Cluster.Provisioner() != "qemu" {
+if suite.Cluster == nil || suite.Cluster.Provisioner() != base.ProvisionerQEMU {
suite.T().Skip("skipping virtio test since provisioner is not qemu")
}

9 changes: 7 additions & 2 deletions internal/integration/api/volumes.go
@@ -185,6 +185,10 @@ func (suite *VolumesSuite) TestLVMActivation() {
suite.T().Skip("skipping test in short mode.")
}

+	if suite.Cluster == nil || suite.Cluster.Provisioner() != base.ProvisionerQEMU {
+		suite.T().Skip("skipping test for non-qemu provisioner")
+	}

node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)

userDisks, err := suite.UserDisks(suite.ctx, node)
@@ -211,7 +215,7 @@

stdout, _, err = podDef.Exec(
suite.ctx,
"nsenter --mount=/proc/1/ns/mnt -- lvcreate -n lv0 -L 1G vg0",
"nsenter --mount=/proc/1/ns/mnt -- lvcreate --mirrors=1 --type=raid1 --nosync -n lv0 -L 1G vg0",
)
suite.Require().NoError(err)

@@ -275,7 +279,8 @@ func (suite *VolumesSuite) lvmVolumeExists() bool {
}

// we test with creating a volume group with two logical volumes
-return lvmVolumeCount == 2
+// one mirrored and one not, so we expect to see 6 volumes
+return lvmVolumeCount == 6
}

func init() {
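To make the new count concrete, a rough sketch of the LVM commands the test drives via nsenter and the device-mapper entries they should produce: the lvcreate call for lv0 is copied from the diff, while the second, linear volume (named lv1 here, with an assumed size) and the device names follow standard LVM raid1 naming and are not shown in the hunk above.

    # mirrored LV, exactly as the test creates it
    lvcreate --mirrors=1 --type=raid1 --nosync -n lv0 -L 1G vg0
    # second, non-mirrored LV (name and size assumed)
    lvcreate -n lv1 -L 1G vg0
    # expected /dev/mapper entries, six in total:
    #   vg0-lv0, vg0-lv0_rimage_0, vg0-lv0_rimage_1,
    #   vg0-lv0_rmeta_0, vg0-lv0_rmeta_1, vg0-lv1
    ls /dev/mapper/

The raid1 LV contributes its top-level volume plus two rimage and two rmeta sub-volumes, and the linear LV adds one more, which is where the count of 6 in the updated check comes from.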
2 changes: 1 addition & 1 deletion internal/integration/api/watchdog.go
@@ -39,7 +39,7 @@ func (suite *WatchdogSuite) SuiteName() string {
func (suite *WatchdogSuite) SetupTest() {
suite.ctx, suite.ctxCancel = context.WithTimeout(context.Background(), 1*time.Minute)

-if suite.Cluster == nil || suite.Cluster.Provisioner() != "qemu" {
+if suite.Cluster == nil || suite.Cluster.Provisioner() != base.ProvisionerQEMU {
suite.T().Skip("skipping watchdog test since provisioner is not qemu")
}
}
5 changes: 5 additions & 0 deletions internal/integration/base/base.go
@@ -15,6 +15,11 @@ import (
"github.com/siderolabs/talos/pkg/provision/access"
)

+const (
+	// ProvisionerQEMU is the name of the QEMU provisioner.
+	ProvisionerQEMU = "qemu"
+)

// TalosSuite defines most common settings for integration test suites.
type TalosSuite struct {
// Endpoint to use to connect, if not set config is used
2 changes: 1 addition & 1 deletion internal/integration/k8s/apparmor.go
@@ -35,7 +35,7 @@ func (suite *ApparmorSuite) TestApparmor() {
suite.T().Skip("without full cluster state reaching out to the node IP is not reliable")
}

-if suite.Cluster.Provisioner() != "qemu" {
+if suite.Cluster.Provisioner() != base.ProvisionerQEMU {
suite.T().Skip("skipping apparmor test since provisioner is not qemu")
}

