Skip to content

Commit

Permalink
chore: move csi tests as go test
Browse files Browse the repository at this point in the history
Move the rook-ceph CSI tests to Go tests.
This allows us to add more CSI tests in the future.

Fixes: siderolabs#9135

Signed-off-by: Noel Georgi <git@frezbo.dev>
  • Loading branch information
frezbo committed Aug 23, 2024
1 parent 5b4b649 commit ca4255f
Show file tree
Hide file tree
Showing 11 changed files with 354 additions and 30 deletions.
9 changes: 9 additions & 0 deletions .github/renovate.json
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,15 @@
],
"versioningTemplate": "{{#if versioning}}{{versioning}}{{else}}semver{{/if}}"
},
{
"fileMatch": [
"internal/integration/k8s/constants.go"
],
"matchStrings": [
"\\/\\/\\s+renovate: datasource=(?<datasource>.*?)(?:\\s+extractVersion=(?<extractVersion>.+?))?(?:\\s+versioning=(?<versioning>.+?))?\\s+depName=(?<depName>.+?)?(?:\\s+registryUrl=(?<registryUrl>.+?))?\\s.*Version\\s+=\\s+\\\"(?<currentValue>.+?)\\\""
],
"versioningTemplate": "{{#if versioning}}{{versioning}}{{else}}semver{{/if}}"
},
{
"fileMatch": [
"Dockerfile"
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-08-14T14:49:10Z by kres 7be2a05.
# Generated on 2024-08-22T16:03:09Z by kres 9cc7f48.

name: default
concurrency:
Expand Down Expand Up @@ -2637,14 +2637,14 @@ jobs:
make talosctl-cni-bundle
- name: e2e-qemu-csi
env:
EXTRA_TEST_ARGS: -talos.csi=rook-ceph
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_CPUS_WORKERS: "4"
QEMU_EXTRA_DISKS: "1"
QEMU_EXTRA_DISKS_SIZE: "12288"
QEMU_MEMORY_WORKERS: "5120"
QEMU_WORKERS: "3"
SHORT_INTEGRATION_TEST: "yes"
WITH_TEST: run_csi_tests
run: |
sudo -E make e2e-qemu
- name: save artifacts
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/integration-qemu-csi-cron.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2024-05-27T16:20:10Z by kres bcb280a.
# Generated on 2024-08-22T16:03:09Z by kres 9cc7f48.

name: integration-qemu-csi-cron
concurrency:
Expand Down Expand Up @@ -76,14 +76,14 @@ jobs:
make talosctl-cni-bundle
- name: e2e-qemu-csi
env:
EXTRA_TEST_ARGS: -talos.csi=rook-ceph
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_CPUS_WORKERS: "4"
QEMU_EXTRA_DISKS: "1"
QEMU_EXTRA_DISKS_SIZE: "12288"
QEMU_MEMORY_WORKERS: "5120"
QEMU_WORKERS: "3"
SHORT_INTEGRATION_TEST: "yes"
WITH_TEST: run_csi_tests
run: |
sudo -E make e2e-qemu
- name: save artifacts
Expand Down
2 changes: 1 addition & 1 deletion .kres.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1201,7 +1201,7 @@ spec:
QEMU_MEMORY_WORKERS: 5120
QEMU_EXTRA_DISKS: 1
QEMU_EXTRA_DISKS_SIZE: 12288
WITH_TEST: run_csi_tests
EXTRA_TEST_ARGS: -talos.csi=rook-ceph
IMAGE_REGISTRY: registry.dev.siderolabs.io
- name: save-talos-logs
conditions:
Expand Down
45 changes: 25 additions & 20 deletions hack/test/e2e.sh
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,19 @@ function run_talos_integration_test {
;;
esac

"${INTEGRATION_TEST}" -test.v -talos.failfast -talos.talosctlpath "${TALOSCTL}" -talos.kubectlpath "${KUBECTL}" -talos.provisioner "${PROVISIONER}" -talos.name "${CLUSTER_NAME}" -talos.image "${REGISTRY}/siderolabs/talos" "${EXTRA_TEST_ARGS[@]}" "${TEST_RUN[@]}" "${TEST_SHORT[@]}"
"${INTEGRATION_TEST}" \
-test.v \
-talos.failfast \
-talos.talosctlpath "${TALOSCTL}" \
-talos.kubectlpath "${KUBECTL}" \
-talos.helmpath "${HELM}" \
-talos.kubestrpath "${KUBESTR}" \
-talos.provisioner "${PROVISIONER}" \
-talos.name "${CLUSTER_NAME}" \
-talos.image "${REGISTRY}/siderolabs/talos" \
"${EXTRA_TEST_ARGS[@]}" \
"${TEST_RUN[@]}" \
"${TEST_SHORT[@]}"
}

function run_talos_integration_test_docker {
Expand All @@ -169,7 +181,18 @@ function run_talos_integration_test_docker {
;;
esac

"${INTEGRATION_TEST}" -test.v -talos.talosctlpath "${TALOSCTL}" -talos.kubectlpath "${KUBECTL}" -talos.provisioner "${PROVISIONER}" -talos.name "${CLUSTER_NAME}" -talos.image "${REGISTRY}/siderolabs/talos" "${EXTRA_TEST_ARGS[@]}" "${TEST_RUN[@]}" "${TEST_SHORT[@]}"
"${INTEGRATION_TEST}" \
-test.v \
-talos.talosctlpath "${TALOSCTL}" \
-talos.kubectlpath "${KUBECTL}" \
-talos.helmpath "${HELM}" \
-talos.kubestrpath "${KUBESTR}" \
-talos.provisioner "${PROVISIONER}" \
-talos.name "${CLUSTER_NAME}" \
-talos.image "${REGISTRY}/siderolabs/talos" \
"${EXTRA_TEST_ARGS[@]}" \
"${TEST_RUN[@]}" \
"${TEST_SHORT[@]}"
}

function run_kubernetes_conformance_test {
Expand Down Expand Up @@ -220,24 +243,6 @@ function build_registry_mirrors {
fi
}

# Deploy the rook-ceph CSI stack via Helm, wait for the Ceph cluster to become
# healthy, then exercise the ceph-block storage class with a kubestr FIO run.
function run_csi_tests {
    local chart_version="v1.8.2"
    local ns="rook-ceph"
    local cluster_resource="cephclusters.ceph.rook.io/rook-ceph"

    ${HELM} repo add rook-release https://charts.rook.io/release
    ${HELM} repo update

    # install the operator and the cluster charts at the same pinned version
    local release
    for release in rook-ceph rook-ceph-cluster; do
        ${HELM} upgrade --install --version="${chart_version}" --set=pspEnable=false --create-namespace --namespace "${ns}" "${release}" "rook-release/${release}"
    done

    ${KUBECTL} label ns "${ns}" pod-security.kubernetes.io/enforce=privileged

    # wait for the controller to populate the status field
    sleep 30
    ${KUBECTL} --namespace "${ns}" wait --timeout=900s --for=jsonpath='{.status.phase}=Ready' "${cluster_resource}"
    ${KUBECTL} --namespace "${ns}" wait --timeout=900s --for=jsonpath='{.status.state}=Created' "${cluster_resource}"

    # .status.ceph is populated later only
    sleep 60
    ${KUBECTL} --namespace "${ns}" wait --timeout=900s --for=jsonpath='{.status.ceph.health}=HEALTH_OK' "${cluster_resource}"

    # hack until https://github.com/kastenhq/kubestr/issues/101 is addressed
    KUBERNETES_SERVICE_HOST="" KUBECONFIG="${TMP}/kubeconfig" "${KUBESTR}" fio --storageclass ceph-block --size 10G
}

function install_and_run_cilium_cni_tests {
get_kubeconfig

Expand Down
8 changes: 8 additions & 0 deletions internal/integration/base/base.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,10 @@ type TalosSuite struct {
TalosctlPath string
// KubectlPath is a path to kubectl binary
KubectlPath string
// HelmPath is a path to helm binary
HelmPath string
// KubeStrPath is a path to kubestr binary
KubeStrPath string
// ExtensionsQEMU runs tests with qemu and extensions enabled
ExtensionsQEMU bool
// ExtensionsNvidia runs tests with nvidia extensions enabled
Expand All @@ -41,6 +45,10 @@ type TalosSuite struct {
TrustedBoot bool
// TalosImage is the image name for 'talos' container.
TalosImage string
// CSITestName is the name of the CSI test to run
CSITestName string
// CSITestTimeout is the timeout for the CSI test
CSITestTimeout string

discoveredNodes cluster.Info
}
Expand Down
159 changes: 159 additions & 0 deletions internal/integration/base/k8s.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,9 @@ import (
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"slices"
"strings"
"time"
Expand Down Expand Up @@ -40,6 +43,7 @@ import (
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/tools/remotecommand"
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/client-go/util/jsonpath"
"k8s.io/kubectl/pkg/scheme"

taloskubernetes "github.com/siderolabs/talos/pkg/kubernetes"
Expand Down Expand Up @@ -278,6 +282,161 @@ func (k8sSuite *K8sSuite) WaitForPodToBeDeleted(ctx context.Context, timeout tim
}
}

// HelmInstall installs (or upgrades) the Helm chart with the given namespace, repository,
// version, release name, chart name and values.
//
// The values are written to a temporary file inside the test's temp directory, which the
// testing framework removes automatically when the test finishes. The helm process is
// bound to ctx, so canceling the context terminates the invocation.
func (k8sSuite *K8sSuite) HelmInstall(ctx context.Context, namespace, repository, version, releaseName, chartName string, valuesBytes []byte) error {
	tempFile := filepath.Join(k8sSuite.T().TempDir(), "values.yaml")

	if err := os.WriteFile(tempFile, valuesBytes, 0o644); err != nil {
		return fmt.Errorf("writing helm values file %q: %w", tempFile, err)
	}

	args := []string{
		"upgrade",
		"--install",
		"--cleanup-on-fail",
		"--create-namespace",
		"--namespace",
		namespace,
		"--wait",
		"--repo",
		repository,
		"--version",
		version,
		"--values",
		tempFile,
		releaseName,
		chartName,
	}

	// use CommandContext so the helm process is killed if ctx is canceled or times out
	cmd := exec.CommandContext(ctx, k8sSuite.HelmPath, args...)

	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	k8sSuite.T().Logf("running helm command: %s", strings.Join(cmd.Args, " "))

	return cmd.Run()
}

// WaitForResource waits for the resource with the given group, kind, version, namespace and
// jsonpath field selector to have the given expected value.
// mostly a restructuring of `kubectl wait` from https://github.com/kubernetes/kubectl/blob/master/pkg/cmd/wait/wait.go
//
// The wait is driven by a list/watch on the single named resource; it returns nil once the
// jsonpath-selected field equals expectedValue (whitespace-trimmed string comparison), or an
// error if ctx expires, the resource is missing, or the jsonpath is ambiguous.
//
//nolint:gocyclo
func (k8sSuite *K8sSuite) WaitForResource(ctx context.Context, namespace, group, kind, version, resourceName, jsonPathSelector, expectedValue string) error {
	// AllowMissingKeys: the field may not be populated yet, in which case we keep waiting
	j := jsonpath.New("wait").AllowMissingKeys(true)

	if jsonPathSelector == "" {
		return fmt.Errorf("jsonpath condition is empty")
	}

	if err := j.Parse(jsonPathSelector); err != nil {
		return fmt.Errorf("error parsing jsonpath condition: %w", err)
	}

	mapping, err := k8sSuite.Mapper.RESTMapping(schema.GroupKind{
		Group: group,
		Kind:  kind,
	}, version)
	if err != nil {
		// wrap the underlying error instead of dropping it
		return fmt.Errorf("error creating mapping for resource %s/%s/%s: %w", group, kind, version, err)
	}

	dr := k8sSuite.DynamicClient.Resource(mapping.Resource).Namespace(namespace)

	// restrict the list/watch to the single resource we care about
	fieldSelector := fields.OneTermEqualSelector("metadata.name", resourceName).String()

	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = fieldSelector

			return dr.List(ctx, options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = fieldSelector

			return dr.Watch(ctx, options)
		},
	}

	// fail fast if the resource does not exist at all when the watch starts
	preconditionFunc := func(store cache.Store) (bool, error) {
		var exists bool

		_, exists, err = store.Get(&metav1.ObjectMeta{Namespace: namespace, Name: resourceName})
		if err != nil {
			return true, err
		}

		if !exists {
			return true, fmt.Errorf("resource %s/%s/%s/%s not found", group, version, kind, resourceName)
		}

		return false, nil
	}

	if _, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, preconditionFunc, func(event watch.Event) (bool, error) {
		obj, ok := event.Object.(*unstructured.Unstructured)
		if !ok {
			return false, fmt.Errorf("error converting object to unstructured")
		}

		queryObj := obj.UnstructuredContent()

		k8sSuite.T().Logf("waiting for resource %s/%s/%s/%s to have field %s with value %s", group, version, kind, resourceName, jsonPathSelector, expectedValue)

		parseResults, err := j.FindResults(queryObj)
		if err != nil {
			return false, fmt.Errorf("error finding results: %w", err)
		}

		// field not populated yet: keep watching
		if len(parseResults) == 0 || len(parseResults[0]) == 0 {
			return false, nil
		}

		if len(parseResults) > 1 {
			return false, fmt.Errorf("given jsonpath expression matches more than one list")
		}

		if len(parseResults[0]) > 1 {
			return false, fmt.Errorf("given jsonpath expression matches more than one value")
		}

		// only scalar leaf values can be compared against expectedValue
		switch parseResults[0][0].Interface().(type) {
		case map[string]interface{}, []interface{}:
			return false, fmt.Errorf("jsonpath leads to a nested object or list which is not supported")
		}

		s := fmt.Sprintf("%v", parseResults[0][0].Interface())

		return strings.TrimSpace(s) == strings.TrimSpace(expectedValue), nil
	}); err != nil {
		return err
	}

	return nil
}

// RunFIOTest runs the FIO test with the given storage class and size using kubestr.
//
// The kubestr process inherits the test's stdout/stderr and is bound to ctx, so
// canceling the context terminates the run.
func (k8sSuite *K8sSuite) RunFIOTest(ctx context.Context, storageClass, size string) error {
	args := []string{
		"fio",
		"--storageclass",
		storageClass,
		"--size",
		size,
	}

	// use CommandContext so kubestr is killed if ctx is canceled or times out
	cmd := exec.CommandContext(ctx, k8sSuite.KubeStrPath, args...)

	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	k8sSuite.T().Logf("running kubestr command: %s", strings.Join(cmd.Args, " "))

	return cmd.Run()
}

// ExecuteCommandInPod executes the given command in the pod with the given namespace and name.
func (k8sSuite *K8sSuite) ExecuteCommandInPod(ctx context.Context, namespace, podName, command string) (string, string, error) {
cmd := []string{
Expand Down
Loading

0 comments on commit ca4255f

Please sign in to comment.