From 6aaf337db6cfea639a22a18e32b8ec29b085ec81 Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Thu, 21 Jan 2021 16:59:25 +0530 Subject: [PATCH 1/6] e2e: add a test case for rbd-nbd mounter To validate the basic working of rbd-nbd Signed-off-by: Prasanna Kumar Kalever --- e2e/rbd.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/e2e/rbd.go b/e2e/rbd.go index 0e506ca8e0e..6df0a3da150 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -349,6 +349,31 @@ var _ = Describe("RBD", func() { } }) + By("create a PVC and bind it to an app using rbd-nbd mounter", func() { + err := deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"mounter": "rbd-nbd"}, deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + err = validatePVCAndAppBinding(pvcPath, appPath, f) + if err != nil { + e2elog.Failf("failed to validate pvc and application binding with error %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 0) + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + }) + By("create a PVC and bind it to an app with encrypted RBD volume", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { From 8aa84bee3d53faab182ad383ddd70391dcf0453d Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Tue, 9 Feb 2021 18:47:09 +0530 Subject: [PATCH 2/6] e2e: Test IO after nodeplugin reboot This is a negative testcase to showcase that, as per the current design, the IO will fail because of the missing mappings Signed-off-by: Prasanna Kumar Kalever --- 
e2e/rbd.go | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/e2e/rbd.go b/e2e/rbd.go index 6df0a3da150..9b54f1ec175 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -374,6 +374,92 @@ var _ = Describe("RBD", func() { } }) + By("perform IO on rbd-nbd volume after nodeplugin restart and expect a failure", func() { + err := deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + // Storage class with rbd-nbd mounter + err = createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"mounter": "rbd-nbd"}, deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + pvc, err := loadPVC(pvcPath) + if err != nil { + e2elog.Failf("failed to load PVC with error %v", err) + } + pvc.Namespace = f.UniqueName + + app, err := loadApp(appPath) + if err != nil { + e2elog.Failf("failed to load application with error %v", err) + } + + app.Namespace = f.UniqueName + label := map[string]string{ + "app": app.Name, + } + app.Labels = label + app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name + err = createPVCAndApp("", f, pvc, app, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC and application with error %v", err) + } + + // validate created backend rbd images + validateRBDImageCount(f, 1) + + selector, err := getDaemonSetLabelSelector(f, cephCSINamespace, rbdDaemonsetName) + if err != nil { + e2elog.Failf("failed to get the labels with error %v", err) + } + // delete rbd nodeplugin pods + err = deletePodWithLabel(selector, cephCSINamespace, false) + if err != nil { + e2elog.Failf("fail to delete pod with error %v", err) + } + + // wait for nodeplugin pods to come up + err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) + if err != nil { + e2elog.Failf("timeout waiting for daemonset pods with error %v", err) + } + + opt := 
metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", app.Name), + } + + // FIXME: Fix this behavior, i.e. when the nodeplugin is + // restarted, the rbd-nbd processes should be back to life + // as rbd-nbd processes are responsible for IO + + // For now to prove this isn't working, write something to + // mountpoint and expect a failure as the processes are terminated. + filePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" + _, stdErr := execCommandInPodAndAllowFail(f, fmt.Sprintf("echo 'Hello World' > %s", filePath), app.Namespace, &opt) + IOErr := fmt.Sprintf("cannot create %s: Input/output error", filePath) + if !strings.Contains(stdErr, IOErr) { + e2elog.Failf(stdErr) + } else { + e2elog.Logf("failed IO as expected: %v", stdErr) + } + + err = deletePVCAndApp("", f, pvc, app) + if err != nil { + e2elog.Failf("failed to delete PVC and application with error %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 0) + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + }) + By("create a PVC and bind it to an app with encrypted RBD volume", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { From 5cdc62c52a31c57ae614cc297c5ffaf5e0c6baec Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Fri, 5 Mar 2021 15:57:35 +0530 Subject: [PATCH 3/6] e2e: add ability to run command inside specified container Signed-off-by: Prasanna Kumar Kalever --- e2e/pod.go | 49 ++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 42 insertions(+), 7 deletions(-) diff --git a/e2e/pod.go b/e2e/pod.go index f985189968d..c1c63a9d8d0 100644 --- a/e2e/pod.go +++ b/e2e/pod.go @@ -105,17 +105,40 @@ func waitForDeploymentComplete(name, ns 
string, c kubernetes.Interface, t int) e return nil } -func getCommandInPodOpts(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (framework.ExecOptions, error) { +func findPodAndContainerName(f *framework.Framework, ns, cn string, opt *metav1.ListOptions) (string, string, error) { + podList, err := f.PodClientNS(ns).List(context.TODO(), *opt) + if err != nil { + return "", "", err + } + + if len(podList.Items) == 0 { + return "", "", errors.New("podlist is empty") + } + + if cn != "" { + for i := range podList.Items { + for j := range podList.Items[i].Spec.Containers { + if podList.Items[i].Spec.Containers[j].Name == cn { + return podList.Items[i].Name, cn, nil + } + } + } + return "", "", errors.New("container name not found") + } + return podList.Items[0].Name, podList.Items[0].Spec.Containers[0].Name, nil +} + +func getCommandInPodOpts(f *framework.Framework, c, ns, cn string, opt *metav1.ListOptions) (framework.ExecOptions, error) { cmd := []string{"/bin/sh", "-c", c} - pods, err := listPods(f, ns, opt) + pName, cName, err := findPodAndContainerName(f, ns, cn, opt) if err != nil { return framework.ExecOptions{}, err } return framework.ExecOptions{ Command: cmd, - PodName: pods[0].Name, + PodName: pName, Namespace: ns, - ContainerName: pods[0].Spec.Containers[0].Name, + ContainerName: cName, Stdin: nil, CaptureStdout: true, CaptureStderr: true, @@ -170,7 +193,19 @@ func listPods(f *framework.Framework, ns string, opt *metav1.ListOptions) ([]v1. 
} func execCommandInPod(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (string, string, error) { - podOpt, err := getCommandInPodOpts(f, c, ns, opt) + podOpt, err := getCommandInPodOpts(f, c, ns, "", opt) + if err != nil { + return "", "", err + } + stdOut, stdErr, err := f.ExecWithOptions(podOpt) + if stdErr != "" { + e2elog.Logf("stdErr occurred: %v", stdErr) + } + return stdOut, stdErr, err +} + +func execCommandInContainer(f *framework.Framework, c, ns, cn string, opt *metav1.ListOptions) (string, string, error) { + podOpt, err := getCommandInPodOpts(f, c, ns, cn, opt) if err != nil { return "", "", err } @@ -185,7 +220,7 @@ func execCommandInToolBoxPod(f *framework.Framework, c, ns string) (string, stri opt := &metav1.ListOptions{ LabelSelector: rookToolBoxPodLabel, } - podOpt, err := getCommandInPodOpts(f, c, ns, opt) + podOpt, err := getCommandInPodOpts(f, c, ns, "", opt) if err != nil { return "", "", err } @@ -197,7 +232,7 @@ func execCommandInToolBoxPod(f *framework.Framework, c, ns string) (string, stri } func execCommandInPodAndAllowFail(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (string, string) { - podOpt, err := getCommandInPodOpts(f, c, ns, opt) + podOpt, err := getCommandInPodOpts(f, c, ns, "", opt) if err != nil { return "", err.Error() } From 6095ac29edecdb21ecc372652170991dae483799 Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Thu, 25 Feb 2021 12:53:22 +0530 Subject: [PATCH 4/6] e2e: restart rbd-nbd process after nodeplugin reboot Bringup the rbd-nbd map/attach process on the rbd node plugin and expect the IO to continue uninterrupted. Signed-off-by: Prasanna Kumar Kalever --- e2e/rbd.go | 134 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 134 insertions(+) diff --git a/e2e/rbd.go b/e2e/rbd.go index 9b54f1ec175..e27bc38edc5 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -5,6 +5,7 @@ import ( "fmt" "io/ioutil" "os" + "regexp" "strings" . 
"github.com/onsi/ginkgo" // nolint @@ -460,6 +461,139 @@ var _ = Describe("RBD", func() { } }) + By("restart rbd-nbd process on nodeplugin and continue IO after nodeplugin restart", func() { + err := deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + // Tweak Storageclass to add netlink,reattach rbd-nbd mounter options + scOpts := map[string]string{ + "mounter": "rbd-nbd", + "mapOptions": "try-netlink,reattach-timeout=180", + } + err = createRBDStorageClass(f.ClientSet, f, nil, scOpts, deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + + pvc, err := loadPVC(pvcPath) + if err != nil { + e2elog.Failf("failed to load PVC with error %v", err) + } + pvc.Namespace = f.UniqueName + + app, err := loadApp(appPath) + if err != nil { + e2elog.Failf("failed to load application with error %v", err) + } + app.Namespace = f.UniqueName + label := map[string]string{ + "app": app.Name, + } + app.Labels = label + app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name + err = createPVCAndApp("", f, pvc, app, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC and application with error %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 1) + + selector, err := getDaemonSetLabelSelector(f, cephCSINamespace, rbdDaemonsetName) + if err != nil { + e2elog.Failf("failed to get the labels with error %v", err) + } + + opt := metav1.ListOptions{ + LabelSelector: selector, + } + + uname, stdErr, err := execCommandInContainer(f, "uname -a", cephCSINamespace, "csi-rbdplugin", &opt) + if err != nil || stdErr != "" { + e2elog.Failf("failed to run uname cmd : %v, stdErr: %v ", err, stdErr) + } + e2elog.Logf("uname -a: %v", uname) + rpmv, stdErr, err := execCommandInContainer(f, "rpm -qa | grep rbd-nbd", cephCSINamespace, "csi-rbdplugin", &opt) + if err != nil || stdErr != "" { + e2elog.Failf("failed 
to run rpm -qa cmd : %v, stdErr: %v ", err, stdErr) + } + e2elog.Logf("rbd-nbd package version: %v", rpmv) + + // Get details of rbd-nbd process + // # ps -eo 'cmd' | grep [r]bd-nbd + // /usr/bin/rbd-nbd --id cephcsi-rbd-node -m svc-name:6789 --keyfile=/tmp/csi/keys/keyfile attach --device /dev/nbd0 pool-name/image-name --try-netlink --reattach-timeout=180 + mapCmd, stdErr, err := execCommandInContainer(f, "ps -eo 'cmd' | grep [r]bd-nbd", cephCSINamespace, "csi-rbdplugin", &opt) + if err != nil || stdErr != "" { + e2elog.Failf("failed to run ps cmd : %v, stdErr: %v ", err, stdErr) + } + e2elog.Logf("map command running before restart, mapCmd: %v", mapCmd) + + rbdNodeKey, stdErr, err := execCommandInToolBoxPod(f, "ceph auth get-key client.cephcsi-rbd-node", rookNamespace) + if err != nil || stdErr != "" { + e2elog.Failf("error getting cephcsi-rbd-node key, err: %v, stdErr: %v ", err, stdErr) + } + + // restart the rbd node plugin + err = deletePodWithLabel(selector, cephCSINamespace, false) + if err != nil { + e2elog.Failf("fail to delete pod with error %v", err) + } + + // wait for nodeplugin pods to come up + err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) + if err != nil { + e2elog.Failf("timeout waiting for daemonset pods with error %v", err) + } + + // Prepare the rbd-nbd with command args + attachCmd := strings.ReplaceAll(mapCmd, "map", "attach --device /dev/nbd0") + m1 := regexp.MustCompile(`/keyfile-[0-9]* `) + attachCmd = m1.ReplaceAllString(attachCmd, "/keyfile-test ") + e2elog.Logf("attach command to run after restart, attachCmd: %v", attachCmd) + + // create the keyfile + _, stdErr, err = execCommandInContainer(f, fmt.Sprintf("echo %s > /tmp/csi/keys/keyfile-test", rbdNodeKey), cephCSINamespace, "csi-rbdplugin", &opt) + if err != nil || stdErr != "" { + e2elog.Failf("failed to write key to a file, err: %v, stdErr: %v ", err, stdErr) + } + + _, stdErr, err = execCommandInContainer(f, attachCmd, cephCSINamespace, 
"csi-rbdplugin", &opt) + if err != nil || stdErr != "" { + e2elog.Failf("failed to run attach cmd err: %v, stdErr: %v ", err, stdErr) + } + + runningAttachCmd, stdErr, err := execCommandInContainer(f, "ps -eo 'cmd' | grep [r]bd-nbd", cephCSINamespace, "csi-rbdplugin", &opt) + if err != nil || stdErr != "" { + e2elog.Failf("failed to run ps cmd : %v, stdErr: %v ", err, stdErr) + } + e2elog.Logf("attach command running after restart, runningAttachCmd: %v", runningAttachCmd) + + appOpt := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", app.Name), + } + // Write something to mountpoint and expect it to happen + filePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" + _, stdErr, err = execCommandInPod(f, fmt.Sprintf("echo 'Hello World' > %s", filePath), app.Namespace, &appOpt) + if err != nil || stdErr != "" { + e2elog.Failf("failed to write IO, err: %v, stdErr: %v ", err, stdErr) + } + + err = deletePVCAndApp("", f, pvc, app) + if err != nil { + e2elog.Failf("failed to delete PVC and application with error %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 0) + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + }) + By("create a PVC and bind it to an app with encrypted RBD volume", func() { err := deleteResource(rbdExamplePath + "storageclass.yaml") if err != nil { From 7b7a4265ba65fc359ac4769f70594dc0c8e1216e Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Mon, 3 May 2021 13:34:46 +0530 Subject: [PATCH 5/6] e2e: enable an old testcase as the ndb module is available This testcase tests journaling/exclusive-lock image-features with rbd-nbd mounter Signed-off-by: Prasanna Kumar Kalever --- e2e/rbd.go | 33 ++++++++++++++++++++++----------- 1 file 
changed, 22 insertions(+), 11 deletions(-) diff --git a/e2e/rbd.go b/e2e/rbd.go index e27bc38edc5..e912b212c84 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -727,17 +727,28 @@ var _ = Describe("RBD", func() { } }) - // TODO: enable this test when we support rbd-nbd mounter in E2E. - // nbd module should be present on the host machine to run use the - // rbd-nbd mounter. - - // By("create a PVC and Bind it to an app with journaling/exclusive-lock image-features and rbd-nbd mounter", func() { - // deleteResource(rbdExamplePath + "storageclass.yaml") - // createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"imageFeatures": "layering,journaling,exclusive-lock", "mounter": "rbd-nbd"}) - // validatePVCAndAppBinding(pvcPath, appPath, f) - // deleteResource(rbdExamplePath + "storageclass.yaml") - // createRBDStorageClass(f.ClientSet, f, nil, make(map[string]string)) - // }) + By("create a PVC and Bind it to an app with journaling/exclusive-lock image-features and rbd-nbd mounter", func() { + err := deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"imageFeatures": "layering,journaling,exclusive-lock", "mounter": "rbd-nbd"}, deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + err = validatePVCAndAppBinding(pvcPath, appPath, f) + if err != nil { + e2elog.Failf("failed to validate pvc and application binding with error %v", err) + } + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + }) By("create a PVC clone and bind it to an app", func() { // snapshot beta is only supported from v1.17+ From 
aaf5414b55ef0e7886d50602609d2337f0d909b1 Mon Sep 17 00:00:00 2001 From: Prasanna Kumar Kalever Date: Thu, 6 May 2021 11:36:23 +0530 Subject: [PATCH 6/6] build: ignore unparam linter false positive Ignoring below warnings: e2e/pod.go:207:60: `execCommandInContainer` - `cn` always receives `"csi-rbdplugin"` (unparam) func execCommandInContainer(f *framework.Framework, c, ns, cn string, opt *metav1.ListOptions) (string, string, error) { ^ e2e/pod.go:308:43: `deletePodWithLabel` - `skipNotFound` always receives `false` (unparam) func deletePodWithLabel(label, ns string, skipNotFound bool) error { Signed-off-by: Prasanna Kumar Kalever --- e2e/pod.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/e2e/pod.go b/e2e/pod.go index c1c63a9d8d0..701f5caf606 100644 --- a/e2e/pod.go +++ b/e2e/pod.go @@ -204,7 +204,7 @@ func execCommandInPod(f *framework.Framework, c, ns string, opt *metav1.ListOpti return stdOut, stdErr, err } -func execCommandInContainer(f *framework.Framework, c, ns, cn string, opt *metav1.ListOptions) (string, string, error) { +func execCommandInContainer(f *framework.Framework, c, ns, cn string, opt *metav1.ListOptions) (string, string, error) { //nolint:unparam,lll // cn can be used with different inputs later podOpt, err := getCommandInPodOpts(f, c, ns, cn, opt) if err != nil { return "", "", err @@ -304,7 +304,7 @@ func deletePod(name, ns string, c kubernetes.Interface, t int) error { }) } -func deletePodWithLabel(label, ns string, skipNotFound bool) error { +func deletePodWithLabel(label, ns string, skipNotFound bool) error { //nolint:unparam // skipNotFound can be used with different inputs later _, err := framework.RunKubectl(ns, "delete", "po", "-l", label, fmt.Sprintf("--ignore-not-found=%t", skipNotFound)) if err != nil { e2elog.Logf("failed to delete pod %v", err)