From 59a15da27280a4a487a8755d0e5d12ad4a84dbc1 Mon Sep 17 00:00:00 2001
From: Jan Grant
Date: Mon, 10 Jun 2024 13:13:20 +0100
Subject: [PATCH] e2e: confirm the cleanup of PVs with legacy affinity
 attributes

This applies a small refactor to the e2e tests to ensure that the newer
provisioner is capable of siting helper pods correctly to clean up PVs
with "legacy" affinity constraints.

The kind cluster itself is reconfigured to ensure that all nodes have
`metadata.name` != `metadata.labels["kubernetes.io/hostname"]`, which is
an assumption that does not hold for many cloud providers.
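As an illustrative aside (not something the tests below rely on), the
divergence between node names and hostname labels can be confirmed on a
running cluster with:

    kubectl get nodes -L kubernetes.io/hostname

Each node's name is then printed alongside its kubernetes.io/hostname
label; with this configuration the two should differ on the worker nodes.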
---
 test/pod_test.go                              | 51 ++++++++++++-------
 test/testdata/kind-cluster.yaml               |  4 ++
 .../pod-with-node-affinity/patch.yaml         |  2 +-
 .../kustomization.yaml                        | 10 ++++
 test/testdata/pv-with-legacy-affinity/pv.yaml | 38 ++++++++++++++
 test/util.go                                  |  2 +-
 6 files changed, 86 insertions(+), 21 deletions(-)
 create mode 100644 test/testdata/pv-with-legacy-affinity/kustomization.yaml
 create mode 100644 test/testdata/pv-with-legacy-affinity/pv.yaml

diff --git a/test/pod_test.go b/test/pod_test.go
index ed52ca36..cf040529 100644
--- a/test/pod_test.go
+++ b/test/pod_test.go
@@ -82,38 +82,38 @@ func TestPVCTestSuite(t *testing.T) {
 func (p *PodTestSuite) TestPodWithHostPathVolume() {
 	p.kustomizeDir = "pod"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithLocalVolume() {
 	p.kustomizeDir = "pod-with-local-volume"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", localVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), localVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithLocalVolumeDefault() {
 	p.kustomizeDir = "pod-with-default-local-volume"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", localVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), localVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithNodeAffinity() {
 	p.kustomizeDir = "pod-with-node-affinity"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithRWOPVolume() {
 	p.kustomizeDir = "pod-with-rwop-volume"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", localVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), localVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithSecurityContext() {
 	p.kustomizeDir = "pod-with-security-context"
 	kustomizeDir := testdataFile(p.kustomizeDir)
 
-	runTest(p, []string{p.config.IMAGE}, "podscheduled", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("podscheduled"), hostPathVolumeType)
 
 	cmd := fmt.Sprintf(`kubectl get pod -l %s=%s -o=jsonpath='{.items[0].status.conditions[?(@.type=="Ready")].reason}'`, LabelKey, LabelValue)
 
@@ -142,22 +142,33 @@ loop:
 func (p *PodTestSuite) TestPodWithSubpath() {
 	p.kustomizeDir = "pod-with-subpath"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
 func (p *PodTestSuite) xxTestPodWithMultipleStorageClasses() {
 	p.kustomizeDir = "multiple-storage-classes"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
 func (p *PodTestSuite) TestPodWithCustomPathPatternStorageClasses() {
 	p.kustomizeDir = "custom-path-pattern"
 
-	runTest(p, []string{p.config.IMAGE}, "ready", hostPathVolumeType)
+	runTest(p, []string{p.config.IMAGE}, waitCondition("ready"), hostPathVolumeType)
 }
 
-func runTest(p *PodTestSuite, images []string, waitCondition, volumeType string) {
+func (p *PodTestSuite) TestPodWithLegacyAffinityConstraint() {
+	// The helper pod should be correctly scheduled
+	p.kustomizeDir = "pv-with-legacy-affinity"
+
+	runTest(p, []string{p.config.IMAGE}, "kubectl wait pv pvc-to-clean-up --for delete --timeout=120s", "")
+}
+
+func waitCondition(waitCondition string) string {
+	return fmt.Sprintf("kubectl wait pod -l %s=%s --for condition=%s --timeout=120s", LabelKey, LabelValue, waitCondition)
+}
+
+func runTest(p *PodTestSuite, images []string, waitCmd, volumeType string) {
 	kustomizeDir := testdataFile(p.kustomizeDir)
 
 	var cmds []string
@@ -171,7 +182,7 @@ func runTest(p *PodTestSuite, images []string, waitCondition, volumeType string
 		cmds,
 		fmt.Sprintf("kustomize edit add label %s:%s -f", LabelKey, LabelValue),
 		"kustomize build | kubectl apply -f -",
-		fmt.Sprintf("kubectl wait pod -l %s=%s --for condition=%s --timeout=120s", LabelKey, LabelValue, waitCondition),
+		waitCmd,
 	)
 
 	for _, cmd := range cmds {
@@ -188,13 +199,15 @@ func runTest(p *PodTestSuite, images []string, waitCondition, volumeType string
 		}
 	}
 
-	typeCheckCmd := fmt.Sprintf("kubectl get pv $(%s) -o jsonpath='{.spec.%s}'", "kubectl get pv -o jsonpath='{.items[0].metadata.name}'", volumeType)
-	c := createCmd(p.T(), typeCheckCmd, kustomizeDir, p.config.envs(), nil)
-	typeCheckOutput, err := c.CombinedOutput()
-	if err != nil {
-		p.FailNow("", "failed to check volume type: %v", err)
-	}
-	if len(typeCheckOutput) == 0 || !strings.Contains(string(typeCheckOutput), "path") {
-		p.FailNow("volume Type not correct")
+	if volumeType != "" {
+		typeCheckCmd := fmt.Sprintf("kubectl get pv $(%s) -o jsonpath='{.spec.%s}'", "kubectl get pv -o jsonpath='{.items[0].metadata.name}'", volumeType)
+		c := createCmd(p.T(), typeCheckCmd, kustomizeDir, p.config.envs(), nil)
+		typeCheckOutput, err := c.CombinedOutput()
+		if err != nil {
+			p.FailNow("", "failed to check volume type: %v", err)
+		}
+		if len(typeCheckOutput) == 0 || !strings.Contains(string(typeCheckOutput), "path") {
+			p.FailNow("volume Type not correct")
+		}
 	}
 }
diff --git a/test/testdata/kind-cluster.yaml b/test/testdata/kind-cluster.yaml
index 5d48018e..9d1fb8ac 100644
--- a/test/testdata/kind-cluster.yaml
+++ b/test/testdata/kind-cluster.yaml
@@ -3,4 +3,8 @@ kind: Cluster
 nodes:
 - role: control-plane
 - role: worker
+  labels:
+    kubernetes.io/hostname: kind-worker1.hostname
 - role: worker
+  labels:
+    kubernetes.io/hostname: kind-worker2.hostname
diff --git a/test/testdata/pod-with-node-affinity/patch.yaml b/test/testdata/pod-with-node-affinity/patch.yaml
index 204d775d..efbf3d19 100644
--- a/test/testdata/pod-with-node-affinity/patch.yaml
+++ b/test/testdata/pod-with-node-affinity/patch.yaml
@@ -11,4 +11,4 @@ spec:
           - key: kubernetes.io/hostname
             operator: In
             values:
-            - kind-worker
+            - kind-worker1.hostname
diff --git a/test/testdata/pv-with-legacy-affinity/kustomization.yaml b/test/testdata/pv-with-legacy-affinity/kustomization.yaml
new file mode 100644
index 00000000..b0f1729c
--- /dev/null
+++ b/test/testdata/pv-with-legacy-affinity/kustomization.yaml
@@ -0,0 +1,10 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- ../../../deploy
+- pv.yaml
+commonLabels:
+  app: local-path-provisioner
+images:
+- name: rancher/local-path-provisioner
+  newTag: dev
\ No newline at end of file
diff --git a/test/testdata/pv-with-legacy-affinity/pv.yaml b/test/testdata/pv-with-legacy-affinity/pv.yaml
new file mode 100644
index 00000000..e13781c2
--- /dev/null
+++ b/test/testdata/pv-with-legacy-affinity/pv.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  annotations:
+    local.path.provisioner/selected-node: kind-worker
+    pv.kubernetes.io/provisioned-by: rancher.io/local-path
+  finalizers:
+  - kubernetes.io/pv-protection
+  labels:
+    test/avoid-cleanup: "true"
+  name: pvc-to-clean-up
+spec:
+  accessModes:
+  - ReadWriteOnce
+  capacity:
+    storage: 100Mi
+  hostPath:
+    path: /opt/local-path-provisioner/default/local-path-pvc
+    type: DirectoryOrCreate
+  nodeAffinity:
+    required:
+      nodeSelectorTerms:
+      - matchExpressions:
+        - key: kubernetes.io/hostname
+          operator: In
+          values:
+          - kind-worker1.hostname
+  claimRef:
+    apiVersion: v1
+    kind: PersistentVolumeClaim
+    name: no-such-pvc
+    namespace: default
+    # The PVC "definitely doesn't exist any more"
+    resourceVersion: "1"
+    uid: 12345678-1234-5678-9abc-123456789abc
+  persistentVolumeReclaimPolicy: Delete
+  storageClassName: local-path-custom-path-pattern
+  volumeMode: Filesystem
diff --git a/test/util.go b/test/util.go
index 7aedec0b..b20cebef 100644
--- a/test/util.go
+++ b/test/util.go
@@ -78,7 +78,7 @@ func testdataFile(fields ...string) string {
 func deleteKustomizeDeployment(t *testing.T, kustomizeDir string, envs []string) error {
 	_, err := runCmd(
 		t,
-		"kustomize build | kubectl delete --timeout=180s -f -",
+		"kustomize build | kubectl delete --timeout=180s -f - -l 'test/avoid-cleanup!=true'",
 		testdataFile(kustomizeDir),
 		envs,
 		nil,