From eab5b9b451e1825e226fda22ac3f4e3ed20667d7 Mon Sep 17 00:00:00 2001
From: Daniil Antoshin
Date: Fri, 14 Nov 2025 14:41:04 +0200
Subject: [PATCH 1/7] fix(audit): break event log on error (#1697)

Description

Don't try to log the audit message if an error occurred while filling the event struct.

---------

Signed-off-by: Daniil Antoshin
---
 .../pkg/audit/handler/handler.go   |  6 ++++--
 .../pkg/audit/informer/informer.go | 20 +++++++++++++++++++
 2 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/images/virtualization-artifact/pkg/audit/handler/handler.go b/images/virtualization-artifact/pkg/audit/handler/handler.go
index 6857fcf7d9..2d14c22b4b 100644
--- a/images/virtualization-artifact/pkg/audit/handler/handler.go
+++ b/images/virtualization-artifact/pkg/audit/handler/handler.go
@@ -105,17 +105,19 @@ func NewEventHandler(
 
 		if err := eventLogger.Fill(); err != nil {
 			log.Debug("fail to fill event: %w", err)
+			return err
 		}
 
 		if !eventLogger.ShouldLog() {
-			break
+			return nil
 		}
 
 		if err := eventLogger.Log(); err != nil {
 			log.Debug("fail to log event: %w", err)
+			return err
 		}
 
-		break
+		return nil
 	}
 
 	return nil
diff --git a/images/virtualization-artifact/pkg/audit/informer/informer.go b/images/virtualization-artifact/pkg/audit/informer/informer.go
index 70840ca3ff..7ca2eb462d 100644
--- a/images/virtualization-artifact/pkg/audit/informer/informer.go
+++ b/images/virtualization-artifact/pkg/audit/informer/informer.go
@@ -68,6 +68,11 @@ func NewInformerList(ctx context.Context, kubeCfg *rest.Config, ttlCache cache)
 	vmInformer := virtSharedInformerFactory.Virtualization().V1alpha2().VirtualMachines().Informer()
 	_, err = vmInformer.AddEventHandler(kubecache.ResourceEventHandlerFuncs{
 		DeleteFunc: func(obj any) {
+			_, ok := obj.(kubecache.DeletedFinalStateUnknown)
+			if ok {
+				return
+			}
+
 			vm := obj.(*v1alpha2.VirtualMachine)
 			key := fmt.Sprintf("virtualmachines/%s/%s", vm.Namespace, vm.Name)
 			ttlCache.Add(key, vm)
@@ -82,6 +87,11 @@ func NewInformerList(ctx context.Context, kubeCfg *rest.Config, ttlCache cache)
 	vdInformer := virtSharedInformerFactory.Virtualization().V1alpha2().VirtualDisks().Informer()
 	_, err = vdInformer.AddEventHandler(kubecache.ResourceEventHandlerFuncs{
 		DeleteFunc: func(obj any) {
+			_, ok := obj.(kubecache.DeletedFinalStateUnknown)
+			if ok {
+				return
+			}
+
 			vd := obj.(*v1alpha2.VirtualDisk)
 			key := fmt.Sprintf("pods/%s/%s", vd.Namespace, vd.Name)
 			ttlCache.Add(key, vd)
@@ -96,6 +106,11 @@ func NewInformerList(ctx context.Context, kubeCfg *rest.Config, ttlCache cache)
 	podInformer := coreSharedInformerFactory.Core().V1().Pods().Informer()
 	_, err = podInformer.AddEventHandler(kubecache.ResourceEventHandlerFuncs{
 		DeleteFunc: func(obj any) {
+			_, ok := obj.(kubecache.DeletedFinalStateUnknown)
+			if ok {
+				return
+			}
+
 			pod := obj.(*corev1.Pod)
 			key := fmt.Sprintf("pods/%s/%s", pod.Namespace, pod.Name)
 			ttlCache.Add(key, pod)
@@ -110,6 +125,11 @@ func NewInformerList(ctx context.Context, kubeCfg *rest.Config, ttlCache cache)
 	internalVMIInformer := GetInternalVMIInformer(dynamicInformerFactory).Informer()
 	_, err = internalVMIInformer.AddEventHandler(kubecache.ResourceEventHandlerFuncs{
 		DeleteFunc: func(obj any) {
+			_, ok := obj.(kubecache.DeletedFinalStateUnknown)
+			if ok {
+				return
+			}
+
 			unstructuredObj, ok := obj.(*unstructured.Unstructured)
 			if !ok {
 				return

From ca13800fa997ab6e853c0264cfd7aa0763b117c9 Mon Sep 17 00:00:00 2001
From: Roman Sysoev <36233932+hardcoretime@users.noreply.github.com>
Date: Fri, 14 Nov 2025 19:23:56 +0300
Subject: [PATCH 2/7] chore: update dlv deployment patch (#1670)

Signed-off-by:
Roman Sysoev --- Taskfile.yaml | 78 +++++++++++++++++----- images/virtualization-artifact/hack/dlv.sh | 26 ++++++-- 2 files changed, 84 insertions(+), 20 deletions(-) diff --git a/Taskfile.yaml b/Taskfile.yaml index 12aaf94cd5..4a9272fe56 100644 --- a/Taskfile.yaml +++ b/Taskfile.yaml @@ -155,11 +155,27 @@ tasks: "spec": { "template": { "spec": { - "containers": [ { - "name": "virt-controller", - "image": "${IMAGE}", - "ports": [ { "containerPort": 2345, "name": "dlv" } ] - }] + "containers": [ + { + "name": "virt-controller", + "image": "${IMAGE}", + "ports": [ { "containerPort": 2345, "name": "dlv" } ], + "readinessProbe": null, + "livenessProbe": null, + "command": null, + "args": [] + }, + { + "name": "proxy", + "readinessProbe": null, + "livenessProbe": null + }, + { + "name": "kube-rbac-proxy", + "readinessProbe": null, + "livenessProbe": null + } + ] } } } @@ -189,11 +205,27 @@ tasks: "spec": { "template": { "spec": { - "containers": [ { - "name": "virt-handler", - "image": "${IMAGE}", - "ports": [ { "containerPort": 2345, "name": "dlv" } ] - }] + "containers": [ + { + "name": "virt-handler", + "image": "${IMAGE}", + "ports": [ { "containerPort": 2345, "name": "dlv" } ], + "readinessProbe": null, + "livenessProbe": null, + "command": null, + "args": [] + }, + { + "name": "proxy", + "readinessProbe": null, + "livenessProbe": null + }, + { + "name": "kube-rbac-proxy", + "readinessProbe": null, + "livenessProbe": null + } + ] } } } @@ -223,11 +255,27 @@ tasks: "spec": { "template": { "spec": { - "containers": [ { - "name": "virt-api", - "image": "${IMAGE}", - "ports": [ { "containerPort": 2345, "name": "dlv" } ] - }] + "containers": [ + { + "name": "virt-api", + "image": "${IMAGE}", + "ports": [ { "containerPort": 2345, "name": "dlv" } ], + "readinessProbe": null, + "livenessProbe": null, + "command": null, + "args": [] + }, + { + "name": "proxy", + "readinessProbe": null, + "livenessProbe": null + }, + { + "name": "kube-rbac-proxy", + "readinessProbe": null, + "livenessProbe": null + } + ] } } } diff --git a/images/virtualization-artifact/hack/dlv.sh b/images/virtualization-artifact/hack/dlv.sh index 03160c6089..9881789165 100755 --- a/images/virtualization-artifact/hack/dlv.sh +++ b/images/virtualization-artifact/hack/dlv.sh @@ -75,11 +75,27 @@ kubectl -n d8-virtualization patch deployment ${deployment} --type='strategic' - "spec": { "template": { "spec": { - "containers": [{ - "name": "${deployment}", - "image": "${IMAGE}", - "ports": [{"containerPort": 2345, "name": "dlv"}] - }] + "containers": [ + { + "name": "${deployment}", + "image": "${IMAGE}", + "ports": [{"containerPort": 2345, "name": "dlv"}], + "readinessProbe": null, + "livenessProbe": null, + "command": null, + "args": [] + }, + { + "name": "proxy", + "readinessProbe": null, + "livenessProbe": null + }, + { + "name": "kube-rbac-proxy", + "readinessProbe": null, + "livenessProbe": null + } + ] } } } From eb61d320c0ee4842a8990c6f29102129e1b22bf2 Mon Sep 17 00:00:00 2001 From: Nikita Korolev <141920865+universal-itengineer@users.noreply.github.com> Date: Mon, 17 Nov 2025 18:46:43 +0300 Subject: [PATCH 3/7] chore(core): mitigation cve report-2025-11-17 (#1702) mitigation cve report-2025-11-17 Signed-off-by: Nikita Korolev --- images/virt-launcher/vlctl/go.mod | 4 ++-- images/virt-launcher/vlctl/go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/images/virt-launcher/vlctl/go.mod b/images/virt-launcher/vlctl/go.mod index bedea8e1e3..e7f58ff103 100644 --- a/images/virt-launcher/vlctl/go.mod +++ 
b/images/virt-launcher/vlctl/go.mod @@ -6,7 +6,7 @@ require ( github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.6 gopkg.in/yaml.v3 v3.0.1 - kubevirt.io/kubevirt v1.6.2 + kubevirt.io/kubevirt v1.6.1 ) require ( @@ -79,7 +79,7 @@ replace ( k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.31.0 k8s.io/sample-controller => k8s.io/sample-controller v0.31.0 - kubevirt.io/client-go => kubevirt.io/client-go v1.6.2 + kubevirt.io/client-go => kubevirt.io/client-go v1.6.1 sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.6.2 ) diff --git a/images/virt-launcher/vlctl/go.sum b/images/virt-launcher/vlctl/go.sum index 29fd96516f..c776252305 100644 --- a/images/virt-launcher/vlctl/go.sum +++ b/images/virt-launcher/vlctl/go.sum @@ -401,14 +401,14 @@ k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6J k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= kubevirt.io/api v0.0.0-20250930144221-aaa67e9803df h1:WLXWkHCVkKXUrKD3wuOrkfPUUEZpIY5xDIQoeknHabE= kubevirt.io/api v0.0.0-20250930144221-aaa67e9803df/go.mod h1:p66fEy/g79x7VpgUwrkUgOoG2lYs5LQq37WM6JXMwj4= -kubevirt.io/client-go v1.6.2 h1:JgwvllnzHFeOx9w95U+5uEEGZP0+YRlepZF/QTV39yk= -kubevirt.io/client-go v1.6.2/go.mod h1:6b+UYrs7tPplysezuZ8bP2EKtPd71xN7h+rry09zeV8= +kubevirt.io/client-go v1.6.1 h1:hGxIsJZjxeVgPGRUnLeqWNiLv52WkSTq3LLLIQ8SNXM= +kubevirt.io/client-go v1.6.1/go.mod h1:Y/8zow0q41oBVM3f+wSkk581sGO2a9pchynOgJ8ALoc= kubevirt.io/containerized-data-importer-api v1.60.3-0.20241105012228-50fbed985de9 h1:KTb8wO1Lxj220DX7d2Rdo9xovvlyWWNo3AVm2ua+1nY= kubevirt.io/containerized-data-importer-api v1.60.3-0.20241105012228-50fbed985de9/go.mod h1:SDJjLGhbPyayDqAqawcGmVNapBp0KodOQvhKPLVGCQU= kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 h1:QMrd0nKP0BGbnxTqakhDZAUhGKxPiPiN5gSDqKUmGGc= kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc= -kubevirt.io/kubevirt v1.6.2 h1:+dWHfSAsjQl4HRdMMox0RS0wdvupyWQKkuAAP/jm2rk= -kubevirt.io/kubevirt v1.6.2/go.mod h1:Cz4iZeAC3ieVVmWsbeOY0RcebKLzJHslovRCwUwsIx0= +kubevirt.io/kubevirt v1.6.1 h1:n+aLOEam6YNPncvxckoVdbToV4fFU9JPF85PV8guY3I= +kubevirt.io/kubevirt v1.6.1/go.mod h1:Cz4iZeAC3ieVVmWsbeOY0RcebKLzJHslovRCwUwsIx0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= From 7f22c8c093910d8c9a2b13e05a204c087710d5c6 Mon Sep 17 00:00:00 2001 From: Nikita Korolev <141920865+universal-itengineer@users.noreply.github.com> Date: Mon, 17 Nov 2025 19:08:27 +0300 Subject: [PATCH 4/7] chore(ci): add lint and test build for d8v cli (#1699) * chore(ci): add lint and test build for d8v cli --------- Signed-off-by: Nikita Korolev --- .github/workflows/dev_module_build.yml | 33 ++++++++++++++++++++++++++ Taskfile.yaml | 3 +++ src/cli/Taskfile.yaml | 3 +++ 3 files changed, 39 insertions(+) diff --git a/.github/workflows/dev_module_build.yml b/.github/workflows/dev_module_build.yml index 1b0355d12d..d7d3f7ab0d 100644 --- a/.github/workflows/dev_module_build.yml +++ b/.github/workflows/dev_module_build.yml @@ -371,6 +371,39 @@ jobs: svace_analyze_ssh_user: "${{ secrets.SVACE_ANALYZE_SSH_USER }}" svace_analyze_ssh_key: "${{ 
secrets.SVACE_ANALYZE_SSH_PRIVATE_KEY }}" + test_build_d8v_cli: + name: Test build d8v cli + if: ${{ github.event_name == 'pull_request' }} + runs-on: ubuntu-latest + needs: set_vars + steps: + - name: Set up Go ${{ env.GO_VERSION }} + uses: actions/setup-go@v5 + with: + go-version: "${{ env.GO_VERSION }}" + + - name: Install Task + uses: arduino/setup-task@v2 + with: + version: 3.x + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha || github.sha }} + + - name: Test build d8v cli + run: | + task d8v-cli:build + + - name: Workability check + run: | + task d8v-cli:install + export PATH=$PATH:~/.local/bin + echo "" + echo "Print help" + d8v --help + pull_request_info: name: Get PR info if: ${{ github.event_name == 'pull_request' }} diff --git a/Taskfile.yaml b/Taskfile.yaml index 4a9272fe56..e193b2b1d7 100644 --- a/Taskfile.yaml +++ b/Taskfile.yaml @@ -21,6 +21,9 @@ includes: perf: taskfile: ./test/performance dir: ./test/performance + d8v-cli: + taskfile: ./src/cli + dir: ./src/cli vars: deckhouse_lib_helm_ver: 1.55.1 diff --git a/src/cli/Taskfile.yaml b/src/cli/Taskfile.yaml index e9287b53d8..776ccb788f 100644 --- a/src/cli/Taskfile.yaml +++ b/src/cli/Taskfile.yaml @@ -6,9 +6,11 @@ silent: true tasks: build: + desc: "Build d8v cli" cmds: - go build -o .out/d8v cmd/main.go install: + desc: "Install d8v cli to ~/.local/bin" deps: [build] cmds: - echo "Check that ~/.local/bin in your PATH" @@ -17,6 +19,7 @@ tasks: - cp .out/d8v ~/.local/bin/d8v - task: clean clean: + desc: "Clean up build artifacts" cmds: - rm -rf .out From ec8bceba0dc7a2debfa310b2843e58f58c6acbe3 Mon Sep 17 00:00:00 2001 From: Daniil Loktev <70405899+loktev-d@users.noreply.github.com> Date: Tue, 18 Nov 2025 09:25:47 +0300 Subject: [PATCH 5/7] feat(core): add d8v- prefix to all BlockDevices underlying resources (#1469) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implemented new naming strategy for BlockDevices underlying resources with d8v- prefix according to ADR. 
New naming templates (generator.go): - Added d8v- prefix to all resource templates - Changed format from vi-- to d8v-vi--- Backward compatibility (fetch.go, fetch_test.go): - Implemented FetchSupplement helper with automatic fallback to legacy names - Added Legacy methods in Generator for old naming format - Resources can be fetched by new names with fallback to old names Updated resource naming: - Importer pod: vi-importer- → d8v-vi-importer-- - Uploader pod: vi-uploader- → d8v-vi-uploader-- - Bounder pod: vi-bounder- → d8v-vi-bounder-- - DVCR auth secret: vi-dvcr-auth- → d8v-vi-dvcr-auth-- - DVCR auth secret for DV: vi-dvcr-auth-dv- → d8v-vi-dvcr-auth-dv-- - CA bundle: vi-dvcr-ca-dv- → d8v-vi-dvcr-ca-- - Uploader service: vi-uploader-svc- → d8v-vi-- - Uploader ingress: vi-uploader-ingress- → d8v-vi-- - TLS secret: vi-uploader-tls-ing- → d8v-vi-tls-- - Network policy: vi-uploader- → d8v-vi-- - Image pull secret: vi-pull-image- → d8v-vi-pull-image-- - Target PVC: vi-- → d8v-vi-- - Target PVC VD: vd-- → d8v-vd-- - DataVolume: vi-- → d8v-vi-- Name truncation handling: - Implemented smart name shortening to respect K8s limits (253 for most, 63 for services) - Names truncated while preserving prefix, UID, and uniqueness Signed-off-by: Daniil Loktev --- .../common/network_policy/network_policy.go | 34 ++- .../pkg/controller/bounder/bounder.go | 10 - .../controller/cvi/internal/source/http.go | 4 +- .../cvi/internal/source/interfaces.go | 10 +- .../controller/cvi/internal/source/mock.go | 70 +++-- .../cvi/internal/source/object_ref.go | 2 +- .../cvi/internal/source/object_ref_vd.go | 7 +- .../internal/source/object_ref_vdsnapshot.go | 4 +- .../internal/source/object_ref_vi_on_pvc.go | 7 +- .../cvi/internal/source/registry.go | 2 +- .../controller/cvi/internal/source/upload.go | 4 +- .../pkg/controller/importer/importer_pod.go | 10 - .../service/base_storage_class_service.go | 2 +- .../pkg/controller/service/bounder_service.go | 7 +- .../pkg/controller/service/disk_service.go | 18 +- .../controller/service/importer_service.go | 23 +- .../controller/service/uploader_service.go | 36 +-- .../pkg/controller/supplements/ensure.go | 1 + .../pkg/controller/supplements/fetch.go | 175 ++++++++++++ .../pkg/controller/supplements/fetch_test.go | 257 ++++++++++++++++++ .../pkg/controller/supplements/generator.go | 166 +++++++++-- .../controller/supplements/generator_test.go | 96 +++++++ .../controller/uploader/uploader_ingress.go | 10 - .../pkg/controller/uploader/uploader_pod.go | 10 - .../controller/uploader/uploader_service.go | 10 - .../pkg/controller/vd/internal/init.go | 2 +- .../pkg/controller/vd/internal/migration.go | 2 +- .../vd/internal/source/blank_test.go | 2 +- .../pkg/controller/vd/internal/source/http.go | 10 +- .../pkg/controller/vd/internal/source/mock.go | 56 ++-- .../vd/internal/source/object_ref_cvi_test.go | 2 +- .../source/object_ref_vdsnapshot_test.go | 2 +- .../vd/internal/source/object_ref_vi_test.go | 2 +- .../controller/vd/internal/source/registry.go | 10 +- .../vd/internal/source/step/ready_step.go | 6 +- .../controller/vd/internal/source/upload.go | 14 +- .../pkg/controller/vi/internal/source/http.go | 14 +- .../vi/internal/source/interfaces.go | 9 +- .../pkg/controller/vi/internal/source/mock.go | 108 ++++++-- .../vi/internal/source/object_ref.go | 10 +- .../vi/internal/source/object_ref_vd.go | 10 +- .../source/object_ref_vdsnapshot_cr.go | 6 +- .../source/object_ref_vdsnapshot_cr_test.go | 13 + .../source/object_ref_vdsnapshot_pvc.go | 7 +- 
.../source/object_ref_vdsnapshot_pvc_test.go | 50 ++-- .../internal/source/object_ref_vi_on_pvc.go | 10 +- .../controller/vi/internal/source/registry.go | 12 +- .../controller/vi/internal/source/upload.go | 14 +- 48 files changed, 1049 insertions(+), 297 deletions(-) create mode 100644 images/virtualization-artifact/pkg/controller/supplements/fetch.go create mode 100644 images/virtualization-artifact/pkg/controller/supplements/fetch_test.go create mode 100644 images/virtualization-artifact/pkg/controller/supplements/generator_test.go diff --git a/images/virtualization-artifact/pkg/common/network_policy/network_policy.go b/images/virtualization-artifact/pkg/common/network_policy/network_policy.go index 6aff0c1afd..43834c7318 100644 --- a/images/virtualization-artifact/pkg/common/network_policy/network_policy.go +++ b/images/virtualization-artifact/pkg/common/network_policy/network_policy.go @@ -26,17 +26,19 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/annotations" "github.com/deckhouse/virtualization-controller/pkg/common/object" + "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" ) -func CreateNetworkPolicy(ctx context.Context, c client.Client, obj client.Object, finalizer string) error { +func CreateNetworkPolicy(ctx context.Context, c client.Client, obj metav1.Object, sup supplements.DataVolumeSupplement, finalizer string) error { + npName := sup.NetworkPolicy() networkPolicy := netv1.NetworkPolicy{ TypeMeta: metav1.TypeMeta{ Kind: "NetworkPolicy", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: obj.GetName(), - Namespace: obj.GetNamespace(), + Name: npName.Name, + Namespace: npName.Namespace, OwnerReferences: obj.GetOwnerReferences(), Finalizers: []string{finalizer}, }, @@ -59,10 +61,28 @@ func CreateNetworkPolicy(ctx context.Context, c client.Client, obj client.Object return client.IgnoreAlreadyExists(err) } -func GetNetworkPolicy(ctx context.Context, client client.Client, name types.NamespacedName) (*netv1.NetworkPolicy, error) { - return object.FetchObject(ctx, name, client, &netv1.NetworkPolicy{}) +func GetNetworkPolicy(ctx context.Context, client client.Client, legacyName types.NamespacedName, sup supplements.Generator) (*netv1.NetworkPolicy, error) { + np, err := object.FetchObject(ctx, sup.NetworkPolicy(), client, &netv1.NetworkPolicy{}) + if err != nil { + return nil, err + } + if np != nil { + return np, nil + } + + // Return object with legacy naming otherwise + return object.FetchObject(ctx, legacyName, client, &netv1.NetworkPolicy{}) } -func GetNetworkPolicyFromObject(ctx context.Context, client client.Client, obj client.Object) (*netv1.NetworkPolicy, error) { - return object.FetchObject(ctx, types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()}, client, &netv1.NetworkPolicy{}) +func GetNetworkPolicyFromObject(ctx context.Context, client client.Client, legacyObjectKey client.Object, sup supplements.Generator) (*netv1.NetworkPolicy, error) { + np, err := object.FetchObject(ctx, sup.NetworkPolicy(), client, &netv1.NetworkPolicy{}) + if err != nil { + return nil, err + } + if np != nil { + return np, nil + } + + // Return object with legacy naming otherwise + return object.FetchObject(ctx, types.NamespacedName{Name: legacyObjectKey.GetName(), Namespace: legacyObjectKey.GetNamespace()}, client, &netv1.NetworkPolicy{}) } diff --git a/images/virtualization-artifact/pkg/controller/bounder/bounder.go b/images/virtualization-artifact/pkg/controller/bounder/bounder.go index d6b16324cb..ed361e6a0d 100644 
--- a/images/virtualization-artifact/pkg/controller/bounder/bounder.go +++ b/images/virtualization-artifact/pkg/controller/bounder/bounder.go @@ -21,13 +21,11 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization-controller/pkg/common" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - "github.com/deckhouse/virtualization-controller/pkg/common/object" podutil "github.com/deckhouse/virtualization-controller/pkg/common/pod" "github.com/deckhouse/virtualization-controller/pkg/common/provisioner" ) @@ -162,11 +160,3 @@ func (imp *Bounder) addVolumes(pod *corev1.Pod, container *corev1.Container) { ) } } - -type PodNamer interface { - BounderPod() types.NamespacedName -} - -func FindPod(ctx context.Context, client client.Client, name PodNamer) (*corev1.Pod, error) { - return object.FetchObject(ctx, name.BounderPod(), client, &corev1.Pod{}) -} diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go index 5a8ba64cdf..2ab9add2a6 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/http.go @@ -95,7 +95,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualI cvi.Status.Phase = v1alpha2.ImageReady // Unprotect import time supplements to delete them later. - err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -200,7 +200,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtualI } } - err = ds.importerService.Protect(ctx, pod) + err = ds.importerService.Protect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go index 09cc02d606..a760da0487 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/interfaces.go @@ -41,9 +41,9 @@ type Importer interface { CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) - DeletePod(ctx context.Context, obj client.Object, controllerName string) (bool, error) - Protect(ctx context.Context, pod *corev1.Pod) error - Unprotect(ctx context.Context, pod *corev1.Pod) error + DeletePod(ctx context.Context, obj client.Object, controllerName string, sup supplements.Generator) (bool, error) + Protect(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error + Unprotect(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error GetPodSettingsWithPVC(ownerRef *metav1.OwnerReference, sup supplements.Generator, pvcName, pvcNamespace string) *importer.PodSettings } @@ -53,8 +53,8 @@ type Uploader interface { GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) GetIngress(ctx context.Context, sup supplements.Generator) (*netv1.Ingress, error) GetService(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) - 
Protect(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error - Unprotect(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error + Protect(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error + Unprotect(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error GetExternalURL(ctx context.Context, ing *netv1.Ingress) string GetInClusterURL(ctx context.Context, svc *corev1.Service) string } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go index 270a719a89..3afe5454ac 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/mock.go @@ -35,7 +35,7 @@ var _ Importer = &ImporterMock{} // CleanUpSupplementsFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) { // panic("mock out the CleanUpSupplements method") // }, -// DeletePodFunc: func(ctx context.Context, obj client.Object, controllerName string) (bool, error) { +// DeletePodFunc: func(ctx context.Context, obj client.Object, controllerName string, sup supplements.Generator) (bool, error) { // panic("mock out the DeletePod method") // }, // GetPodFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { @@ -44,7 +44,7 @@ var _ Importer = &ImporterMock{} // GetPodSettingsWithPVCFunc: func(ownerRef *metav1.OwnerReference, sup supplements.Generator, pvcName string, pvcNamespace string) *importer.PodSettings { // panic("mock out the GetPodSettingsWithPVC method") // }, -// ProtectFunc: func(ctx context.Context, pod *corev1.Pod) error { +// ProtectFunc: func(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error { // panic("mock out the Protect method") // }, // StartFunc: func(ctx context.Context, settings *importer.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { @@ -53,7 +53,7 @@ var _ Importer = &ImporterMock{} // StartWithPodSettingFunc: func(ctx context.Context, settings *importer.Settings, sup supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error { // panic("mock out the StartWithPodSetting method") // }, -// UnprotectFunc: func(ctx context.Context, pod *corev1.Pod) error { +// UnprotectFunc: func(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error { // panic("mock out the Unprotect method") // }, // } @@ -70,7 +70,7 @@ type ImporterMock struct { CleanUpSupplementsFunc func(ctx context.Context, sup supplements.Generator) (bool, error) // DeletePodFunc mocks the DeletePod method. - DeletePodFunc func(ctx context.Context, obj client.Object, controllerName string) (bool, error) + DeletePodFunc func(ctx context.Context, obj client.Object, controllerName string, sup supplements.Generator) (bool, error) // GetPodFunc mocks the GetPod method. GetPodFunc func(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) @@ -79,7 +79,7 @@ type ImporterMock struct { GetPodSettingsWithPVCFunc func(ownerRef *metav1.OwnerReference, sup supplements.Generator, pvcName string, pvcNamespace string) *importer.PodSettings // ProtectFunc mocks the Protect method. 
- ProtectFunc func(ctx context.Context, pod *corev1.Pod) error + ProtectFunc func(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error // StartFunc mocks the Start method. StartFunc func(ctx context.Context, settings *importer.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error @@ -88,7 +88,7 @@ type ImporterMock struct { StartWithPodSettingFunc func(ctx context.Context, settings *importer.Settings, sup supplements.Generator, caBundle *datasource.CABundle, podSettings *importer.PodSettings) error // UnprotectFunc mocks the Unprotect method. - UnprotectFunc func(ctx context.Context, pod *corev1.Pod) error + UnprotectFunc func(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error // calls tracks calls to the methods. calls struct { @@ -114,6 +114,8 @@ type ImporterMock struct { Obj client.Object // ControllerName is the controllerName argument value. ControllerName string + // Sup is the sup argument value. + Sup supplements.Generator } // GetPod holds details about calls to the GetPod method. GetPod []struct { @@ -139,6 +141,8 @@ type ImporterMock struct { Ctx context.Context // Pod is the pod argument value. Pod *corev1.Pod + // Sup is the sup argument value. + Sup supplements.Generator } // Start holds details about calls to the Start method. Start []struct { @@ -174,6 +178,8 @@ type ImporterMock struct { Ctx context.Context // Pod is the pod argument value. Pod *corev1.Pod + // Sup is the sup argument value. + Sup supplements.Generator } } lockCleanUp sync.RWMutex @@ -260,7 +266,7 @@ func (mock *ImporterMock) CleanUpSupplementsCalls() []struct { } // DeletePod calls DeletePodFunc. -func (mock *ImporterMock) DeletePod(ctx context.Context, obj client.Object, controllerName string) (bool, error) { +func (mock *ImporterMock) DeletePod(ctx context.Context, obj client.Object, controllerName string, sup supplements.Generator) (bool, error) { if mock.DeletePodFunc == nil { panic("ImporterMock.DeletePodFunc: method is nil but Importer.DeletePod was just called") } @@ -268,15 +274,17 @@ func (mock *ImporterMock) DeletePod(ctx context.Context, obj client.Object, cont Ctx context.Context Obj client.Object ControllerName string + Sup supplements.Generator }{ Ctx: ctx, Obj: obj, ControllerName: controllerName, + Sup: sup, } mock.lockDeletePod.Lock() mock.calls.DeletePod = append(mock.calls.DeletePod, callInfo) mock.lockDeletePod.Unlock() - return mock.DeletePodFunc(ctx, obj, controllerName) + return mock.DeletePodFunc(ctx, obj, controllerName, sup) } // DeletePodCalls gets all the calls that were made to DeletePod. @@ -287,11 +295,13 @@ func (mock *ImporterMock) DeletePodCalls() []struct { Ctx context.Context Obj client.Object ControllerName string + Sup supplements.Generator } { var calls []struct { Ctx context.Context Obj client.Object ControllerName string + Sup supplements.Generator } mock.lockDeletePod.RLock() calls = mock.calls.DeletePod @@ -380,21 +390,23 @@ func (mock *ImporterMock) GetPodSettingsWithPVCCalls() []struct { } // Protect calls ProtectFunc. 
-func (mock *ImporterMock) Protect(ctx context.Context, pod *corev1.Pod) error { +func (mock *ImporterMock) Protect(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error { if mock.ProtectFunc == nil { panic("ImporterMock.ProtectFunc: method is nil but Importer.Protect was just called") } callInfo := struct { Ctx context.Context Pod *corev1.Pod + Sup supplements.Generator }{ Ctx: ctx, Pod: pod, + Sup: sup, } mock.lockProtect.Lock() mock.calls.Protect = append(mock.calls.Protect, callInfo) mock.lockProtect.Unlock() - return mock.ProtectFunc(ctx, pod) + return mock.ProtectFunc(ctx, pod, sup) } // ProtectCalls gets all the calls that were made to Protect. @@ -404,10 +416,12 @@ func (mock *ImporterMock) Protect(ctx context.Context, pod *corev1.Pod) error { func (mock *ImporterMock) ProtectCalls() []struct { Ctx context.Context Pod *corev1.Pod + Sup supplements.Generator } { var calls []struct { Ctx context.Context Pod *corev1.Pod + Sup supplements.Generator } mock.lockProtect.RLock() calls = mock.calls.Protect @@ -516,21 +530,23 @@ func (mock *ImporterMock) StartWithPodSettingCalls() []struct { } // Unprotect calls UnprotectFunc. -func (mock *ImporterMock) Unprotect(ctx context.Context, pod *corev1.Pod) error { +func (mock *ImporterMock) Unprotect(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error { if mock.UnprotectFunc == nil { panic("ImporterMock.UnprotectFunc: method is nil but Importer.Unprotect was just called") } callInfo := struct { Ctx context.Context Pod *corev1.Pod + Sup supplements.Generator }{ Ctx: ctx, Pod: pod, + Sup: sup, } mock.lockUnprotect.Lock() mock.calls.Unprotect = append(mock.calls.Unprotect, callInfo) mock.lockUnprotect.Unlock() - return mock.UnprotectFunc(ctx, pod) + return mock.UnprotectFunc(ctx, pod, sup) } // UnprotectCalls gets all the calls that were made to Unprotect. @@ -540,10 +556,12 @@ func (mock *ImporterMock) Unprotect(ctx context.Context, pod *corev1.Pod) error func (mock *ImporterMock) UnprotectCalls() []struct { Ctx context.Context Pod *corev1.Pod + Sup supplements.Generator } { var calls []struct { Ctx context.Context Pod *corev1.Pod + Sup supplements.Generator } mock.lockUnprotect.RLock() calls = mock.calls.Unprotect @@ -579,13 +597,13 @@ var _ Uploader = &UploaderMock{} // GetServiceFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) { // panic("mock out the GetService method") // }, -// ProtectFunc: func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { +// ProtectFunc: func(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { // panic("mock out the Protect method") // }, // StartFunc: func(ctx context.Context, settings *uploader.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { // panic("mock out the Start method") // }, -// UnprotectFunc: func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { +// UnprotectFunc: func(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { // panic("mock out the Unprotect method") // }, // } @@ -614,13 +632,13 @@ type UploaderMock struct { GetServiceFunc func(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) // ProtectFunc mocks the Protect method. 
- ProtectFunc func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error + ProtectFunc func(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error // StartFunc mocks the Start method. StartFunc func(ctx context.Context, settings *uploader.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error // UnprotectFunc mocks the Unprotect method. - UnprotectFunc func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error + UnprotectFunc func(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error // calls tracks calls to the methods. calls struct { @@ -670,6 +688,8 @@ type UploaderMock struct { Protect []struct { // Ctx is the ctx argument value. Ctx context.Context + // Sup is the sup argument value. + Sup supplements.Generator // Pod is the pod argument value. Pod *corev1.Pod // Svc is the svc argument value. @@ -696,6 +716,8 @@ type UploaderMock struct { Unprotect []struct { // Ctx is the ctx argument value. Ctx context.Context + // Sup is the sup argument value. + Sup supplements.Generator // Pod is the pod argument value. Pod *corev1.Pod // Svc is the svc argument value. @@ -932,17 +954,19 @@ func (mock *UploaderMock) GetServiceCalls() []struct { } // Protect calls ProtectFunc. -func (mock *UploaderMock) Protect(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { +func (mock *UploaderMock) Protect(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { if mock.ProtectFunc == nil { panic("UploaderMock.ProtectFunc: method is nil but Uploader.Protect was just called") } callInfo := struct { Ctx context.Context + Sup supplements.Generator Pod *corev1.Pod Svc *corev1.Service Ing *netv1.Ingress }{ Ctx: ctx, + Sup: sup, Pod: pod, Svc: svc, Ing: ing, @@ -950,7 +974,7 @@ func (mock *UploaderMock) Protect(ctx context.Context, pod *corev1.Pod, svc *cor mock.lockProtect.Lock() mock.calls.Protect = append(mock.calls.Protect, callInfo) mock.lockProtect.Unlock() - return mock.ProtectFunc(ctx, pod, svc, ing) + return mock.ProtectFunc(ctx, sup, pod, svc, ing) } // ProtectCalls gets all the calls that were made to Protect. @@ -959,12 +983,14 @@ func (mock *UploaderMock) Protect(ctx context.Context, pod *corev1.Pod, svc *cor // len(mockedUploader.ProtectCalls()) func (mock *UploaderMock) ProtectCalls() []struct { Ctx context.Context + Sup supplements.Generator Pod *corev1.Pod Svc *corev1.Service Ing *netv1.Ingress } { var calls []struct { Ctx context.Context + Sup supplements.Generator Pod *corev1.Pod Svc *corev1.Service Ing *netv1.Ingress @@ -1028,17 +1054,19 @@ func (mock *UploaderMock) StartCalls() []struct { } // Unprotect calls UnprotectFunc. 
-func (mock *UploaderMock) Unprotect(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { +func (mock *UploaderMock) Unprotect(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { if mock.UnprotectFunc == nil { panic("UploaderMock.UnprotectFunc: method is nil but Uploader.Unprotect was just called") } callInfo := struct { Ctx context.Context + Sup supplements.Generator Pod *corev1.Pod Svc *corev1.Service Ing *netv1.Ingress }{ Ctx: ctx, + Sup: sup, Pod: pod, Svc: svc, Ing: ing, @@ -1046,7 +1074,7 @@ func (mock *UploaderMock) Unprotect(ctx context.Context, pod *corev1.Pod, svc *c mock.lockUnprotect.Lock() mock.calls.Unprotect = append(mock.calls.Unprotect, callInfo) mock.lockUnprotect.Unlock() - return mock.UnprotectFunc(ctx, pod, svc, ing) + return mock.UnprotectFunc(ctx, sup, pod, svc, ing) } // UnprotectCalls gets all the calls that were made to Unprotect. @@ -1055,12 +1083,14 @@ func (mock *UploaderMock) Unprotect(ctx context.Context, pod *corev1.Pod, svc *c // len(mockedUploader.UnprotectCalls()) func (mock *UploaderMock) UnprotectCalls() []struct { Ctx context.Context + Sup supplements.Generator Pod *corev1.Pod Svc *corev1.Service Ing *netv1.Ingress } { var calls []struct { Ctx context.Context + Sup supplements.Generator Pod *corev1.Pod Svc *corev1.Service Ing *netv1.Ingress diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go index 466c875aa4..512af20ee8 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref.go @@ -155,7 +155,7 @@ func (ds ObjectRefDataSource) Sync(ctx context.Context, cvi *v1alpha2.ClusterVir cvi.Status.Phase = v1alpha2.ImageReady // Unprotect import time supplements to delete them later. 
- err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go index 33f590be4c..8faf025d12 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vd.go @@ -86,7 +86,7 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *v1alpha2.ClusterVi cvi.Status.Phase = v1alpha2.ImageReady - err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -191,7 +191,7 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *v1alpha2.ClusterVi } } - err = ds.importerService.Protect(ctx, pod) + err = ds.importerService.Protect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -212,7 +212,8 @@ func (ds ObjectRefVirtualDisk) Sync(ctx context.Context, cvi *v1alpha2.ClusterVi } func (ds ObjectRefVirtualDisk) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) { - return ds.importerService.DeletePod(ctx, cvi, controllerName) + supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, ds.controllerNamespace, cvi.UID) + return ds.importerService.DeletePod(ctx, cvi, controllerName, supgen) } func (ds ObjectRefVirtualDisk) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, sup supplements.Generator) *importer.Settings { diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go index 6c81e18a8c..4064d59a5e 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vdsnapshot.go @@ -107,7 +107,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *v1alpha2.C cvi.Status.Phase = v1alpha2.ImageReady - err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -297,7 +297,7 @@ func (ds ObjectRefVirtualDiskSnapshot) Sync(ctx context.Context, cvi *v1alpha2.C } } - err = ds.importerService.Protect(ctx, pod) + err = ds.importerService.Protect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go index 0eee3e9f81..e98ba2880f 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/object_ref_vi_on_pvc.go @@ -80,7 +80,7 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *v1alpha2.Clu cvi.Status.Phase = v1alpha2.ImageReady - err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -185,7 +185,7 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *v1alpha2.Clu } } - err = ds.importerService.Protect(ctx, pod) + err = ds.importerService.Protect(ctx, pod, 
supgen) if err != nil { return reconcile.Result{}, err } @@ -206,7 +206,8 @@ func (ds ObjectRefVirtualImageOnPvc) Sync(ctx context.Context, cvi *v1alpha2.Clu } func (ds ObjectRefVirtualImageOnPvc) CleanUp(ctx context.Context, cvi *v1alpha2.ClusterVirtualImage) (bool, error) { - return ds.importerService.DeletePod(ctx, cvi, controllerName) + supgen := supplements.NewGenerator(annotations.CVIShortName, cvi.Name, cvi.Namespace, cvi.UID) + return ds.importerService.DeletePod(ctx, cvi, controllerName, supgen) } func (ds ObjectRefVirtualImageOnPvc) getEnvSettings(cvi *v1alpha2.ClusterVirtualImage, sup supplements.Generator) *importer.Settings { diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go index f2fded59c7..a09fd8e89e 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/registry.go @@ -101,7 +101,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirt cvi.Status.Phase = v1alpha2.ImageReady // Unprotect import time supplements to delete them later. - err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } diff --git a/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go b/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go index ece31eb6ed..aeee4cadf9 100644 --- a/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go +++ b/images/virtualization-artifact/pkg/controller/cvi/internal/source/upload.go @@ -104,7 +104,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtua cvi.Status.Phase = v1alpha2.ImageReady // Unprotect upload time supplements to delete them later. 
- err = ds.uploaderService.Unprotect(ctx, pod, svc, ing) + err = ds.uploaderService.Unprotect(ctx, supgen, pod, svc, ing) if err != nil { return reconcile.Result{}, err } @@ -215,7 +215,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, cvi *v1alpha2.ClusterVirtua cvi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) cvi.Status.DownloadSpeed = ds.statService.GetDownloadSpeed(cvi.GetUID(), pod) - err = ds.uploaderService.Protect(ctx, pod, svc, ing) + err = ds.uploaderService.Protect(ctx, supgen, pod, svc, ing) if err != nil { return reconcile.Result{}, err } diff --git a/images/virtualization-artifact/pkg/controller/importer/importer_pod.go b/images/virtualization-artifact/pkg/controller/importer/importer_pod.go index b3aa834760..44d2ac912a 100644 --- a/images/virtualization-artifact/pkg/controller/importer/importer_pod.go +++ b/images/virtualization-artifact/pkg/controller/importer/importer_pod.go @@ -23,13 +23,11 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization-controller/pkg/common" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - "github.com/deckhouse/virtualization-controller/pkg/common/object" podutil "github.com/deckhouse/virtualization-controller/pkg/common/pod" "github.com/deckhouse/virtualization-controller/pkg/common/provisioner" ) @@ -385,11 +383,3 @@ func (imp *Importer) addVolumes(pod *corev1.Pod, container *corev1.Container) { ) } } - -type PodNamer interface { - ImporterPod() types.NamespacedName -} - -func FindPod(ctx context.Context, client client.Client, name PodNamer) (*corev1.Pod, error) { - return object.FetchObject(ctx, name.ImporterPod(), client, &corev1.Pod{}) -} diff --git a/images/virtualization-artifact/pkg/controller/service/base_storage_class_service.go b/images/virtualization-artifact/pkg/controller/service/base_storage_class_service.go index bad08227e6..e480d3c313 100644 --- a/images/virtualization-artifact/pkg/controller/service/base_storage_class_service.go +++ b/images/virtualization-artifact/pkg/controller/service/base_storage_class_service.go @@ -74,7 +74,7 @@ func (s BaseStorageClassService) GetStorageClass(ctx context.Context, scName str } func (s BaseStorageClassService) GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) { - return object.FetchObject(ctx, sup.PersistentVolumeClaim(), s.client, &corev1.PersistentVolumeClaim{}) + return supplements.FetchSupplement(ctx, s.client, sup, supplements.SupplementPVC, &corev1.PersistentVolumeClaim{}) } func (s BaseStorageClassService) IsStorageClassDeprecated(sc *storagev1.StorageClass) bool { diff --git a/images/virtualization-artifact/pkg/controller/service/bounder_service.go b/images/virtualization-artifact/pkg/controller/service/bounder_service.go index 8debca6863..a6b7fffb65 100644 --- a/images/virtualization-artifact/pkg/controller/service/bounder_service.go +++ b/images/virtualization-artifact/pkg/controller/service/bounder_service.go @@ -108,12 +108,7 @@ func (s BounderPodService) CleanUpSupplements(ctx context.Context, sup supplemen } func (s BounderPodService) GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { - pod, err := bounder.FindPod(ctx, s.client, sup) - if err != nil { - return nil, err - } - - return pod, nil + return 
supplements.FetchSupplement(ctx, s.client, sup, supplements.SupplementBounderPod, &corev1.Pod{}) } func (s BounderPodService) GetPodSettings(ownerRef *metav1.OwnerReference, sup supplements.Generator) *bounder.PodSettings { diff --git a/images/virtualization-artifact/pkg/controller/service/disk_service.go b/images/virtualization-artifact/pkg/controller/service/disk_service.go index 4865cf173f..bef84b117c 100644 --- a/images/virtualization-artifact/pkg/controller/service/disk_service.go +++ b/images/virtualization-artifact/pkg/controller/service/disk_service.go @@ -112,7 +112,7 @@ func (s DiskService) Start( return err } - err = networkpolicy.CreateNetworkPolicy(ctx, s.client, dv, s.protection.GetFinalizer()) + err = networkpolicy.CreateNetworkPolicy(ctx, s.client, dv, sup, s.protection.GetFinalizer()) if err != nil { return fmt.Errorf("failed to create NetworkPolicy: %w", err) } @@ -199,7 +199,7 @@ func (s DiskService) StartImmediate( return err } - err = networkpolicy.CreateNetworkPolicy(ctx, s.client, dv, s.protection.GetFinalizer()) + err = networkpolicy.CreateNetworkPolicy(ctx, s.client, dv, dataVolumeSupplement, s.protection.GetFinalizer()) if err != nil { return fmt.Errorf("failed to create NetworkPolicy: %w", err) } @@ -299,7 +299,7 @@ func (s DiskService) CleanUpSupplements(ctx context.Context, sup supplements.Gen } // 2. Delete network policy. - networkPolicy, err := networkpolicy.GetNetworkPolicy(ctx, s.client, sup.DataVolume()) + networkPolicy, err := networkpolicy.GetNetworkPolicy(ctx, s.client, sup.LegacyDataVolume(), sup) if err != nil { return false, err } @@ -340,7 +340,7 @@ func (s DiskService) CleanUpSupplements(ctx context.Context, sup supplements.Gen return hasDeleted, supplements.CleanupForDataVolume(ctx, s.client, sup, s.dvcrSettings) } -func (s DiskService) Protect(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { +func (s DiskService) Protect(ctx context.Context, sup supplements.Generator, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { err := s.protection.AddOwnerRef(ctx, owner, pvc) if err != nil { return fmt.Errorf("failed to add owner ref for pvc: %w", err) @@ -352,7 +352,7 @@ func (s DiskService) Protect(ctx context.Context, owner client.Object, dv *cdiv1 } if dv != nil { - networkPolicy, err := networkpolicy.GetNetworkPolicy(ctx, s.client, types.NamespacedName{Namespace: dv.Namespace, Name: dv.Name}) + networkPolicy, err := networkpolicy.GetNetworkPolicyFromObject(ctx, s.client, dv, sup) if err != nil { return fmt.Errorf("failed to get networkPolicy for disk's supplements protection: %w", err) } @@ -368,14 +368,14 @@ func (s DiskService) Protect(ctx context.Context, owner client.Object, dv *cdiv1 return nil } -func (s DiskService) Unprotect(ctx context.Context, dv *cdiv1.DataVolume) error { +func (s DiskService) Unprotect(ctx context.Context, sup supplements.Generator, dv *cdiv1.DataVolume) error { err := s.protection.RemoveProtection(ctx, dv) if err != nil { return fmt.Errorf("failed to remove protection for disk's supplements: %w", err) } if dv != nil { - networkPolicy, err := networkpolicy.GetNetworkPolicy(ctx, s.client, types.NamespacedName{Namespace: dv.Namespace, Name: dv.Name}) + networkPolicy, err := networkpolicy.GetNetworkPolicyFromObject(ctx, s.client, dv, sup) if err != nil { return fmt.Errorf("failed to get networkPolicy for removing disk's supplements protection: %w", err) } @@ -550,11 +550,11 @@ func (s DiskService) GetStorageClass(ctx context.Context, 
scName string) (*stora } func (s DiskService) GetDataVolume(ctx context.Context, sup supplements.Generator) (*cdiv1.DataVolume, error) { - return object.FetchObject(ctx, sup.DataVolume(), s.client, &cdiv1.DataVolume{}) + return supplements.FetchSupplement(ctx, s.client, sup, supplements.SupplementDataVolume, &cdiv1.DataVolume{}) } func (s DiskService) GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) { - return object.FetchObject(ctx, sup.PersistentVolumeClaim(), s.client, &corev1.PersistentVolumeClaim{}) + return supplements.FetchSupplement(ctx, s.client, sup, supplements.SupplementPVC, &corev1.PersistentVolumeClaim{}) } func (s DiskService) GetVolumeSnapshot(ctx context.Context, name, namespace string) (*vsv1.VolumeSnapshot, error) { diff --git a/images/virtualization-artifact/pkg/controller/service/importer_service.go b/images/virtualization-artifact/pkg/controller/service/importer_service.go index 51dc08a591..c6db2c8105 100644 --- a/images/virtualization-artifact/pkg/controller/service/importer_service.go +++ b/images/virtualization-artifact/pkg/controller/service/importer_service.go @@ -89,7 +89,7 @@ func (s ImporterService) Start( return err } - err = networkpolicy.CreateNetworkPolicy(ctx, s.client, pod, s.protection.GetFinalizer()) + err = networkpolicy.CreateNetworkPolicy(ctx, s.client, pod, sup, s.protection.GetFinalizer()) if err != nil { return fmt.Errorf("failed to create NetworkPolicy: %w", err) } @@ -113,7 +113,7 @@ func (s ImporterService) CleanUp(ctx context.Context, sup supplements.Generator) return s.CleanUpSupplements(ctx, sup) } -func (s ImporterService) DeletePod(ctx context.Context, obj client.Object, controllerName string) (bool, error) { +func (s ImporterService) DeletePod(ctx context.Context, obj client.Object, controllerName string, sup supplements.Generator) (bool, error) { labelSelector := client.MatchingLabels{annotations.AppKubernetesManagedByLabel: controllerName} podList := &corev1.PodList{} @@ -124,7 +124,7 @@ func (s ImporterService) DeletePod(ctx context.Context, obj client.Object, contr for _, pod := range podList.Items { for _, ownerRef := range pod.OwnerReferences { if ownerRef.Kind == obj.GetObjectKind().GroupVersionKind().Kind && ownerRef.Name == obj.GetName() && ownerRef.UID == obj.GetUID() { - networkPolicy, err := networkpolicy.GetNetworkPolicyFromObject(ctx, s.client, &pod) + networkPolicy, err := networkpolicy.GetNetworkPolicyFromObject(ctx, s.client, &pod, sup) if err != nil { return false, err } @@ -159,7 +159,7 @@ func (s ImporterService) DeletePod(ctx context.Context, obj client.Object, contr } func (s ImporterService) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) { - networkPolicy, err := networkpolicy.GetNetworkPolicy(ctx, s.client, sup.ImporterPod()) + networkPolicy, err := networkpolicy.GetNetworkPolicy(ctx, s.client, sup.LegacyImporterPod(), sup) if err != nil { return false, err } @@ -199,11 +199,11 @@ func (s ImporterService) CleanUpSupplements(ctx context.Context, sup supplements return hasDeleted, nil } -func (s ImporterService) Protect(ctx context.Context, pod *corev1.Pod) (err error) { +func (s ImporterService) Protect(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) (err error) { var networkPolicy *netv1.NetworkPolicy if pod != nil { - networkPolicy, err = networkpolicy.GetNetworkPolicyFromObject(ctx, s.client, pod) + networkPolicy, err = networkpolicy.GetNetworkPolicyFromObject(ctx, s.client, pod, sup) if err != nil { 
return fmt.Errorf("failed to get networkPolicy for importer's supplements protection: %w", err) } @@ -217,11 +217,11 @@ func (s ImporterService) Protect(ctx context.Context, pod *corev1.Pod) (err erro return nil } -func (s ImporterService) Unprotect(ctx context.Context, pod *corev1.Pod) (err error) { +func (s ImporterService) Unprotect(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) (err error) { var networkPolicy *netv1.NetworkPolicy if pod != nil { - networkPolicy, err = networkpolicy.GetNetworkPolicyFromObject(ctx, s.client, pod) + networkPolicy, err = networkpolicy.GetNetworkPolicyFromObject(ctx, s.client, pod, sup) if err != nil { return fmt.Errorf("failed to get networkPolicy for removing importer's supplements protection: %w", err) } @@ -236,12 +236,7 @@ func (s ImporterService) Unprotect(ctx context.Context, pod *corev1.Pod) (err er } func (s ImporterService) GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { - pod, err := importer.FindPod(ctx, s.client, sup) - if err != nil { - return nil, err - } - - return pod, nil + return supplements.FetchSupplement(ctx, s.client, sup, supplements.SupplementImporterPod, &corev1.Pod{}) } func (s ImporterService) getPodSettings(ownerRef *metav1.OwnerReference, sup supplements.Generator) *importer.PodSettings { diff --git a/images/virtualization-artifact/pkg/controller/service/uploader_service.go b/images/virtualization-artifact/pkg/controller/service/uploader_service.go index 34e7a5b2cc..f556262f62 100644 --- a/images/virtualization-artifact/pkg/controller/service/uploader_service.go +++ b/images/virtualization-artifact/pkg/controller/service/uploader_service.go @@ -91,7 +91,7 @@ func (s UploaderService) Start( return err } - err = networkpolicy.CreateNetworkPolicy(ctx, s.client, pod, s.protection.GetFinalizer()) + err = networkpolicy.CreateNetworkPolicy(ctx, s.client, pod, sup, s.protection.GetFinalizer()) if err != nil { return fmt.Errorf("failed to create NetworkPolicy: %w", err) } @@ -131,7 +131,7 @@ func (s UploaderService) CleanUpSupplements(ctx context.Context, sup supplements if err != nil { return false, err } - networkPolicy, err := networkpolicy.GetNetworkPolicy(ctx, s.client, sup.UploaderPod()) + networkPolicy, err := networkpolicy.GetNetworkPolicy(ctx, s.client, sup.LegacyUploaderPod(), sup) if err != nil { return false, err } @@ -178,11 +178,11 @@ func (s UploaderService) CleanUpSupplements(ctx context.Context, sup supplements return haveDeleted, nil } -func (s UploaderService) Protect(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) (err error) { +func (s UploaderService) Protect(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) (err error) { var networkPolicy *netv1.NetworkPolicy if pod != nil { - networkPolicy, err = networkpolicy.GetNetworkPolicyFromObject(ctx, s.client, pod) + networkPolicy, err = networkpolicy.GetNetworkPolicyFromObject(ctx, s.client, pod, sup) if err != nil { return fmt.Errorf("failed to get networkPolicy for removing importer's supplements protection: %w", err) } @@ -195,11 +195,11 @@ func (s UploaderService) Protect(ctx context.Context, pod *corev1.Pod, svc *core return nil } -func (s UploaderService) Unprotect(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) (err error) { +func (s UploaderService) Unprotect(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) (err error) { var networkPolicy 
*netv1.NetworkPolicy if pod != nil { - networkPolicy, err = networkpolicy.GetNetworkPolicyFromObject(ctx, s.client, pod) + networkPolicy, err = networkpolicy.GetNetworkPolicyFromObject(ctx, s.client, pod, sup) if err != nil { return fmt.Errorf("failed to get networkPolicy for removing importer's supplements protection: %w", err) } @@ -213,30 +213,18 @@ func (s UploaderService) Unprotect(ctx context.Context, pod *corev1.Pod, svc *co } func (s UploaderService) GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) { - pod, err := uploader.FindPod(ctx, s.client, sup) - if err != nil { - return nil, err - } - - return pod, nil + pod := &corev1.Pod{} + return supplements.FetchSupplement(ctx, s.client, sup, supplements.SupplementUploaderPod, pod) } func (s UploaderService) GetService(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) { - svc, err := uploader.FindService(ctx, s.client, sup) - if err != nil { - return nil, err - } - - return svc, nil + svc := &corev1.Service{} + return supplements.FetchSupplement(ctx, s.client, sup, supplements.SupplementUploaderService, svc) } func (s UploaderService) GetIngress(ctx context.Context, sup supplements.Generator) (*netv1.Ingress, error) { - ing, err := uploader.FindIngress(ctx, s.client, sup) - if err != nil { - return nil, err - } - - return ing, nil + ing := &netv1.Ingress{} + return supplements.FetchSupplement(ctx, s.client, sup, supplements.SupplementUploaderIngress, ing) } func (s UploaderService) GetExternalURL(ctx context.Context, ing *netv1.Ingress) string { diff --git a/images/virtualization-artifact/pkg/controller/supplements/ensure.go b/images/virtualization-artifact/pkg/controller/supplements/ensure.go index 7f2ca82367..8a9a62b0e0 100644 --- a/images/virtualization-artifact/pkg/controller/supplements/ensure.go +++ b/images/virtualization-artifact/pkg/controller/supplements/ensure.go @@ -209,4 +209,5 @@ type DataVolumeSupplement interface { DataVolume() types.NamespacedName DVCRAuthSecretForDV() types.NamespacedName DVCRCABundleConfigMapForDV() types.NamespacedName + NetworkPolicy() types.NamespacedName } diff --git a/images/virtualization-artifact/pkg/controller/supplements/fetch.go b/images/virtualization-artifact/pkg/controller/supplements/fetch.go new file mode 100644 index 0000000000..1b150f9e93 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/supplements/fetch.go @@ -0,0 +1,175 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package supplements + +import ( + "context" + "fmt" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// SupplementType represents the type of supplement resource +type SupplementType string + +const ( + // Pods + SupplementImporterPod SupplementType = "ImporterPod" + SupplementUploaderPod SupplementType = "UploaderPod" + SupplementBounderPod SupplementType = "BounderPod" + + // Network + SupplementUploaderService SupplementType = "UploaderService" + SupplementUploaderIngress SupplementType = "UploaderIngress" + + // Volumes + SupplementPVC SupplementType = "PersistentVolumeClaim" + SupplementDataVolume SupplementType = "DataVolume" + + // ConfigMaps/Secrets + SupplementDVCRAuthSecret SupplementType = "DVCRAuthSecret" + SupplementDVCRAuthSecretForDV SupplementType = "DVCRAuthSecretForDV" + SupplementDVCRCABundleConfigMap SupplementType = "DVCRCABundleConfigMapForDV" + SupplementCABundleConfigMap SupplementType = "CABundleConfigMap" + SupplementImagePullSecret SupplementType = "ImagePullSecret" + SupplementUploaderTLSSecret SupplementType = "UploaderTLSSecret" +) + +// GetSupplementName returns the name for the requested supplement type +func GetSupplementName(gen Generator, supplementType SupplementType) (types.NamespacedName, error) { + switch supplementType { + // Pods + case SupplementImporterPod: + return gen.ImporterPod(), nil + case SupplementUploaderPod: + return gen.UploaderPod(), nil + case SupplementBounderPod: + return gen.BounderPod(), nil + + // Network + case SupplementUploaderService: + return gen.UploaderService(), nil + case SupplementUploaderIngress: + return gen.UploaderIngress(), nil + + // Volumes + case SupplementPVC: + return gen.PersistentVolumeClaim(), nil + case SupplementDataVolume: + return gen.DataVolume(), nil + + // ConfigMaps/Secrets + case SupplementDVCRAuthSecret: + return gen.DVCRAuthSecret(), nil + case SupplementDVCRAuthSecretForDV: + return gen.DVCRAuthSecretForDV(), nil + case SupplementDVCRCABundleConfigMap: + return gen.DVCRCABundleConfigMapForDV(), nil + case SupplementCABundleConfigMap: + return gen.CABundleConfigMap(), nil + case SupplementImagePullSecret: + return gen.ImagePullSecret(), nil + case SupplementUploaderTLSSecret: + return gen.UploaderTLSSecretForIngress(), nil + + default: + return types.NamespacedName{}, fmt.Errorf("unknown supplement type: %s", supplementType) + } +} + +// GetLegacySupplementName returns the legacy name for the requested supplement type +func GetLegacySupplementName(gen Generator, supplementType SupplementType) (types.NamespacedName, error) { + switch supplementType { + // Pods + case SupplementImporterPod: + return gen.LegacyImporterPod(), nil + case SupplementUploaderPod: + return gen.LegacyUploaderPod(), nil + case SupplementBounderPod: + return gen.LegacyBounderPod(), nil + + // Network + case SupplementUploaderService: + return gen.LegacyUploaderService(), nil + case SupplementUploaderIngress: + return gen.LegacyUploaderIngress(), nil + + // Volumes + case SupplementPVC: + return gen.LegacyPersistentVolumeClaim(), nil + case SupplementDataVolume: + return gen.LegacyDataVolume(), nil + + // ConfigMaps/Secrets + case SupplementDVCRAuthSecret: + return gen.LegacyDVCRAuthSecret(), nil + case SupplementDVCRAuthSecretForDV: + return gen.LegacyDVCRAuthSecretForDV(), nil + case SupplementDVCRCABundleConfigMap: + return gen.LegacyDVCRCABundleConfigMapForDV(), nil + case SupplementCABundleConfigMap: + return 
gen.LegacyCABundleConfigMap(), nil + case SupplementImagePullSecret: + return gen.LegacyImagePullSecret(), nil + case SupplementUploaderTLSSecret: + return gen.LegacyUploaderTLSSecretForIngress(), nil + + default: + return types.NamespacedName{}, fmt.Errorf("unknown supplement type: %s", supplementType) + } +} + +// FetchSupplement fetches a supplement resource with fallback to legacy naming +func FetchSupplement[T client.Object]( + ctx context.Context, + c client.Client, + gen Generator, + supplementType SupplementType, + obj T, +) (T, error) { + var empty T + + newName, err := GetSupplementName(gen, supplementType) + if err != nil { + return empty, err + } + + err = c.Get(ctx, newName, obj) + if err == nil { + return obj, nil + } + if !k8serrors.IsNotFound(err) { + return empty, err + } + + legacyName, err := GetLegacySupplementName(gen, supplementType) + if err != nil { + return empty, err + } + + err = c.Get(ctx, legacyName, obj) + if err != nil { + if k8serrors.IsNotFound(err) { + return empty, nil + } + return empty, err + } + + return obj, nil +} diff --git a/images/virtualization-artifact/pkg/controller/supplements/fetch_test.go b/images/virtualization-artifact/pkg/controller/supplements/fetch_test.go new file mode 100644 index 0000000000..b5f18eba03 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/supplements/fetch_test.go @@ -0,0 +1,257 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package supplements + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestFetch(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Fetch Suite") +} + +var _ = Describe("FetchSupplement", func() { + var ( + ctx context.Context + scheme *runtime.Scheme + gen Generator + ) + + BeforeEach(func() { + ctx = context.Background() + + scheme = runtime.NewScheme() + Expect(corev1.AddToScheme(scheme)).To(Succeed()) + + gen = NewGenerator("vi", "test-image", "default", "12345678-1234-1234-1234-123456789abc") + }) + + Context("when resource exists with new naming", func() { + It("should fetch the resource successfully", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "d8v-vi-importer-test-image-12345678-1234-1234-1234-123456789abc", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "importer", + Image: "importer:latest", + }}, + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod). 
+ Build() + + result := &corev1.Pod{} + fetchedPod, err := FetchSupplement(ctx, fakeClient, gen, SupplementImporterPod, result) + + Expect(err).NotTo(HaveOccurred()) + Expect(fetchedPod).NotTo(BeNil()) + Expect(fetchedPod.Name).To(Equal(pod.Name)) + Expect(fetchedPod.Namespace).To(Equal(pod.Namespace)) + }) + }) + + Context("when resource exists with legacy naming", func() { + It("should fetch the resource from legacy naming as fallback", func() { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vi-importer-test-image", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "importer", + Image: "importer:latest", + }}, + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pod). + Build() + + result := &corev1.Pod{} + fetchedPod, err := FetchSupplement(ctx, fakeClient, gen, SupplementImporterPod, result) + + Expect(err).NotTo(HaveOccurred()) + Expect(fetchedPod).NotTo(BeNil()) + Expect(fetchedPod.Name).To(Equal(pod.Name)) + Expect(fetchedPod.Namespace).To(Equal(pod.Namespace)) + }) + }) + + Context("when resource exists in both new and legacy naming", func() { + It("should prefer the new naming", func() { + newPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "d8v-vi-importer-test-image-12345678-1234-1234-1234-123456789abc", + Namespace: "default", + Labels: map[string]string{"version": "new"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "importer", + Image: "importer:v2", + }}, + }, + } + + legacyPod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vi-importer-test-image", + Namespace: "default", + Labels: map[string]string{"version": "legacy"}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "importer", + Image: "importer:v1", + }}, + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(newPod, legacyPod). + Build() + + result := &corev1.Pod{} + fetchedPod, err := FetchSupplement(ctx, fakeClient, gen, SupplementImporterPod, result) + + Expect(err).NotTo(HaveOccurred()) + Expect(fetchedPod).NotTo(BeNil()) + Expect(fetchedPod.Name).To(Equal(newPod.Name)) + Expect(fetchedPod.Labels["version"]).To(Equal("new")) + }) + }) + + Context("when resource does not exist", func() { + It("should return nil without error", func() { + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + Build() + + result := &corev1.Pod{} + fetchedPod, err := FetchSupplement(ctx, fakeClient, gen, SupplementImporterPod, result) + + Expect(err).NotTo(HaveOccurred()) + Expect(fetchedPod).To(BeNil()) + }) + }) + + Context("with different supplement types", func() { + It("should work with UploaderService", func() { + svc := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "d8v-vi-test-image-12345678-1234-1234-1234-123456789abc", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Port: 8080, + }}, + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(svc). 
+ Build() + + result := &corev1.Service{} + fetchedSvc, err := FetchSupplement(ctx, fakeClient, gen, SupplementUploaderService, result) + + Expect(err).NotTo(HaveOccurred()) + Expect(fetchedSvc).NotTo(BeNil()) + Expect(fetchedSvc.Name).To(Equal(svc.Name)) + }) + + It("should work with PersistentVolumeClaim", func() { + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vi-test-image-12345678-1234-1234-1234-123456789abc", + Namespace: "default", + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pvc). + Build() + + result := &corev1.PersistentVolumeClaim{} + fetchedPVC, err := FetchSupplement(ctx, fakeClient, gen, SupplementPVC, result) + + Expect(err).NotTo(HaveOccurred()) + Expect(fetchedPVC).NotTo(BeNil()) + Expect(fetchedPVC.Name).To(Equal(pvc.Name)) + }) + }) + + Context("when client returns other errors", func() { + It("should propagate the error", func() { + fakeClient := &errorClient{ + Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + } + + result := &corev1.Pod{} + _, err := FetchSupplement(ctx, fakeClient, gen, SupplementImporterPod, result) + + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("internal error")) + }) + }) +}) + +// errorClient is a client that always returns an error (not NotFound) +type errorClient struct { + client.Client +} + +func (e *errorClient) Get(ctx context.Context, key types.NamespacedName, obj client.Object, opts ...client.GetOption) error { + return &testError{message: "internal error"} +} + +type testError struct { + message string +} + +func (e *testError) Error() string { + return e.message +} diff --git a/images/virtualization-artifact/pkg/controller/supplements/generator.go b/images/virtualization-artifact/pkg/controller/supplements/generator.go index a0c6da5ad3..0238009b95 100644 --- a/images/virtualization-artifact/pkg/controller/supplements/generator.go +++ b/images/virtualization-artifact/pkg/controller/supplements/generator.go @@ -24,10 +24,24 @@ import ( "k8s.io/utils/strings" ) +const ( + tplCommon = "d8v-%s-%s-%s" + tplDVCRAuthSecret = "d8v-%s-dvcr-auth-%s-%s" + tplDVCRAuthSecretForDV = "d8v-%s-dvcr-auth-dv-%s-%s" + tplDVCRCABundle = "d8v-%s-dvcr-ca-%s-%s" + tplCABundle = "d8v-%s-ca-%s-%s" + tplImagePullSecret = "d8v-%s-pull-image-%s-%s" + tplImporterPod = "d8v-%s-importer-%s-%s" + tplBounderPod = "d8v-%s-bounder-%s-%s" + tplUploaderPod = "d8v-%s-uploader-%s-%s" + tplUploaderTLSSecret = "d8v-%s-tls-%s-%s" +) + type Generator interface { Namespace() string Name() string UID() types.UID + BounderPod() types.NamespacedName ImporterPod() types.NamespacedName UploaderPod() types.NamespacedName @@ -41,6 +55,21 @@ type Generator interface { DVCRAuthSecretForDV() types.NamespacedName UploaderTLSSecretForIngress() types.NamespacedName ImagePullSecret() types.NamespacedName + NetworkPolicy() types.NamespacedName + + LegacyBounderPod() types.NamespacedName + LegacyImporterPod() types.NamespacedName + LegacyUploaderPod() types.NamespacedName + LegacyUploaderService() types.NamespacedName + LegacyUploaderIngress() types.NamespacedName + LegacyDataVolume() types.NamespacedName + LegacyPersistentVolumeClaim() types.NamespacedName + LegacyCABundleConfigMap() types.NamespacedName + LegacyDVCRAuthSecret() types.NamespacedName + LegacyDVCRCABundleConfigMapForDV() types.NamespacedName + LegacyDVCRAuthSecretForDV() 
types.NamespacedName + LegacyUploaderTLSSecretForIngress() types.NamespacedName + LegacyImagePullSecret() types.NamespacedName } // Generator calculates names for supplemental resources, e.g. ImporterPod, AuthSecret or CABundleConfigMap. @@ -60,6 +89,15 @@ func NewGenerator(prefix, name, namespace string, uid types.UID) Generator { } } +func (g *generator) generateName(template string, maxLength int) types.NamespacedName { + maxNameLen := maxLength - len(template) + 6 - len(g.prefix) - len(g.uid) // 6 is for %s placeholders + name := fmt.Sprintf(template, g.prefix, strings.ShortenString(g.name, maxNameLen), g.UID()) + return types.NamespacedName{ + Name: name, + Namespace: g.namespace, + } +} + func (g *generator) Namespace() string { return g.namespace } @@ -74,85 +112,161 @@ func (g *generator) UID() types.UID { // DVCRAuthSecret returns name and namespace for auth Secret copy. func (g *generator) DVCRAuthSecret() types.NamespacedName { - name := fmt.Sprintf("%s-dvcr-auth-%s", g.prefix, g.name) - return g.shortenNamespaced(name) + return g.generateName(tplDVCRAuthSecret, kvalidation.DNS1123SubdomainMaxLength) } // DVCRAuthSecretForDV returns name and namespace for auth Secret copy // compatible with DataVolume: with accessKeyId and secretKey fields. func (g *generator) DVCRAuthSecretForDV() types.NamespacedName { - name := fmt.Sprintf("%s-dvcr-auth-dv-%s", g.prefix, g.name) - return g.shortenNamespaced(name) + return g.generateName(tplDVCRAuthSecretForDV, kvalidation.DNS1123SubdomainMaxLength) } // DVCRCABundleConfigMapForDV returns name and namespace for ConfigMap with ca.crt. func (g *generator) DVCRCABundleConfigMapForDV() types.NamespacedName { - name := fmt.Sprintf("%s-dvcr-ca-dv-%s", g.prefix, g.name) - return g.shortenNamespaced(name) + return g.generateName(tplDVCRCABundle, kvalidation.DNS1123SubdomainMaxLength) } // CABundleConfigMap returns name and namespace for ConfigMap which contains caBundle from dataSource. func (g *generator) CABundleConfigMap() types.NamespacedName { - name := fmt.Sprintf("%s-ca-%s", g.prefix, g.name) - return g.shortenNamespaced(name) + return g.generateName(tplCABundle, kvalidation.DNS1123SubdomainMaxLength) } // ImagePullSecret returns name and namespace for image pull secret for the containerImage dataSource. func (g *generator) ImagePullSecret() types.NamespacedName { - name := fmt.Sprintf("%s-pull-image-%s", g.prefix, g.name) - return g.shortenNamespaced(name) + return g.generateName(tplImagePullSecret, kvalidation.DNS1123SubdomainMaxLength) } // ImporterPod generates name for importer Pod. func (g *generator) ImporterPod() types.NamespacedName { - name := fmt.Sprintf("%s-importer-%s", g.prefix, g.name) - return g.shortenNamespaced(name) + return g.generateName(tplImporterPod, kvalidation.DNS1123SubdomainMaxLength) } -// ImporterPod generates name for importer Pod. +// BounderPod generates name for bounder Pod. func (g *generator) BounderPod() types.NamespacedName { - name := fmt.Sprintf("%s-bounder-%s", g.prefix, g.name) - return g.shortenNamespaced(name) + return g.generateName(tplBounderPod, kvalidation.DNS1123SubdomainMaxLength) } // UploaderPod generates name for uploader Pod. func (g *generator) UploaderPod() types.NamespacedName { - name := fmt.Sprintf("%s-uploader-%s", g.prefix, g.name) - return g.shortenNamespaced(name) + return g.generateName(tplUploaderPod, kvalidation.DNS1123SubdomainMaxLength) } // UploaderService generates name for uploader Service. 
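The +6 in generateName compensates for the three %s placeholders (two bytes each) that are replaced when the template is rendered, so the shortened resource name is budgeted against the template, prefix and UID lengths. A standalone sketch of the same arithmetic with hypothetical values (not part of this patch):

package main

import (
    "fmt"
    "strings"
)

func main() {
    const maxLength = 253               // kvalidation.DNS1123SubdomainMaxLength
    template := "d8v-%s-importer-%s-%s" // tplImporterPod
    prefix := "vi"
    uid := "12345678-1234-1234-1234-123456789abc"
    name := strings.Repeat("very-long-image-name-", 20)

    // Budget left for the resource name after the fixed parts are accounted for.
    maxNameLen := maxLength - len(template) + 6 - len(prefix) - len(uid)
    if len(name) > maxNameLen {
        name = name[:maxNameLen] // k8s.io/utils/strings.ShortenString truncates the same way
    }

    full := fmt.Sprintf(template, prefix, name, uid)
    fmt.Println(len(full), len(full) <= maxLength) // 253 true
}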
func (g *generator) UploaderService() types.NamespacedName { - name := fmt.Sprintf("%s-uploader-svc-%s", g.prefix, g.uid) - return g.shortenNamespaced(name) + return g.generateName(tplCommon, kvalidation.DNS1123LabelMaxLength) } // UploaderIngress generates name for uploader Ingress. func (g *generator) UploaderIngress() types.NamespacedName { - name := fmt.Sprintf("%s-uploader-ingress-%s", g.prefix, g.uid) - return g.shortenNamespaced(name) + return g.generateName(tplCommon, kvalidation.DNS1123SubdomainMaxLength) } // UploaderTLSSecretForIngress generates name for uploader tls secret. func (g *generator) UploaderTLSSecretForIngress() types.NamespacedName { - name := fmt.Sprintf("%s-uploader-tls-ing-%s", g.prefix, g.name) - return g.shortenNamespaced(name) + return g.generateName(tplUploaderTLSSecret, kvalidation.DNS1123SubdomainMaxLength) } // DataVolume generates name for underlying DataVolume. // DataVolume is always one for vmd/vmi, so prefix is used. func (g *generator) DataVolume() types.NamespacedName { - dvName := fmt.Sprintf("%s-%s-%s", g.prefix, g.name, g.uid) - return g.shortenNamespaced(dvName) + return g.generateName(tplCommon, kvalidation.DNS1123SubdomainMaxLength) } +// NetworkPolicy generates name for NetworkPolicy. +func (g *generator) NetworkPolicy() types.NamespacedName { + return g.generateName(tplCommon, kvalidation.DNS1123SubdomainMaxLength) +} + +// PersistentVolumeClaim generates name for underlying PersistentVolumeClaim. +// PVC is always one for vmd/vmi, so prefix is used. func (g *generator) PersistentVolumeClaim() types.NamespacedName { - return g.DataVolume() + return g.generateName(tplCommon, kvalidation.DNS1123SubdomainMaxLength) } +// Legacy methods for backward compatibility + func (g *generator) shortenNamespaced(name string) types.NamespacedName { return types.NamespacedName{ Name: strings.ShortenString(name, kvalidation.DNS1123SubdomainMaxLength), Namespace: g.namespace, } } + +// LegacyDVCRAuthSecret returns old format name for auth Secret copy. +func (g *generator) LegacyDVCRAuthSecret() types.NamespacedName { + name := fmt.Sprintf("%s-dvcr-auth-%s", g.prefix, g.name) + return g.shortenNamespaced(name) +} + +// LegacyDVCRAuthSecretForDV returns old format name for auth Secret copy +// compatible with DataVolume: with accessKeyId and secretKey fields. +func (g *generator) LegacyDVCRAuthSecretForDV() types.NamespacedName { + name := fmt.Sprintf("%s-dvcr-auth-dv-%s", g.prefix, g.name) + return g.shortenNamespaced(name) +} + +// LegacyDVCRCABundleConfigMapForDV returns old format name for ConfigMap with ca.crt. +func (g *generator) LegacyDVCRCABundleConfigMapForDV() types.NamespacedName { + name := fmt.Sprintf("%s-dvcr-ca-dv-%s", g.prefix, g.name) + return g.shortenNamespaced(name) +} + +// LegacyCABundleConfigMap returns old format name for ConfigMap which contains caBundle from dataSource. +func (g *generator) LegacyCABundleConfigMap() types.NamespacedName { + name := fmt.Sprintf("%s-ca-%s", g.prefix, g.name) + return g.shortenNamespaced(name) +} + +// LegacyImagePullSecret returns old format name for image pull secret for the containerImage dataSource. +func (g *generator) LegacyImagePullSecret() types.NamespacedName { + name := fmt.Sprintf("%s-pull-image-%s", g.prefix, g.name) + return g.shortenNamespaced(name) +} + +// LegacyImporterPod generates old format name for importer Pod. 
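The GetPod/GetDataVolume/GetPersistentVolumeClaim rewrites above all resolve their objects through FetchSupplement, which looks up the new d8v- name first and falls back to the legacy name. A minimal caller sketch; the import path is assumed from the module layout and the wrapper name is illustrative:

package example

import (
    "context"

    corev1 "k8s.io/api/core/v1"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
)

// GetImporterPod resolves the importer Pod for the given supplements generator.
// FetchSupplement tries gen.ImporterPod() first, then gen.LegacyImporterPod(),
// and returns a nil Pod with a nil error when neither object exists.
func GetImporterPod(ctx context.Context, c client.Client, gen supplements.Generator) (*corev1.Pod, error) {
    return supplements.FetchSupplement(ctx, c, gen, supplements.SupplementImporterPod, &corev1.Pod{})
}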
+func (g *generator) LegacyImporterPod() types.NamespacedName { + name := fmt.Sprintf("%s-importer-%s", g.prefix, g.name) + return g.shortenNamespaced(name) +} + +// LegacyBounderPod generates old format name for bounder Pod. +func (g *generator) LegacyBounderPod() types.NamespacedName { + name := fmt.Sprintf("%s-bounder-%s", g.prefix, g.name) + return g.shortenNamespaced(name) +} + +// LegacyUploaderPod generates old format name for uploader Pod. +func (g *generator) LegacyUploaderPod() types.NamespacedName { + name := fmt.Sprintf("%s-uploader-%s", g.prefix, g.name) + return g.shortenNamespaced(name) +} + +// LegacyUploaderService generates old format name for uploader Service. +func (g *generator) LegacyUploaderService() types.NamespacedName { + name := fmt.Sprintf("%s-uploader-svc-%s", g.prefix, string(g.uid)) + return g.shortenNamespaced(name) +} + +// LegacyUploaderIngress generates old format name for uploader Ingress. +func (g *generator) LegacyUploaderIngress() types.NamespacedName { + name := fmt.Sprintf("%s-uploader-ingress-%s", g.prefix, string(g.uid)) + return g.shortenNamespaced(name) +} + +// LegacyUploaderTLSSecretForIngress generates old format name for uploader tls secret. +func (g *generator) LegacyUploaderTLSSecretForIngress() types.NamespacedName { + name := fmt.Sprintf("%s-uploader-tls-ing-%s", g.prefix, g.name) + return g.shortenNamespaced(name) +} + +// LegacyDataVolume generates old format name for underlying DataVolume. +// DataVolume is always one for vmd/vmi, so prefix is used. +func (g *generator) LegacyDataVolume() types.NamespacedName { + dvName := fmt.Sprintf("%s-%s-%s", g.prefix, g.name, string(g.uid)) + return g.shortenNamespaced(dvName) +} + +// LegacyPersistentVolumeClaim generates old format name for underlying PersistentVolumeClaim. +func (g *generator) LegacyPersistentVolumeClaim() types.NamespacedName { + return g.LegacyDataVolume() +} diff --git a/images/virtualization-artifact/pkg/controller/supplements/generator_test.go b/images/virtualization-artifact/pkg/controller/supplements/generator_test.go new file mode 100644 index 0000000000..56c140346a --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/supplements/generator_test.go @@ -0,0 +1,96 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package supplements + +import ( + "strings" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/types" + kvalidation "k8s.io/apimachinery/pkg/util/validation" +) + +var _ = Describe("Generator", func() { + var ( + gen Generator + prefix string + namespace string + uid types.UID + ) + + BeforeEach(func() { + prefix = "vi" + namespace = "default" + uid = types.UID("12345678-1234-1234-1234-123456789012") + }) + + Context("Name shortening", func() { + DescribeTable("should handle short names without truncation", + func(method func(Generator) types.NamespacedName, expectedPrefix string) { + name := "test-image" + gen = NewGenerator(prefix, name, namespace, uid) + result := method(gen) + + Expect(result.Name).To(HavePrefix("d8v-")) + Expect(result.Name).To(ContainSubstring(expectedPrefix)) + Expect(result.Name).To(ContainSubstring(name)) + Expect(result.Name).To(HaveSuffix(string(uid))) + }, + Entry("DVCRAuthSecret", func(g Generator) types.NamespacedName { return g.DVCRAuthSecret() }, "dvcr-auth"), + Entry("DVCRAuthSecretForDV", func(g Generator) types.NamespacedName { return g.DVCRAuthSecretForDV() }, "dvcr-auth-dv"), + Entry("DVCRCABundleConfigMapForDV", func(g Generator) types.NamespacedName { return g.DVCRCABundleConfigMapForDV() }, "dvcr-ca"), + Entry("CABundleConfigMap", func(g Generator) types.NamespacedName { return g.CABundleConfigMap() }, "ca"), + Entry("ImagePullSecret", func(g Generator) types.NamespacedName { return g.ImagePullSecret() }, "pull-image"), + Entry("ImporterPod", func(g Generator) types.NamespacedName { return g.ImporterPod() }, "importer"), + Entry("BounderPod", func(g Generator) types.NamespacedName { return g.BounderPod() }, "bounder"), + Entry("UploaderPod", func(g Generator) types.NamespacedName { return g.UploaderPod() }, "uploader"), + Entry("UploaderService", func(g Generator) types.NamespacedName { return g.UploaderService() }, "vi"), + Entry("UploaderIngress", func(g Generator) types.NamespacedName { return g.UploaderIngress() }, "vi"), + Entry("UploaderTLSSecret", func(g Generator) types.NamespacedName { return g.UploaderTLSSecretForIngress() }, "tls"), + Entry("DataVolume", func(g Generator) types.NamespacedName { return g.DataVolume() }, "vi"), + Entry("PersistentVolumeClaim", func(g Generator) types.NamespacedName { return g.PersistentVolumeClaim() }, "vi"), + Entry("NetworkPolicy", func(g Generator) types.NamespacedName { return g.NetworkPolicy() }, "vi"), + ) + + DescribeTable("should truncate long names to respect limits", + func(method func(Generator) types.NamespacedName, maxLength int) { + name := strings.Repeat("very-long-resource-name-", 30) + gen = NewGenerator(prefix, name, namespace, uid) + result := method(gen) + + Expect(len(result.Name)).To(BeNumerically("<=", maxLength)) + Expect(result.Name).To(HavePrefix("d8v-")) + Expect(result.Name).To(ContainSubstring(string(uid))) + }, + Entry("DVCRAuthSecret - 253 limit", func(g Generator) types.NamespacedName { return g.DVCRAuthSecret() }, kvalidation.DNS1123SubdomainMaxLength), + Entry("DVCRAuthSecretForDV - 253 limit", func(g Generator) types.NamespacedName { return g.DVCRAuthSecretForDV() }, kvalidation.DNS1123SubdomainMaxLength), + Entry("DVCRCABundleConfigMapForDV - 253 limit", func(g Generator) types.NamespacedName { return g.DVCRCABundleConfigMapForDV() }, kvalidation.DNS1123SubdomainMaxLength), + Entry("CABundleConfigMap - 253 limit", func(g Generator) types.NamespacedName { return g.CABundleConfigMap() }, kvalidation.DNS1123SubdomainMaxLength), + Entry("ImagePullSecret - 253 limit", func(g Generator) types.NamespacedName { return 
g.ImagePullSecret() }, kvalidation.DNS1123SubdomainMaxLength), + Entry("ImporterPod - 253 limit", func(g Generator) types.NamespacedName { return g.ImporterPod() }, kvalidation.DNS1123SubdomainMaxLength), + Entry("BounderPod - 253 limit", func(g Generator) types.NamespacedName { return g.BounderPod() }, kvalidation.DNS1123SubdomainMaxLength), + Entry("UploaderPod - 253 limit", func(g Generator) types.NamespacedName { return g.UploaderPod() }, kvalidation.DNS1123SubdomainMaxLength), + Entry("UploaderService - 63 limit", func(g Generator) types.NamespacedName { return g.UploaderService() }, kvalidation.DNS1123LabelMaxLength), + Entry("UploaderIngress - 253 limit", func(g Generator) types.NamespacedName { return g.UploaderIngress() }, kvalidation.DNS1123SubdomainMaxLength), + Entry("UploaderTLSSecret - 253 limit", func(g Generator) types.NamespacedName { return g.UploaderTLSSecretForIngress() }, kvalidation.DNS1123SubdomainMaxLength), + Entry("DataVolume - 253 limit", func(g Generator) types.NamespacedName { return g.DataVolume() }, kvalidation.DNS1123SubdomainMaxLength), + Entry("PersistentVolumeClaim - 253 limit", func(g Generator) types.NamespacedName { return g.PersistentVolumeClaim() }, kvalidation.DNS1123SubdomainMaxLength), + Entry("NetworkPolicy - 253 limit", func(g Generator) types.NamespacedName { return g.NetworkPolicy() }, kvalidation.DNS1123SubdomainMaxLength), + ) + }) +}) diff --git a/images/virtualization-artifact/pkg/controller/uploader/uploader_ingress.go b/images/virtualization-artifact/pkg/controller/uploader/uploader_ingress.go index 686ba23244..b4d3deab28 100644 --- a/images/virtualization-artifact/pkg/controller/uploader/uploader_ingress.go +++ b/images/virtualization-artifact/pkg/controller/uploader/uploader_ingress.go @@ -22,12 +22,10 @@ import ( netv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization-controller/pkg/common" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/common/pwgen" ) @@ -147,11 +145,3 @@ func (i *Ingress) makeSpec() *netv1.Ingress { func (i *Ingress) generatePath() string { return fmt.Sprintf(tmplIngressPath, pwgen.AlphaNum(32)) } - -type IngressNamer interface { - UploaderIngress() types.NamespacedName -} - -func FindIngress(ctx context.Context, client client.Client, name IngressNamer) (*netv1.Ingress, error) { - return object.FetchObject(ctx, name.UploaderIngress(), client, &netv1.Ingress{}) -} diff --git a/images/virtualization-artifact/pkg/controller/uploader/uploader_pod.go b/images/virtualization-artifact/pkg/controller/uploader/uploader_pod.go index 0472ee3839..813e6d86f0 100644 --- a/images/virtualization-artifact/pkg/controller/uploader/uploader_pod.go +++ b/images/virtualization-artifact/pkg/controller/uploader/uploader_pod.go @@ -22,13 +22,11 @@ import ( corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization-controller/pkg/common" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - "github.com/deckhouse/virtualization-controller/pkg/common/object" podutil "github.com/deckhouse/virtualization-controller/pkg/common/pod" 
"github.com/deckhouse/virtualization-controller/pkg/common/provisioner" ) @@ -193,11 +191,3 @@ func (p *Pod) addVolumes(pod *corev1.Pod, container *corev1.Container) { ) } } - -type PodNamer interface { - UploaderPod() types.NamespacedName -} - -func FindPod(ctx context.Context, client client.Client, name PodNamer) (*corev1.Pod, error) { - return object.FetchObject(ctx, name.UploaderPod(), client, &corev1.Pod{}) -} diff --git a/images/virtualization-artifact/pkg/controller/uploader/uploader_service.go b/images/virtualization-artifact/pkg/controller/uploader/uploader_service.go index 4766bdd51e..2ba4311276 100644 --- a/images/virtualization-artifact/pkg/controller/uploader/uploader_service.go +++ b/images/virtualization-artifact/pkg/controller/uploader/uploader_service.go @@ -21,13 +21,11 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/deckhouse/virtualization-controller/pkg/common" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - "github.com/deckhouse/virtualization-controller/pkg/common/object" ) type ServiceSettings struct { @@ -92,11 +90,3 @@ func (s *Service) makeSpec() *corev1.Service { return service } - -type ServiceNamer interface { - UploaderService() types.NamespacedName -} - -func FindService(ctx context.Context, client client.Client, name ServiceNamer) (*corev1.Service, error) { - return object.FetchObject(ctx, name.UploaderService(), client, &corev1.Service{}) -} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/init.go b/images/virtualization-artifact/pkg/controller/vd/internal/init.go index 236a59ac3f..11ae6694b3 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/init.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/init.go @@ -41,7 +41,7 @@ func (h *InitHandler) Handle(ctx context.Context, vd *v1alpha2.VirtualDisk) (rec // We should have different names for support migration volumes. // If the PVC name is empty, we should generate it and update the status immediately. 
if vd.Status.Target.PersistentVolumeClaim == "" { - name := fmt.Sprintf("vd-%s-%s", vd.UID, pwgen.LowerAlpha(5)) + name := fmt.Sprintf("d8v-vd-%s-%s", vd.UID, pwgen.LowerAlpha(5)) vdsupplements.SetPVCName(vd, name) return reconcile.Result{RequeueAfter: 100 * time.Millisecond}, reconciler.ErrStopHandlerChain } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/migration.go b/images/virtualization-artifact/pkg/controller/vd/internal/migration.go index 2d8e501a80..64e542babb 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/migration.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/migration.go @@ -651,7 +651,7 @@ func (h MigrationHandler) createTargetPersistentVolumeClaim(ctx context.Context, pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ - GenerateName: fmt.Sprintf("vd-%s-", vd.UID), + GenerateName: fmt.Sprintf("d8v-vd-%s-", vd.UID), Namespace: vd.Namespace, OwnerReferences: []metav1.OwnerReference{ service.MakeControllerOwnerReference(vd), diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go index acb7f18525..be858ba5c5 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/blank_test.go @@ -76,7 +76,7 @@ var _ = Describe("Blank", func() { CleanUpSupplementsFunc: func(_ context.Context, _ supplements.Generator) (bool, error) { return false, nil }, - ProtectFunc: func(_ context.Context, _ client.Object, _ *cdiv1.DataVolume, _ *corev1.PersistentVolumeClaim) error { + ProtectFunc: func(_ context.Context, _ supplements.Generator, _ client.Object, _ *cdiv1.DataVolume, _ *corev1.PersistentVolumeClaim) error { return nil }, } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go index 8d66c7b725..cf20f0a02a 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/http.go @@ -120,18 +120,18 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (re setPhaseConditionForFinishedDisk(pvc, cb, &vd.Status.Phase, supgen) // Protect Ready Disk and underlying PVC. - err = ds.diskService.Protect(ctx, vd, nil, pvc) + err = ds.diskService.Protect(ctx, supgen, vd, nil, pvc) if err != nil { return reconcile.Result{}, err } // Unprotect import time supplements to delete them later. 
- err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } - err = ds.diskService.Unprotect(ctx, dv) + err = ds.diskService.Unprotect(ctx, supgen, dv) if err != nil { return reconcile.Result{}, err } @@ -185,7 +185,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (re return reconcile.Result{}, setPhaseConditionFromPodError(ctx, err, pod, vd, cb, ds.client) } - err = ds.importerService.Protect(ctx, pod) + err = ds.importerService.Protect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -321,7 +321,7 @@ func (ds HTTPDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) (re vd.Status.Progress = ds.diskService.GetProgress(dv, vd.Status.Progress, service.NewScaleOption(50, 100)) vd.Status.Capacity = ds.diskService.GetCapacity(pvc) - err = ds.diskService.Protect(ctx, vd, dv, pvc) + err = ds.diskService.Protect(ctx, supgen, vd, dv, pvc) if err != nil { return reconcile.Result{}, err } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/mock.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/mock.go index eefb839265..7e1e92e2e9 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/mock.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/mock.go @@ -248,7 +248,7 @@ var _ BlankDataSourceDiskService = &BlankDataSourceDiskServiceMock{} // GetVolumeAndAccessModesFunc: func(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) { // panic("mock out the GetVolumeAndAccessModes method") // }, -// ProtectFunc: func(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { +// ProtectFunc: func(ctx context.Context, sup supplements.Generator, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { // panic("mock out the Protect method") // }, // } @@ -271,7 +271,7 @@ type BlankDataSourceDiskServiceMock struct { GetVolumeAndAccessModesFunc func(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) // ProtectFunc mocks the Protect method. - ProtectFunc func(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error + ProtectFunc func(ctx context.Context, sup supplements.Generator, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error // calls tracks calls to the methods. calls struct { @@ -307,6 +307,8 @@ type BlankDataSourceDiskServiceMock struct { Protect []struct { // Ctx is the ctx argument value. Ctx context.Context + // Sup is the sup argument value. + Sup supplements.Generator // Owner is the owner argument value. Owner client.Object // Dv is the dv argument value. @@ -467,17 +469,19 @@ func (mock *BlankDataSourceDiskServiceMock) GetVolumeAndAccessModesCalls() []str } // Protect calls ProtectFunc. 
-func (mock *BlankDataSourceDiskServiceMock) Protect(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { +func (mock *BlankDataSourceDiskServiceMock) Protect(ctx context.Context, sup supplements.Generator, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { if mock.ProtectFunc == nil { panic("BlankDataSourceDiskServiceMock.ProtectFunc: method is nil but BlankDataSourceDiskService.Protect was just called") } callInfo := struct { Ctx context.Context + Sup supplements.Generator Owner client.Object Dv *cdiv1.DataVolume Pvc *corev1.PersistentVolumeClaim }{ Ctx: ctx, + Sup: sup, Owner: owner, Dv: dv, Pvc: pvc, @@ -485,7 +489,7 @@ func (mock *BlankDataSourceDiskServiceMock) Protect(ctx context.Context, owner c mock.lockProtect.Lock() mock.calls.Protect = append(mock.calls.Protect, callInfo) mock.lockProtect.Unlock() - return mock.ProtectFunc(ctx, owner, dv, pvc) + return mock.ProtectFunc(ctx, sup, owner, dv, pvc) } // ProtectCalls gets all the calls that were made to Protect. @@ -494,12 +498,14 @@ func (mock *BlankDataSourceDiskServiceMock) Protect(ctx context.Context, owner c // len(mockedBlankDataSourceDiskService.ProtectCalls()) func (mock *BlankDataSourceDiskServiceMock) ProtectCalls() []struct { Ctx context.Context + Sup supplements.Generator Owner client.Object Dv *cdiv1.DataVolume Pvc *corev1.PersistentVolumeClaim } { var calls []struct { Ctx context.Context + Sup supplements.Generator Owner client.Object Dv *cdiv1.DataVolume Pvc *corev1.PersistentVolumeClaim @@ -535,7 +541,7 @@ var _ ObjectRefVirtualImageDiskService = &ObjectRefVirtualImageDiskServiceMock{} // GetProgressFunc: func(dv *cdiv1.DataVolume, prevProgress string, opts ...service.GetProgressOption) string { // panic("mock out the GetProgress method") // }, -// ProtectFunc: func(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { +// ProtectFunc: func(ctx context.Context, sup supplements.Generator, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { // panic("mock out the Protect method") // }, // StartFunc: func(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj client.Object, sup supplements.DataVolumeSupplement, opts ...service.Option) error { @@ -564,7 +570,7 @@ type ObjectRefVirtualImageDiskServiceMock struct { GetProgressFunc func(dv *cdiv1.DataVolume, prevProgress string, opts ...service.GetProgressOption) string // ProtectFunc mocks the Protect method. - ProtectFunc func(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error + ProtectFunc func(ctx context.Context, sup supplements.Generator, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error // StartFunc mocks the Start method. StartFunc func(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj client.Object, sup supplements.DataVolumeSupplement, opts ...service.Option) error @@ -610,6 +616,8 @@ type ObjectRefVirtualImageDiskServiceMock struct { Protect []struct { // Ctx is the ctx argument value. Ctx context.Context + // Sup is the sup argument value. + Sup supplements.Generator // Owner is the owner argument value. Owner client.Object // Dv is the dv argument value. @@ -825,17 +833,19 @@ func (mock *ObjectRefVirtualImageDiskServiceMock) GetProgressCalls() []struct { } // Protect calls ProtectFunc. 
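The regenerated mocks record the new sup argument alongside the others, so the updated test suites stub Protect with the extra supplements.Generator parameter and can assert on the generator that was passed. A sketch under assumed names (package, import paths and test name are illustrative; the stub mirrors the suites above):

package source

import (
    "context"
    "testing"

    corev1 "k8s.io/api/core/v1"
    cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
    "sigs.k8s.io/controller-runtime/pkg/client"

    "github.com/deckhouse/virtualization-controller/pkg/controller/supplements"
)

func TestProtectRecordsGenerator(t *testing.T) {
    mock := &BlankDataSourceDiskServiceMock{
        ProtectFunc: func(_ context.Context, _ supplements.Generator, _ client.Object, _ *cdiv1.DataVolume, _ *corev1.PersistentVolumeClaim) error {
            return nil
        },
    }

    gen := supplements.NewGenerator("vd", "blank", "default", "12345678-1234-1234-1234-123456789abc")
    if err := mock.Protect(context.Background(), gen, nil, nil, nil); err != nil {
        t.Fatal(err)
    }
    if mock.ProtectCalls()[0].Sup != gen {
        t.Fatal("expected the supplements generator to be recorded")
    }
}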
-func (mock *ObjectRefVirtualImageDiskServiceMock) Protect(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { +func (mock *ObjectRefVirtualImageDiskServiceMock) Protect(ctx context.Context, sup supplements.Generator, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { if mock.ProtectFunc == nil { panic("ObjectRefVirtualImageDiskServiceMock.ProtectFunc: method is nil but ObjectRefVirtualImageDiskService.Protect was just called") } callInfo := struct { Ctx context.Context + Sup supplements.Generator Owner client.Object Dv *cdiv1.DataVolume Pvc *corev1.PersistentVolumeClaim }{ Ctx: ctx, + Sup: sup, Owner: owner, Dv: dv, Pvc: pvc, @@ -843,7 +853,7 @@ func (mock *ObjectRefVirtualImageDiskServiceMock) Protect(ctx context.Context, o mock.lockProtect.Lock() mock.calls.Protect = append(mock.calls.Protect, callInfo) mock.lockProtect.Unlock() - return mock.ProtectFunc(ctx, owner, dv, pvc) + return mock.ProtectFunc(ctx, sup, owner, dv, pvc) } // ProtectCalls gets all the calls that were made to Protect. @@ -852,12 +862,14 @@ func (mock *ObjectRefVirtualImageDiskServiceMock) Protect(ctx context.Context, o // len(mockedObjectRefVirtualImageDiskService.ProtectCalls()) func (mock *ObjectRefVirtualImageDiskServiceMock) ProtectCalls() []struct { Ctx context.Context + Sup supplements.Generator Owner client.Object Dv *cdiv1.DataVolume Pvc *corev1.PersistentVolumeClaim } { var calls []struct { Ctx context.Context + Sup supplements.Generator Owner client.Object Dv *cdiv1.DataVolume Pvc *corev1.PersistentVolumeClaim @@ -949,7 +961,7 @@ var _ ObjectRefClusterVirtualImageDiskService = &ObjectRefClusterVirtualImageDis // GetProgressFunc: func(dv *cdiv1.DataVolume, prevProgress string, opts ...service.GetProgressOption) string { // panic("mock out the GetProgress method") // }, -// ProtectFunc: func(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { +// ProtectFunc: func(ctx context.Context, sup supplements.Generator, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { // panic("mock out the Protect method") // }, // StartFunc: func(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj client.Object, sup supplements.DataVolumeSupplement, opts ...service.Option) error { @@ -978,7 +990,7 @@ type ObjectRefClusterVirtualImageDiskServiceMock struct { GetProgressFunc func(dv *cdiv1.DataVolume, prevProgress string, opts ...service.GetProgressOption) string // ProtectFunc mocks the Protect method. - ProtectFunc func(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error + ProtectFunc func(ctx context.Context, sup supplements.Generator, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error // StartFunc mocks the Start method. StartFunc func(ctx context.Context, pvcSize resource.Quantity, sc *storagev1.StorageClass, source *cdiv1.DataVolumeSource, obj client.Object, sup supplements.DataVolumeSupplement, opts ...service.Option) error @@ -1024,6 +1036,8 @@ type ObjectRefClusterVirtualImageDiskServiceMock struct { Protect []struct { // Ctx is the ctx argument value. Ctx context.Context + // Sup is the sup argument value. + Sup supplements.Generator // Owner is the owner argument value. Owner client.Object // Dv is the dv argument value. 
@@ -1239,17 +1253,19 @@ func (mock *ObjectRefClusterVirtualImageDiskServiceMock) GetProgressCalls() []st } // Protect calls ProtectFunc. -func (mock *ObjectRefClusterVirtualImageDiskServiceMock) Protect(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { +func (mock *ObjectRefClusterVirtualImageDiskServiceMock) Protect(ctx context.Context, sup supplements.Generator, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { if mock.ProtectFunc == nil { panic("ObjectRefClusterVirtualImageDiskServiceMock.ProtectFunc: method is nil but ObjectRefClusterVirtualImageDiskService.Protect was just called") } callInfo := struct { Ctx context.Context + Sup supplements.Generator Owner client.Object Dv *cdiv1.DataVolume Pvc *corev1.PersistentVolumeClaim }{ Ctx: ctx, + Sup: sup, Owner: owner, Dv: dv, Pvc: pvc, @@ -1257,7 +1273,7 @@ func (mock *ObjectRefClusterVirtualImageDiskServiceMock) Protect(ctx context.Con mock.lockProtect.Lock() mock.calls.Protect = append(mock.calls.Protect, callInfo) mock.lockProtect.Unlock() - return mock.ProtectFunc(ctx, owner, dv, pvc) + return mock.ProtectFunc(ctx, sup, owner, dv, pvc) } // ProtectCalls gets all the calls that were made to Protect. @@ -1266,12 +1282,14 @@ func (mock *ObjectRefClusterVirtualImageDiskServiceMock) Protect(ctx context.Con // len(mockedObjectRefClusterVirtualImageDiskService.ProtectCalls()) func (mock *ObjectRefClusterVirtualImageDiskServiceMock) ProtectCalls() []struct { Ctx context.Context + Sup supplements.Generator Owner client.Object Dv *cdiv1.DataVolume Pvc *corev1.PersistentVolumeClaim } { var calls []struct { Ctx context.Context + Sup supplements.Generator Owner client.Object Dv *cdiv1.DataVolume Pvc *corev1.PersistentVolumeClaim @@ -1354,7 +1372,7 @@ var _ ObjectRefVirtualDiskSnapshotDiskService = &ObjectRefVirtualDiskSnapshotDis // GetCapacityFunc: func(pvc *corev1.PersistentVolumeClaim) string { // panic("mock out the GetCapacity method") // }, -// ProtectFunc: func(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { +// ProtectFunc: func(ctx context.Context, sup supplements.Generator, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { // panic("mock out the Protect method") // }, // } @@ -1371,7 +1389,7 @@ type ObjectRefVirtualDiskSnapshotDiskServiceMock struct { GetCapacityFunc func(pvc *corev1.PersistentVolumeClaim) string // ProtectFunc mocks the Protect method. - ProtectFunc func(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error + ProtectFunc func(ctx context.Context, sup supplements.Generator, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error // calls tracks calls to the methods. calls struct { @@ -1391,6 +1409,8 @@ type ObjectRefVirtualDiskSnapshotDiskServiceMock struct { Protect []struct { // Ctx is the ctx argument value. Ctx context.Context + // Sup is the sup argument value. + Sup supplements.Generator // Owner is the owner argument value. Owner client.Object // Dv is the dv argument value. @@ -1473,17 +1493,19 @@ func (mock *ObjectRefVirtualDiskSnapshotDiskServiceMock) GetCapacityCalls() []st } // Protect calls ProtectFunc. 
-func (mock *ObjectRefVirtualDiskSnapshotDiskServiceMock) Protect(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { +func (mock *ObjectRefVirtualDiskSnapshotDiskServiceMock) Protect(ctx context.Context, sup supplements.Generator, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error { if mock.ProtectFunc == nil { panic("ObjectRefVirtualDiskSnapshotDiskServiceMock.ProtectFunc: method is nil but ObjectRefVirtualDiskSnapshotDiskService.Protect was just called") } callInfo := struct { Ctx context.Context + Sup supplements.Generator Owner client.Object Dv *cdiv1.DataVolume Pvc *corev1.PersistentVolumeClaim }{ Ctx: ctx, + Sup: sup, Owner: owner, Dv: dv, Pvc: pvc, @@ -1491,7 +1513,7 @@ func (mock *ObjectRefVirtualDiskSnapshotDiskServiceMock) Protect(ctx context.Con mock.lockProtect.Lock() mock.calls.Protect = append(mock.calls.Protect, callInfo) mock.lockProtect.Unlock() - return mock.ProtectFunc(ctx, owner, dv, pvc) + return mock.ProtectFunc(ctx, sup, owner, dv, pvc) } // ProtectCalls gets all the calls that were made to Protect. @@ -1500,12 +1522,14 @@ func (mock *ObjectRefVirtualDiskSnapshotDiskServiceMock) Protect(ctx context.Con // len(mockedObjectRefVirtualDiskSnapshotDiskService.ProtectCalls()) func (mock *ObjectRefVirtualDiskSnapshotDiskServiceMock) ProtectCalls() []struct { Ctx context.Context + Sup supplements.Generator Owner client.Object Dv *cdiv1.DataVolume Pvc *corev1.PersistentVolumeClaim } { var calls []struct { Ctx context.Context + Sup supplements.Generator Owner client.Object Dv *cdiv1.DataVolume Pvc *corev1.PersistentVolumeClaim diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go index a301316902..a88ae1cdfa 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_cvi_test.go @@ -71,7 +71,7 @@ var _ = Describe("ObjectRef ClusterVirtualImage", func() { CleanUpSupplementsFunc: func(_ context.Context, _ supplements.Generator) (bool, error) { return false, nil }, - ProtectFunc: func(_ context.Context, _ client.Object, _ *cdiv1.DataVolume, _ *corev1.PersistentVolumeClaim) error { + ProtectFunc: func(_ context.Context, _ supplements.Generator, _ client.Object, _ *cdiv1.DataVolume, _ *corev1.PersistentVolumeClaim) error { return nil }, } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go index 18ece6c3c9..53dbe96689 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vdsnapshot_test.go @@ -80,7 +80,7 @@ var _ = Describe("ObjectRef VirtualDiskSnapshot", func() { CleanUpSupplementsFunc: func(_ context.Context, _ supplements.Generator) (bool, error) { return false, nil }, - ProtectFunc: func(_ context.Context, _ client.Object, _ *cdiv1.DataVolume, _ *corev1.PersistentVolumeClaim) error { + ProtectFunc: func(_ context.Context, _ supplements.Generator, _ client.Object, _ *cdiv1.DataVolume, _ *corev1.PersistentVolumeClaim) error { return nil }, } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go 
b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go index cd640d4161..0ecff188f1 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/object_ref_vi_test.go @@ -71,7 +71,7 @@ var _ = Describe("ObjectRef VirtualImage", func() { CleanUpSupplementsFunc: func(_ context.Context, _ supplements.Generator) (bool, error) { return false, nil }, - ProtectFunc: func(_ context.Context, _ client.Object, _ *cdiv1.DataVolume, _ *corev1.PersistentVolumeClaim) error { + ProtectFunc: func(_ context.Context, _ supplements.Generator, _ client.Object, _ *cdiv1.DataVolume, _ *corev1.PersistentVolumeClaim) error { return nil }, } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go index 227ade7032..9bd49a5cb1 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/registry.go @@ -122,18 +122,18 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) setPhaseConditionForFinishedDisk(pvc, cb, &vd.Status.Phase, supgen) // Protect Ready Disk and underlying PVC. - err = ds.diskService.Protect(ctx, vd, nil, pvc) + err = ds.diskService.Protect(ctx, supgen, vd, nil, pvc) if err != nil { return reconcile.Result{}, err } // Unprotect import time supplements to delete them later. - err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } - err = ds.diskService.Unprotect(ctx, dv) + err = ds.diskService.Unprotect(ctx, supgen, dv) if err != nil { return reconcile.Result{}, err } @@ -195,7 +195,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) vd.Status.Progress = ds.statService.GetProgress(vd.GetUID(), pod, vd.Status.Progress, service.NewScaleOption(0, 50)) - err = ds.importerService.Protect(ctx, pod) + err = ds.importerService.Protect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -328,7 +328,7 @@ func (ds RegistryDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) vd.Status.Capacity = ds.diskService.GetCapacity(pvc) vdsupplements.SetPVCName(vd, dv.Status.ClaimName) - err = ds.diskService.Protect(ctx, vd, dv, pvc) + err = ds.diskService.Protect(ctx, supgen, vd, dv, pvc) if err != nil { return reconcile.Result{}, err } diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go index 9825efaeb4..1889ced6ce 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/ready_step.go @@ -41,7 +41,7 @@ const readyStep = "ready" type ReadyStepDiskService interface { GetCapacity(pvc *corev1.PersistentVolumeClaim) string CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) - Protect(ctx context.Context, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error + Protect(ctx context.Context, sup supplements.Generator, owner client.Object, dv *cdiv1.DataVolume, pvc *corev1.PersistentVolumeClaim) error } type ReadyStep struct { @@ -108,13 +108,13 @@ func (s ReadyStep) Take(ctx context.Context, vd 
*v1alpha2.VirtualDisk) (*reconci log.Debug("PVC is Bound") - err := s.diskService.Protect(ctx, vd, nil, s.pvc) + supgen := vdsupplements.NewGenerator(vd) + err := s.diskService.Protect(ctx, supgen, vd, nil, s.pvc) if err != nil { return nil, fmt.Errorf("protect underlying pvc: %w", err) } if object.ShouldCleanupSubResources(vd) { - supgen := vdsupplements.NewGenerator(vd) _, err = s.diskService.CleanUpSupplements(ctx, supgen) if err != nil { return nil, fmt.Errorf("clean up supplements: %w", err) diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go index c6790a7fa0..b4e72d6e3b 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/upload.go @@ -129,18 +129,18 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) ( setPhaseConditionForFinishedDisk(pvc, cb, &vd.Status.Phase, supgen) // Protect Ready Disk and underlying PVC. - err = ds.diskService.Protect(ctx, vd, nil, pvc) + err = ds.diskService.Protect(ctx, supgen.Generator, vd, nil, pvc) if err != nil { return reconcile.Result{}, err } // Unprotect upload time supplements to delete them later. - err = ds.uploaderService.Unprotect(ctx, pod, svc, ing) + err = ds.uploaderService.Unprotect(ctx, supgen, pod, svc, ing) if err != nil { return reconcile.Result{}, err } - err = ds.diskService.Unprotect(ctx, dv) + err = ds.diskService.Unprotect(ctx, supgen, dv) if err != nil { return reconcile.Result{}, err } @@ -158,7 +158,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) ( vd.Status.Progress = "0%" - envSettings := ds.getEnvSettings(vd, supgen) + envSettings := ds.getEnvSettings(vd, supgen.Generator) var nodePlacement *provisioner.NodePlacement nodePlacement, err = getNodePlacement(ctx, ds.client, vd) @@ -229,7 +229,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) ( vd.Status.Progress = ds.statService.GetProgress(vd.GetUID(), pod, vd.Status.Progress, service.NewScaleOption(0, 50)) vd.Status.DownloadSpeed = ds.statService.GetDownloadSpeed(vd.GetUID(), pod) - err = ds.uploaderService.Protect(ctx, pod, svc, ing) + err = ds.uploaderService.Protect(ctx, supgen, pod, svc, ing) if err != nil { return reconcile.Result{}, err } @@ -278,7 +278,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) ( return reconcile.Result{}, err } - source := ds.getSource(supgen, ds.statService.GetDVCRImageName(pod)) + source := ds.getSource(supgen.Generator, ds.statService.GetDVCRImageName(pod)) var sc *storagev1.StorageClass sc, err = ds.diskService.GetStorageClass(ctx, vd.Status.StorageClassName) @@ -358,7 +358,7 @@ func (ds UploadDataSource) Sync(ctx context.Context, vd *v1alpha2.VirtualDisk) ( vd.Status.Capacity = ds.diskService.GetCapacity(pvc) vdsupplements.SetPVCName(vd, dv.Status.ClaimName) - err = ds.diskService.Protect(ctx, vd, dv, pvc) + err = ds.diskService.Protect(ctx, supgen.Generator, vd, dv, pvc) if err != nil { return reconcile.Result{}, err } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go index 68e16610a5..bcd0f87cea 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/http.go @@ -93,7 +93,7 @@ func (ds 
HTTPDataSource) StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualIm vi.Status.Phase = v1alpha2.ImageReady - err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -167,7 +167,7 @@ func (ds HTTPDataSource) StoreToDVCR(ctx context.Context, vi *v1alpha2.VirtualIm return reconcile.Result{}, setPhaseConditionFromPodError(cb, vi, err) } - err = ds.importerService.Protect(ctx, pod) + err = ds.importerService.Protect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -224,18 +224,18 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualIma setPhaseConditionForFinishedImage(pvc, cb, &vi.Status.Phase, supgen) // Protect Ready Disk and underlying PVC. - err = ds.diskService.Protect(ctx, vi, nil, pvc) + err = ds.diskService.Protect(ctx, supgen, vi, nil, pvc) if err != nil { return reconcile.Result{}, err } // Unprotect import time supplements to delete them later. - err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } - err = ds.diskService.Unprotect(ctx, dv) + err = ds.diskService.Unprotect(ctx, supgen, dv) if err != nil { return reconcile.Result{}, err } @@ -276,7 +276,7 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualIma return reconcile.Result{}, setPhaseConditionFromPodError(cb, vi, err) } - err = ds.importerService.Protect(ctx, pod) + err = ds.importerService.Protect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -394,7 +394,7 @@ func (ds HTTPDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualIma vi.Status.Progress = ds.diskService.GetProgress(dv, vi.Status.Progress, service.NewScaleOption(50, 100)) - err = ds.diskService.Protect(ctx, vi, dv, pvc) + err = ds.diskService.Protect(ctx, supgen, vi, dv, pvc) if err != nil { return reconcile.Result{}, err } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go index 3e4e7373fa..fca75b057a 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/interfaces.go @@ -41,8 +41,8 @@ type Importer interface { CleanUp(ctx context.Context, sup supplements.Generator) (bool, error) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) - Protect(ctx context.Context, pod *corev1.Pod) error - Unprotect(ctx context.Context, pod *corev1.Pod) error + Protect(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error + Unprotect(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error Start(ctx context.Context, settings *importer.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error } @@ -53,8 +53,8 @@ type Uploader interface { GetPod(ctx context.Context, sup supplements.Generator) (*corev1.Pod, error) GetIngress(ctx context.Context, sup supplements.Generator) (*netv1.Ingress, error) GetService(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) - Protect(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error - Unprotect(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error + 
Protect(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error + Unprotect(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error GetExternalURL(ctx context.Context, ing *netv1.Ingress) string GetInClusterURL(ctx context.Context, svc *corev1.Service) string } @@ -75,5 +75,6 @@ type Bounder interface { } type Disk interface { + GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) CleanUpSupplements(ctx context.Context, sup supplements.Generator) (bool, error) } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go index e792231e7b..39ec8ca273 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/mock.go @@ -42,7 +42,7 @@ var _ Importer = &ImporterMock{} // GetPodSettingsWithPVCFunc: func(ownerReference *metav1.OwnerReference, generator supplements.Generator, s1 string, s2 string) *importer.PodSettings { // panic("mock out the GetPodSettingsWithPVC method") // }, -// ProtectFunc: func(ctx context.Context, pod *corev1.Pod) error { +// ProtectFunc: func(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error { // panic("mock out the Protect method") // }, // StartFunc: func(ctx context.Context, settings *importer.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { @@ -51,7 +51,7 @@ var _ Importer = &ImporterMock{} // StartWithPodSettingFunc: func(contextMoqParam context.Context, settings *importer.Settings, generator supplements.Generator, cABundle *datasource.CABundle, podSettings *importer.PodSettings) error { // panic("mock out the StartWithPodSetting method") // }, -// UnprotectFunc: func(ctx context.Context, pod *corev1.Pod) error { +// UnprotectFunc: func(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error { // panic("mock out the Unprotect method") // }, // } @@ -74,7 +74,7 @@ type ImporterMock struct { GetPodSettingsWithPVCFunc func(ownerReference *metav1.OwnerReference, generator supplements.Generator, s1 string, s2 string) *importer.PodSettings // ProtectFunc mocks the Protect method. - ProtectFunc func(ctx context.Context, pod *corev1.Pod) error + ProtectFunc func(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error // StartFunc mocks the Start method. StartFunc func(ctx context.Context, settings *importer.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error @@ -83,7 +83,7 @@ type ImporterMock struct { StartWithPodSettingFunc func(contextMoqParam context.Context, settings *importer.Settings, generator supplements.Generator, cABundle *datasource.CABundle, podSettings *importer.PodSettings) error // UnprotectFunc mocks the Unprotect method. - UnprotectFunc func(ctx context.Context, pod *corev1.Pod) error + UnprotectFunc func(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error // calls tracks calls to the methods. calls struct { @@ -125,6 +125,8 @@ type ImporterMock struct { Ctx context.Context // Pod is the pod argument value. Pod *corev1.Pod + // Sup is the sup argument value. + Sup supplements.Generator } // Start holds details about calls to the Start method. 
Start []struct { @@ -160,6 +162,8 @@ type ImporterMock struct { Ctx context.Context // Pod is the pod argument value. Pod *corev1.Pod + // Sup is the sup argument value. + Sup supplements.Generator } } lockCleanUp sync.RWMutex @@ -325,21 +329,23 @@ func (mock *ImporterMock) GetPodSettingsWithPVCCalls() []struct { } // Protect calls ProtectFunc. -func (mock *ImporterMock) Protect(ctx context.Context, pod *corev1.Pod) error { +func (mock *ImporterMock) Protect(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error { if mock.ProtectFunc == nil { panic("ImporterMock.ProtectFunc: method is nil but Importer.Protect was just called") } callInfo := struct { Ctx context.Context Pod *corev1.Pod + Sup supplements.Generator }{ Ctx: ctx, Pod: pod, + Sup: sup, } mock.lockProtect.Lock() mock.calls.Protect = append(mock.calls.Protect, callInfo) mock.lockProtect.Unlock() - return mock.ProtectFunc(ctx, pod) + return mock.ProtectFunc(ctx, pod, sup) } // ProtectCalls gets all the calls that were made to Protect. @@ -349,10 +355,12 @@ func (mock *ImporterMock) Protect(ctx context.Context, pod *corev1.Pod) error { func (mock *ImporterMock) ProtectCalls() []struct { Ctx context.Context Pod *corev1.Pod + Sup supplements.Generator } { var calls []struct { Ctx context.Context Pod *corev1.Pod + Sup supplements.Generator } mock.lockProtect.RLock() calls = mock.calls.Protect @@ -461,21 +469,23 @@ func (mock *ImporterMock) StartWithPodSettingCalls() []struct { } // Unprotect calls UnprotectFunc. -func (mock *ImporterMock) Unprotect(ctx context.Context, pod *corev1.Pod) error { +func (mock *ImporterMock) Unprotect(ctx context.Context, pod *corev1.Pod, sup supplements.Generator) error { if mock.UnprotectFunc == nil { panic("ImporterMock.UnprotectFunc: method is nil but Importer.Unprotect was just called") } callInfo := struct { Ctx context.Context Pod *corev1.Pod + Sup supplements.Generator }{ Ctx: ctx, Pod: pod, + Sup: sup, } mock.lockUnprotect.Lock() mock.calls.Unprotect = append(mock.calls.Unprotect, callInfo) mock.lockUnprotect.Unlock() - return mock.UnprotectFunc(ctx, pod) + return mock.UnprotectFunc(ctx, pod, sup) } // UnprotectCalls gets all the calls that were made to Unprotect. 
@@ -485,10 +495,12 @@ func (mock *ImporterMock) Unprotect(ctx context.Context, pod *corev1.Pod) error func (mock *ImporterMock) UnprotectCalls() []struct { Ctx context.Context Pod *corev1.Pod + Sup supplements.Generator } { var calls []struct { Ctx context.Context Pod *corev1.Pod + Sup supplements.Generator } mock.lockUnprotect.RLock() calls = mock.calls.Unprotect @@ -527,13 +539,13 @@ var _ Uploader = &UploaderMock{} // GetServiceFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) { // panic("mock out the GetService method") // }, -// ProtectFunc: func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { +// ProtectFunc: func(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { // panic("mock out the Protect method") // }, // StartFunc: func(ctx context.Context, settings *uploader.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error { // panic("mock out the Start method") // }, -// UnprotectFunc: func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { +// UnprotectFunc: func(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { // panic("mock out the Unprotect method") // }, // } @@ -565,13 +577,13 @@ type UploaderMock struct { GetServiceFunc func(ctx context.Context, sup supplements.Generator) (*corev1.Service, error) // ProtectFunc mocks the Protect method. - ProtectFunc func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error + ProtectFunc func(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error // StartFunc mocks the Start method. StartFunc func(ctx context.Context, settings *uploader.Settings, obj client.Object, sup supplements.Generator, caBundle *datasource.CABundle, opts ...service.Option) error // UnprotectFunc mocks the Unprotect method. - UnprotectFunc func(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error + UnprotectFunc func(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error // calls tracks calls to the methods. calls struct { @@ -628,6 +640,8 @@ type UploaderMock struct { Protect []struct { // Ctx is the ctx argument value. Ctx context.Context + // Sup is the sup argument value. + Sup supplements.Generator // Pod is the pod argument value. Pod *corev1.Pod // Svc is the svc argument value. @@ -654,6 +668,8 @@ type UploaderMock struct { Unprotect []struct { // Ctx is the ctx argument value. Ctx context.Context + // Sup is the sup argument value. + Sup supplements.Generator // Pod is the pod argument value. Pod *corev1.Pod // Svc is the svc argument value. @@ -927,17 +943,19 @@ func (mock *UploaderMock) GetServiceCalls() []struct { } // Protect calls ProtectFunc. 
-func (mock *UploaderMock) Protect(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { +func (mock *UploaderMock) Protect(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { if mock.ProtectFunc == nil { panic("UploaderMock.ProtectFunc: method is nil but Uploader.Protect was just called") } callInfo := struct { Ctx context.Context + Sup supplements.Generator Pod *corev1.Pod Svc *corev1.Service Ing *netv1.Ingress }{ Ctx: ctx, + Sup: sup, Pod: pod, Svc: svc, Ing: ing, @@ -945,7 +963,7 @@ func (mock *UploaderMock) Protect(ctx context.Context, pod *corev1.Pod, svc *cor mock.lockProtect.Lock() mock.calls.Protect = append(mock.calls.Protect, callInfo) mock.lockProtect.Unlock() - return mock.ProtectFunc(ctx, pod, svc, ing) + return mock.ProtectFunc(ctx, sup, pod, svc, ing) } // ProtectCalls gets all the calls that were made to Protect. @@ -954,12 +972,14 @@ func (mock *UploaderMock) Protect(ctx context.Context, pod *corev1.Pod, svc *cor // len(mockedUploader.ProtectCalls()) func (mock *UploaderMock) ProtectCalls() []struct { Ctx context.Context + Sup supplements.Generator Pod *corev1.Pod Svc *corev1.Service Ing *netv1.Ingress } { var calls []struct { Ctx context.Context + Sup supplements.Generator Pod *corev1.Pod Svc *corev1.Service Ing *netv1.Ingress @@ -1023,17 +1043,19 @@ func (mock *UploaderMock) StartCalls() []struct { } // Unprotect calls UnprotectFunc. -func (mock *UploaderMock) Unprotect(ctx context.Context, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { +func (mock *UploaderMock) Unprotect(ctx context.Context, sup supplements.Generator, pod *corev1.Pod, svc *corev1.Service, ing *netv1.Ingress) error { if mock.UnprotectFunc == nil { panic("UploaderMock.UnprotectFunc: method is nil but Uploader.Unprotect was just called") } callInfo := struct { Ctx context.Context + Sup supplements.Generator Pod *corev1.Pod Svc *corev1.Service Ing *netv1.Ingress }{ Ctx: ctx, + Sup: sup, Pod: pod, Svc: svc, Ing: ing, @@ -1041,7 +1063,7 @@ func (mock *UploaderMock) Unprotect(ctx context.Context, pod *corev1.Pod, svc *c mock.lockUnprotect.Lock() mock.calls.Unprotect = append(mock.calls.Unprotect, callInfo) mock.lockUnprotect.Unlock() - return mock.UnprotectFunc(ctx, pod, svc, ing) + return mock.UnprotectFunc(ctx, sup, pod, svc, ing) } // UnprotectCalls gets all the calls that were made to Unprotect. @@ -1050,12 +1072,14 @@ func (mock *UploaderMock) Unprotect(ctx context.Context, pod *corev1.Pod, svc *c // len(mockedUploader.UnprotectCalls()) func (mock *UploaderMock) UnprotectCalls() []struct { Ctx context.Context + Sup supplements.Generator Pod *corev1.Pod Svc *corev1.Service Ing *netv1.Ingress } { var calls []struct { Ctx context.Context + Sup supplements.Generator Pod *corev1.Pod Svc *corev1.Service Ing *netv1.Ingress @@ -1945,6 +1969,9 @@ var _ Disk = &DiskMock{} // CleanUpSupplementsFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) { // panic("mock out the CleanUpSupplements method") // }, +// GetPersistentVolumeClaimFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) { +// panic("mock out the GetPersistentVolumeClaim method") +// }, // } // // // use mockedDisk in code that requires Disk @@ -1955,6 +1982,9 @@ type DiskMock struct { // CleanUpSupplementsFunc mocks the CleanUpSupplements method. 
CleanUpSupplementsFunc func(ctx context.Context, sup supplements.Generator) (bool, error) + // GetPersistentVolumeClaimFunc mocks the GetPersistentVolumeClaim method. + GetPersistentVolumeClaimFunc func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) + // calls tracks calls to the methods. calls struct { // CleanUpSupplements holds details about calls to the CleanUpSupplements method. @@ -1964,8 +1994,16 @@ type DiskMock struct { // Sup is the sup argument value. Sup supplements.Generator } + // GetPersistentVolumeClaim holds details about calls to the GetPersistentVolumeClaim method. + GetPersistentVolumeClaim []struct { + // Ctx is the ctx argument value. + Ctx context.Context + // Sup is the sup argument value. + Sup supplements.Generator + } } - lockCleanUpSupplements sync.RWMutex + lockCleanUpSupplements sync.RWMutex + lockGetPersistentVolumeClaim sync.RWMutex } // CleanUpSupplements calls CleanUpSupplementsFunc. @@ -2003,3 +2041,39 @@ func (mock *DiskMock) CleanUpSupplementsCalls() []struct { mock.lockCleanUpSupplements.RUnlock() return calls } + +// GetPersistentVolumeClaim calls GetPersistentVolumeClaimFunc. +func (mock *DiskMock) GetPersistentVolumeClaim(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) { + if mock.GetPersistentVolumeClaimFunc == nil { + panic("DiskMock.GetPersistentVolumeClaimFunc: method is nil but Disk.GetPersistentVolumeClaim was just called") + } + callInfo := struct { + Ctx context.Context + Sup supplements.Generator + }{ + Ctx: ctx, + Sup: sup, + } + mock.lockGetPersistentVolumeClaim.Lock() + mock.calls.GetPersistentVolumeClaim = append(mock.calls.GetPersistentVolumeClaim, callInfo) + mock.lockGetPersistentVolumeClaim.Unlock() + return mock.GetPersistentVolumeClaimFunc(ctx, sup) +} + +// GetPersistentVolumeClaimCalls gets all the calls that were made to GetPersistentVolumeClaim. 
+// Check the length with: +// +// len(mockedDisk.GetPersistentVolumeClaimCalls()) +func (mock *DiskMock) GetPersistentVolumeClaimCalls() []struct { + Ctx context.Context + Sup supplements.Generator +} { + var calls []struct { + Ctx context.Context + Sup supplements.Generator + } + mock.lockGetPersistentVolumeClaim.RLock() + calls = mock.calls.GetPersistentVolumeClaim + mock.lockGetPersistentVolumeClaim.RUnlock() + return calls +} diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go index 3017574fb1..8781a19c3d 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref.go @@ -86,7 +86,7 @@ func NewObjectRefDataSource( viObjectRefOnPvc: NewObjectRefDataVirtualImageOnPVC(recorder, statService, importerService, dvcrSettings, client, diskService), vdSyncer: NewObjectRefVirtualDisk(recorder, importerService, client, diskService, dvcrSettings, statService), vdSnapshotCRSyncer: NewObjectRefVirtualDiskSnapshotCR(importerService, statService, diskService, client, dvcrSettings, recorder), - vdSnapshotPVCSyncer: NewObjectRefVirtualDiskSnapshotPVC(importerService, statService, bounderService, client, dvcrSettings, recorder), + vdSnapshotPVCSyncer: NewObjectRefVirtualDiskSnapshotPVC(importerService, statService, bounderService, diskService, client, dvcrSettings, recorder), } } @@ -154,12 +154,12 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.Virtu setPhaseConditionForFinishedImage(pvc, cb, &vi.Status.Phase, supgen) // Protect Ready Disk and underlying PVC. - err = ds.diskService.Protect(ctx, vi, nil, pvc) + err = ds.diskService.Protect(ctx, supgen, vi, nil, pvc) if err != nil { return reconcile.Result{}, err } - err = ds.diskService.Unprotect(ctx, dv) + err = ds.diskService.Unprotect(ctx, supgen, dv) if err != nil { return reconcile.Result{}, err } @@ -281,7 +281,7 @@ func (ds ObjectRefDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.Virtu vi.Status.Progress = ds.diskService.GetProgress(dv, vi.Status.Progress, service.NewScaleOption(0, 100)) vi.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName - err = ds.diskService.Protect(ctx, vi, dv, pvc) + err = ds.diskService.Protect(ctx, supgen, vi, dv, pvc) if err != nil { return reconcile.Result{}, err } @@ -354,7 +354,7 @@ func (ds ObjectRefDataSource) StoreToDVCR(ctx context.Context, vi *v1alpha2.Virt vi.Status.Phase = v1alpha2.ImageReady - err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go index 585a3de86f..344f45924a 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vd.go @@ -99,7 +99,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *v1alpha2.Vir vi.Status.Phase = v1alpha2.ImageReady - err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -193,7 +193,7 @@ func (ds ObjectRefVirtualDisk) StoreToDVCR(ctx context.Context, vi *v1alpha2.Vir } } - err = 
ds.importerService.Protect(ctx, pod) + err = ds.importerService.Protect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -242,12 +242,12 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *v1alpha2.Virt setPhaseConditionForFinishedImage(pvc, cb, &vi.Status.Phase, supgen) // Protect Ready Disk and underlying PVC. - err = ds.diskService.Protect(ctx, vi, nil, pvc) + err = ds.diskService.Protect(ctx, supgen, vi, nil, pvc) if err != nil { return reconcile.Result{}, err } - err = ds.diskService.Unprotect(ctx, dv) + err = ds.diskService.Unprotect(ctx, supgen, dv) if err != nil { return reconcile.Result{}, err } @@ -359,7 +359,7 @@ func (ds ObjectRefVirtualDisk) StoreToPVC(ctx context.Context, vi *v1alpha2.Virt vi.Status.Progress = ds.diskService.GetProgress(dv, vi.Status.Progress, service.NewScaleOption(0, 100)) vi.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName - err = ds.diskService.Protect(ctx, vi, dv, pvc) + err = ds.diskService.Protect(ctx, supgen, vi, dv, pvc) if err != nil { return reconcile.Result{}, err } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr.go index adf931a806..9478c42aee 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr.go @@ -22,7 +22,6 @@ import ( "fmt" vsv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -31,7 +30,6 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/common/steptaker" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" - "github.com/deckhouse/virtualization-controller/pkg/controller/importer" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/vi/internal/source/step" "github.com/deckhouse/virtualization-controller/pkg/dvcr" @@ -77,12 +75,12 @@ func (ds ObjectRefVirtualDiskSnapshotCR) Sync(ctx context.Context, vi *v1alpha2. 
cb := conditions.NewConditionBuilder(vicondition.ReadyType).Generation(vi.Generation) defer func() { conditions.SetCondition(cb, &vi.Status.Conditions) }() - pvc, err := object.FetchObject(ctx, supgen.PersistentVolumeClaim(), ds.client, &corev1.PersistentVolumeClaim{}) + pvc, err := ds.diskService.GetPersistentVolumeClaim(ctx, supgen) if err != nil { return reconcile.Result{}, fmt.Errorf("fetch pvc: %w", err) } - pod, err := importer.FindPod(ctx, ds.client, supgen) + pod, err := ds.importer.GetPod(ctx, supgen) if err != nil { return reconcile.Result{}, fmt.Errorf("fetch pod: %w", err) } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go index 0e406f0088..33c5db4a06 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_cr_test.go @@ -86,6 +86,9 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { CleanUpSupplementsFunc: func(_ context.Context, _ supplements.Generator) (bool, error) { return false, nil }, + GetPodFunc: func(_ context.Context, _ supplements.Generator) (*corev1.Pod, error) { + return pod, nil + }, } stat = &StatMock{ GetDVCRImageNameFunc: func(_ *corev1.Pod) string { @@ -112,6 +115,9 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { CleanUpSupplementsFunc: func(ctx context.Context, sup supplements.Generator) (bool, error) { return false, nil }, + GetPersistentVolumeClaimFunc: func(_ context.Context, _ supplements.Generator) (*corev1.PersistentVolumeClaim, error) { + return pvc, nil + }, } settings = &dvcr.Settings{} @@ -194,6 +200,13 @@ var _ = Describe("ObjectRef VirtualImageSnapshot ContainerRegistry", func() { return nil } + diskService.GetPersistentVolumeClaimFunc = func(_ context.Context, _ supplements.Generator) (*corev1.PersistentVolumeClaim, error) { + return nil, nil + } + importer.GetPodFunc = func(_ context.Context, _ supplements.Generator) (*corev1.Pod, error) { + return nil, nil + } + vi.Status = v1alpha2.VirtualImageStatus{} client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(vdSnapshot, vs). 
WithInterceptorFuncs(interceptor.Funcs{ diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc.go index 189cffa1c8..794db7eaa0 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc.go @@ -21,12 +21,10 @@ import ( "errors" "fmt" - corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/deckhouse/virtualization-controller/pkg/common/annotations" - "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/common/steptaker" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" @@ -43,6 +41,7 @@ type ObjectRefVirtualDiskSnapshotPVC struct { bounder Bounder client client.Client dvcrSettings *dvcr.Settings + diskService Disk recorder eventrecord.EventRecorderLogger } @@ -50,6 +49,7 @@ func NewObjectRefVirtualDiskSnapshotPVC( importer Importer, stat Stat, bounder Bounder, + diskService Disk, client client.Client, dvcrSettings *dvcr.Settings, recorder eventrecord.EventRecorderLogger, @@ -60,6 +60,7 @@ func NewObjectRefVirtualDiskSnapshotPVC( bounder: bounder, client: client, dvcrSettings: dvcrSettings, + diskService: diskService, recorder: recorder, } } @@ -74,7 +75,7 @@ func (ds ObjectRefVirtualDiskSnapshotPVC) Sync(ctx context.Context, vi *v1alpha2 cb := conditions.NewConditionBuilder(vicondition.ReadyType).Generation(vi.Generation) defer func() { conditions.SetCondition(cb, &vi.Status.Conditions) }() - pvc, err := object.FetchObject(ctx, supgen.PersistentVolumeClaim(), ds.client, &corev1.PersistentVolumeClaim{}) + pvc, err := ds.diskService.GetPersistentVolumeClaim(ctx, supgen) if err != nil { return reconcile.Result{}, fmt.Errorf("fetch pvc: %w", err) } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go index a28aa9aabd..90d4b4ed78 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vdsnapshot_pvc_test.go @@ -45,18 +45,19 @@ import ( var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() { var ( - ctx context.Context - scheme *runtime.Scheme - vi *v1alpha2.VirtualImage - vs *vsv1.VolumeSnapshot - sc *storagev1.StorageClass - vdSnapshot *v1alpha2.VirtualDiskSnapshot - pvc *corev1.PersistentVolumeClaim - settings *dvcr.Settings - recorder eventrecord.EventRecorderLogger - importer *ImporterMock - bounder *BounderMock - stat *StatMock + ctx context.Context + scheme *runtime.Scheme + vi *v1alpha2.VirtualImage + vs *vsv1.VolumeSnapshot + sc *storagev1.StorageClass + vdSnapshot *v1alpha2.VirtualDiskSnapshot + pvc *corev1.PersistentVolumeClaim + settings *dvcr.Settings + recorder eventrecord.EventRecorderLogger + importer *ImporterMock + bounder *BounderMock + stat *StatMock + diskService *DiskMock ) BeforeEach(func() { @@ -102,6 +103,15 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() return "N%" }, } + diskService = 
&DiskMock{ + GetPersistentVolumeClaimFunc: func(ctx context.Context, sup supplements.Generator) (*corev1.PersistentVolumeClaim, error) { + // Return the test PVC if it exists + if pvc != nil && pvc.Name != "" { + return pvc, nil + } + return nil, nil + }, + } settings = &dvcr.Settings{} sc = &storagev1.StorageClass{ @@ -181,7 +191,11 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() }, }).Build() - syncer := NewObjectRefVirtualDiskSnapshotPVC(importer, stat, nil, client, settings, recorder) + diskService.GetPersistentVolumeClaimFunc = func(_ context.Context, _ supplements.Generator) (*corev1.PersistentVolumeClaim, error) { + return nil, nil + } + + syncer := NewObjectRefVirtualDiskSnapshotPVC(importer, stat, bounder, diskService, client, settings, recorder) res, err := syncer.Sync(ctx, vi) Expect(err).ToNot(HaveOccurred()) @@ -202,7 +216,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() pvc.Status.Phase = corev1.ClaimBound client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(pvc).Build() - syncer := NewObjectRefVirtualDiskSnapshotPVC(importer, stat, bounder, client, nil, recorder) + syncer := NewObjectRefVirtualDiskSnapshotPVC(importer, stat, bounder, diskService, client, nil, recorder) res, err := syncer.Sync(ctx, vi) Expect(err).ToNot(HaveOccurred()) @@ -225,7 +239,11 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() } client := fake.NewClientBuilder().WithScheme(scheme).WithObjects().Build() - syncer := NewObjectRefVirtualDiskSnapshotPVC(importer, stat, nil, client, nil, recorder) + diskService.GetPersistentVolumeClaimFunc = func(_ context.Context, _ supplements.Generator) (*corev1.PersistentVolumeClaim, error) { + return nil, nil + } + + syncer := NewObjectRefVirtualDiskSnapshotPVC(importer, stat, bounder, diskService, client, nil, recorder) res, err := syncer.Sync(ctx, vi) Expect(err).ToNot(HaveOccurred()) @@ -241,7 +259,7 @@ var _ = Describe("ObjectRef VirtualImageSnapshot PersistentVolumeClaim", func() vi.Status.Target.PersistentVolumeClaim = pvc.Name client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(pvc).Build() - syncer := NewObjectRefVirtualDiskSnapshotPVC(importer, stat, nil, client, nil, recorder) + syncer := NewObjectRefVirtualDiskSnapshotPVC(importer, stat, bounder, diskService, client, nil, recorder) res, err := syncer.Sync(ctx, vi) Expect(err).ToNot(HaveOccurred()) diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go index 478109ad31..f3550dc9d9 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/object_ref_vi_on_pvc.go @@ -95,7 +95,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi vi.Status.Phase = v1alpha2.ImageReady - err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -171,7 +171,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToDVCR(ctx context.Context, vi, vi return reconcile.Result{}, setPhaseConditionFromPodError(cb, vi, err) } - err = ds.importerService.Protect(ctx, pod) + err = ds.importerService.Protect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -219,12 +219,12 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx 
context.Context, vi, viR setPhaseConditionForFinishedImage(pvc, cb, &vi.Status.Phase, supgen) // Protect Ready Disk and underlying PVC. - err = ds.diskService.Protect(ctx, vi, nil, pvc) + err = ds.diskService.Protect(ctx, supgen, vi, nil, pvc) if err != nil { return reconcile.Result{}, err } - err = ds.diskService.Unprotect(ctx, dv) + err = ds.diskService.Unprotect(ctx, supgen, dv) if err != nil { return reconcile.Result{}, err } @@ -326,7 +326,7 @@ func (ds ObjectRefDataVirtualImageOnPVC) StoreToPVC(ctx context.Context, vi, viR vi.Status.Progress = ds.diskService.GetProgress(dv, vi.Status.Progress, service.NewScaleOption(0, 100)) vi.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName - err = ds.diskService.Protect(ctx, vi, dv, pvc) + err = ds.diskService.Protect(ctx, supgen, vi, dv, pvc) if err != nil { return reconcile.Result{}, err } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go index 87a63b9c2e..4ca3906c3a 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/registry.go @@ -111,18 +111,18 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.Virtua setPhaseConditionForFinishedImage(pvc, cb, &vi.Status.Phase, supgen) // Protect Ready Disk and underlying PVC. - err = ds.diskService.Protect(ctx, vi, nil, pvc) + err = ds.diskService.Protect(ctx, supgen, vi, nil, pvc) if err != nil { return reconcile.Result{}, err } // Unprotect import time supplements to delete them later. - err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } - err = ds.diskService.Unprotect(ctx, dv) + err = ds.diskService.Unprotect(ctx, supgen, dv) if err != nil { return reconcile.Result{}, err } @@ -171,7 +171,7 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.Virtua vi.Status.Progress = ds.statService.GetProgress(vi.GetUID(), pod, vi.Status.Progress, service.NewScaleOption(0, 50)) - err = ds.importerService.Protect(ctx, pod) + err = ds.importerService.Protect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } @@ -270,7 +270,7 @@ func (ds RegistryDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.Virtua vi.Status.Progress = ds.diskService.GetProgress(dv, vi.Status.Progress, service.NewScaleOption(50, 100)) vi.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName - err = ds.diskService.Protect(ctx, vi, dv, pvc) + err = ds.diskService.Protect(ctx, supgen, vi, dv, pvc) if err != nil { return reconcile.Result{}, err } @@ -310,7 +310,7 @@ func (ds RegistryDataSource) StoreToDVCR(ctx context.Context, vi *v1alpha2.Virtu vi.Status.Phase = v1alpha2.ImageReady - err = ds.importerService.Unprotect(ctx, pod) + err = ds.importerService.Unprotect(ctx, pod, supgen) if err != nil { return reconcile.Result{}, err } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go index e22425a219..508790587f 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/upload.go @@ -114,18 +114,18 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualI setPhaseConditionForFinishedImage(pvc, cb, &vi.Status.Phase, 
supgen) // Protect Ready Disk and underlying PVC. - err = ds.diskService.Protect(ctx, vi, nil, pvc) + err = ds.diskService.Protect(ctx, supgen, vi, nil, pvc) if err != nil { return reconcile.Result{}, err } // Unprotect upload time supplements to delete them later. - err = ds.uploaderService.Unprotect(ctx, pod, svc, ing) + err = ds.uploaderService.Unprotect(ctx, supgen, pod, svc, ing) if err != nil { return reconcile.Result{}, err } - err = ds.diskService.Unprotect(ctx, dv) + err = ds.diskService.Unprotect(ctx, supgen, dv) if err != nil { return reconcile.Result{}, err } @@ -207,7 +207,7 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualI vi.Status.Progress = ds.statService.GetProgress(vi.GetUID(), pod, vi.Status.Progress, service.NewScaleOption(0, 50)) vi.Status.DownloadSpeed = ds.statService.GetDownloadSpeed(vi.GetUID(), pod) - err = ds.uploaderService.Protect(ctx, pod, svc, ing) + err = ds.uploaderService.Protect(ctx, supgen, pod, svc, ing) if err != nil { return reconcile.Result{}, err } @@ -320,7 +320,7 @@ func (ds UploadDataSource) StoreToPVC(ctx context.Context, vi *v1alpha2.VirtualI vi.Status.Progress = ds.diskService.GetProgress(dv, vi.Status.Progress, service.NewScaleOption(50, 100)) vi.Status.Target.PersistentVolumeClaim = dv.Status.ClaimName - err = ds.diskService.Protect(ctx, vi, dv, pvc) + err = ds.diskService.Protect(ctx, supgen, vi, dv, pvc) if err != nil { return reconcile.Result{}, err } @@ -368,7 +368,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *v1alpha2.Virtual vi.Status.Phase = v1alpha2.ImageReady - err = ds.uploaderService.Unprotect(ctx, pod, svc, ing) + err = ds.uploaderService.Unprotect(ctx, supgen, pod, svc, ing) if err != nil { return reconcile.Result{}, err } @@ -448,7 +448,7 @@ func (ds UploadDataSource) StoreToDVCR(ctx context.Context, vi *v1alpha2.Virtual vi.Status.Target.RegistryURL = ds.statService.GetDVCRImageName(pod) vi.Status.DownloadSpeed = ds.statService.GetDownloadSpeed(vi.GetUID(), pod) - err = ds.uploaderService.Protect(ctx, pod, svc, ing) + err = ds.uploaderService.Protect(ctx, supgen, pod, svc, ing) if err != nil { return reconcile.Result{}, err } From 578d2f82839d5804d01f5058dbb680118e955fc4 Mon Sep 17 00:00:00 2001 From: Pavel Tishkov Date: Tue, 18 Nov 2025 13:15:18 +0300 Subject: [PATCH 6/7] feat(module): add ability to edit or remove generic vmclass (#1597) * fix(module): user may delete or edit vmclass/generic - Remove helm labels and annotations from vmclass/generic. - Create secret/module-state to track initial creation of vmclass/generic. - Do not react on delete or update of vmclass/generic. 
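Note (illustration only, not part of this patch): the Secret-based tracking described above boils down to "create vmclass/generic only if the installation marker has never been written, and never touch it again afterwards". A minimal client-go sketch of that idea follows; markerNamespace, markerSecretName and ensureInstallationMarker are assumed names for the example, and the real hook goes through the module-sdk snapshot/values machinery in hook.go rather than calling the API directly.

package statemarker

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

const (
	markerNamespace  = "d8-virtualization"          // module namespace, as used in the hook tests
	markerSecretName = "module-state"               // Secret that records module-level state
	markerKey        = "vmClassGenericInstallation" // key holding the installation timestamp
)

// ensureInstallationMarker reports whether the marker Secret already exists and
// creates it otherwise. A caller would create vmclass/generic only when the
// marker was absent, so a later user edit or deletion is never reverted.
func ensureInstallationMarker(ctx context.Context, c kubernetes.Interface) (alreadyInstalled bool, err error) {
	_, err = c.CoreV1().Secrets(markerNamespace).Get(ctx, markerSecretName, metav1.GetOptions{})
	if err == nil {
		return true, nil // marker present: do not touch vmclass/generic again
	}
	if !apierrors.IsNotFound(err) {
		return false, fmt.Errorf("get secret %s: %w", markerSecretName, err)
	}

	marker := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: markerSecretName, Namespace: markerNamespace},
		Data: map[string][]byte{
			markerKey: []byte(fmt.Sprintf(`{"installedAt":%q}`, time.Now().Format(time.RFC3339))),
		},
	}
	if _, err = c.CoreV1().Secrets(markerNamespace).Create(ctx, marker, metav1.CreateOptions{}); err != nil {
		return false, fmt.Errorf("create secret %s: %w", markerSecretName, err)
	}
	return false, nil
}

Keeping the marker outside the vmclass itself is what lets a user edit or delete vmclass/generic without the module re-creating it on the next run.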
Signed-off-by: Pavel Tishkov Signed-off-by: Ivan Mikheykin Co-authored-by: Ivan Mikheykin --- .../virtualization-module-hooks/register.go | 2 +- .../pkg/hooks/install-vmclass-generic/hook.go | 326 +++++++++++++++ .../install-vmclass-generic/hook_test.go | 382 ++++++++++++++++++ .../hook.go | 102 ----- openapi/values.yaml | 4 + templates/module-state-secret.yaml | 14 + .../vmclasses-default.yaml | 34 -- tools/kubeconform/fixtures/module-values.yaml | 1 + 8 files changed, 728 insertions(+), 137 deletions(-) create mode 100644 images/hooks/pkg/hooks/install-vmclass-generic/hook.go create mode 100644 images/hooks/pkg/hooks/install-vmclass-generic/hook_test.go delete mode 100644 images/hooks/pkg/hooks/prevent-default-vmclasses-deletion/hook.go create mode 100644 templates/module-state-secret.yaml delete mode 100644 templates/virtualization-controller/vmclasses-default.yaml diff --git a/images/hooks/cmd/virtualization-module-hooks/register.go b/images/hooks/cmd/virtualization-module-hooks/register.go index 39c8b95a04..425845d7ea 100644 --- a/images/hooks/cmd/virtualization-module-hooks/register.go +++ b/images/hooks/cmd/virtualization-module-hooks/register.go @@ -23,9 +23,9 @@ import ( _ "hooks/pkg/hooks/discovery-workload-nodes" _ "hooks/pkg/hooks/drop-openshift-labels" _ "hooks/pkg/hooks/generate-secret-for-dvcr" + _ "hooks/pkg/hooks/install-vmclass-generic" _ "hooks/pkg/hooks/migrate-delete-renamed-validation-admission-policy" _ "hooks/pkg/hooks/migrate-virthandler-kvm-node-labels" - _ "hooks/pkg/hooks/prevent-default-vmclasses-deletion" _ "hooks/pkg/hooks/tls-certificates-api" _ "hooks/pkg/hooks/tls-certificates-api-proxy" _ "hooks/pkg/hooks/tls-certificates-controller" diff --git a/images/hooks/pkg/hooks/install-vmclass-generic/hook.go b/images/hooks/pkg/hooks/install-vmclass-generic/hook.go new file mode 100644 index 0000000000..0991817f3e --- /dev/null +++ b/images/hooks/pkg/hooks/install-vmclass-generic/hook.go @@ -0,0 +1,326 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package install_vmclass_generic + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + "hooks/pkg/settings" + + "github.com/deckhouse/virtualization/api/core/v1alpha2" + + "github.com/deckhouse/module-sdk/pkg" + "github.com/deckhouse/module-sdk/pkg/registry" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + +const ( + moduleStateSecretSnapshot = "module-state-snapshot" + moduleStateSecretName = "module-state" + + vmClassGenericSnapshot = "vmclass-generic-snapshot" + vmClassGenericName = "generic" + + vmClassInstallationStateSecretKey = "vmClassGenericInstallation" + vmClassInstallationStateValuesPath = "virtualization.internal.moduleState." + vmClassInstallationStateSecretKey +) + +var _ = registry.RegisterFunc(config, Reconcile) + +// This hook runs before applying templates (OnBeforeHelm) to drop helm labels +// and make vmclass unmanageable. 
+var config = &pkg.HookConfig{ + OnBeforeHelm: &pkg.OrderedConfig{Order: 5}, + Kubernetes: []pkg.KubernetesConfig{ + { + Name: moduleStateSecretSnapshot, + APIVersion: "v1", + Kind: "Secret", + JqFilter: `{data}`, + NameSelector: &pkg.NameSelector{ + MatchNames: []string{moduleStateSecretName}, + }, + NamespaceSelector: &pkg.NamespaceSelector{ + NameSelector: &pkg.NameSelector{ + MatchNames: []string{settings.ModuleNamespace}, + }, + }, + ExecuteHookOnSynchronization: ptr.To(false), + ExecuteHookOnEvents: ptr.To(false), + }, + { + Name: vmClassGenericSnapshot, + Kind: v1alpha2.VirtualMachineClassKind, + JqFilter: `{apiVersion, kind, "metadata": ( .metadata | {name, labels, annotations, creationTimestamp} ) }`, + NameSelector: &pkg.NameSelector{ + MatchNames: []string{vmClassGenericName}, + }, + ExecuteHookOnSynchronization: ptr.To(false), + ExecuteHookOnEvents: ptr.To(false), + }, + }, + + Queue: fmt.Sprintf("modules/%s", settings.ModuleName), +} + +// Reconcile manages the state of vmclass/generic resource: +// +// - Install a new one if there is no state in the Secret indicating that the vmclass was installed earlier. +// - Removes helm related annotations and labels from existing vmclass/generic (one time operation). +// - No actions performed if user deletes or replaces vmclass/generic. +func Reconcile(_ context.Context, input *pkg.HookInput) error { + moduleState, err := parseVMClassInstallationStateFromSnapshot(input) + if err != nil { + return err + } + + // If there is a state for vmclass/generic in the Secret, no changes to vmclass is required. + // Presence of the vmclass is not important, user may delete it and it's ok. + // The important part is to copy state from the Secret into values + // to ensure correct manifest for the Secret template (there may be no state in values, e.g. after deckhouse restart). + if moduleState != nil { + input.Values.Set(vmClassInstallationStateValuesPath, vmClassInstallationState{InstalledAt: moduleState.InstalledAt}) + return nil + } + + // Corner case: the secret is gone, but the state is present in values. + // Just return without changes to vmclass/generic, so helm will re-create + // the Secret with the module state. + stateInValues := input.Values.Get(vmClassInstallationStateValuesPath) + if stateInValues.Exists() { + return nil + } + + vmClassGeneric, err := parseVMClassGenericFromSnapshot(input) + if err != nil { + return err + } + + // No state in secret, no state in values, no vmclass/generic. + // Create vmclass/generic and set state in values, as it should be initial module installation. + if vmClassGeneric == nil { + input.Logger.Info("Install VirtualMachineClass/generic") + vmClass := vmClassGenericManifest() + input.PatchCollector.Create(vmClass) + } + // No state in secret, no state in values, but vmclass/generic is present. + // Cleanup metadata if vmclass was created by earlier versions of the module. + if isManagedByModule(vmClassGeneric) { + addPatchesToCleanupMetadata(input, vmClassGeneric) + } + + // Set state in values to prevent any further updates to vmclass/generic. + input.Values.Set(vmClassInstallationStateValuesPath, vmClassInstallationState{InstalledAt: time.Now()}) + return nil +} + +type vmClassInstallationState struct { + InstalledAt time.Time `json:"installedAt"` +} + +// parseVMClassInstallationStateFromSnapshot unmarshal vmClassInstallationState from jqFilter result. 
+func parseVMClassInstallationStateFromSnapshot(input *pkg.HookInput) (*vmClassInstallationState, error) { + snap := input.Snapshots.Get(moduleStateSecretSnapshot) + if len(snap) < 1 { + return nil, nil + } + + var ms corev1.Secret + err := snap[0].UnmarshalTo(&ms) + if err != nil { + return nil, err + } + + stateRaw := ms.Data[vmClassInstallationStateSecretKey] + if len(stateRaw) == 0 { + return nil, nil + } + + var s vmClassInstallationState + err = json.Unmarshal(stateRaw, &s) + if err != nil { + return nil, fmt.Errorf("restore vmclass generic state from secret: %w", err) + } + + return &s, nil +} + +// parseVMClassGenericFromSnapshot unmarshals a VirtualMachineClass from the jqFilter result. +func parseVMClassGenericFromSnapshot(input *pkg.HookInput) (*v1alpha2.VirtualMachineClass, error) { + snap := input.Snapshots.Get(vmClassGenericSnapshot) + if len(snap) < 1 { + return nil, nil + } + + var vmclass v1alpha2.VirtualMachineClass + err := snap[0].UnmarshalTo(&vmclass) + if err != nil { + return nil, err + } + return &vmclass, nil +} + +// vmClassGenericManifest returns a manifest for the 'generic' vmclass +// that should work for a VM on every Node in the cluster. +func vmClassGenericManifest() *v1alpha2.VirtualMachineClass { + return &v1alpha2.VirtualMachineClass{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1alpha2.SchemeGroupVersion.String(), + Kind: v1alpha2.VirtualMachineClassKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: vmClassGenericName, + Labels: map[string]string{ + "app": "virtualization-controller", + "module": settings.ModuleName, + }, + }, + Spec: v1alpha2.VirtualMachineClassSpec{ + CPU: v1alpha2.CPU{ + Type: v1alpha2.CPUTypeModel, + Model: "Nehalem", + }, + SizingPolicies: []v1alpha2.SizingPolicy{ + { + Cores: &v1alpha2.SizingPolicyCores{ + Min: 1, + Max: 4, + }, + DedicatedCores: []bool{false}, + CoreFractions: []v1alpha2.CoreFractionValue{5, 10, 20, 50, 100}, + }, + { + Cores: &v1alpha2.SizingPolicyCores{ + Min: 5, + Max: 8, + }, + DedicatedCores: []bool{false}, + CoreFractions: []v1alpha2.CoreFractionValue{20, 50, 100}, + }, + { + Cores: &v1alpha2.SizingPolicyCores{ + Min: 9, + Max: 16, + }, + DedicatedCores: []bool{true, false}, + CoreFractions: []v1alpha2.CoreFractionValue{50, 100}, + }, + { + Cores: &v1alpha2.SizingPolicyCores{ + Min: 17, + Max: 1024, + }, + DedicatedCores: []bool{true, false}, + CoreFractions: []v1alpha2.CoreFractionValue{100}, + }, + }, + }, + } +} + +// isManagedByModule checks if the vmclass has all the labels that the module sets when installing the vmclass. +func isManagedByModule(vmClass *v1alpha2.VirtualMachineClass) bool { + if vmClass == nil { + return false + } + + expectLabels := vmClassGenericManifest().Labels + + for label, expectValue := range expectLabels { + actualValue, exists := vmClass.Labels[label] + if !exists || actualValue != expectValue { + return false + } + } + return true +} + +const ( + heritageLabel = "heritage" + helmManagedByLabel = "app.kubernetes.io/managed-by" + helmReleaseNameAnno = "meta.helm.sh/release-name" + helmReleaseNamespaceAnno = "meta.helm.sh/release-namespace" + helmKeepResourceAnno = "helm.sh/resource-policy" +) + +// addPatchesToCleanupMetadata fills the patch collector with patches if the vmclass metadata +// should be cleaned.
+func addPatchesToCleanupMetadata(input *pkg.HookInput, vmClass *v1alpha2.VirtualMachineClass) { + var patches []map[string]interface{} + + labelNames := []string{ + heritageLabel, + helmManagedByLabel, + } + for _, labelName := range labelNames { + if _, exists := vmClass.Labels[labelName]; exists { + patches = append(patches, map[string]interface{}{ + "op": "remove", + "path": fmt.Sprintf("/metadata/labels/%s", jsonPatchEscape(labelName)), + "value": nil, + }) + } + } + + // Ensure "keep resource" annotation on vmclass/generic, so Helm will keep it + // in the cluster even that we've deleted its manifest from templates. + if _, exists := vmClass.Annotations[helmKeepResourceAnno]; !exists { + patches = append(patches, map[string]interface{}{ + "op": "add", + "path": fmt.Sprintf("/metadata/annotations/%s", jsonPatchEscape(helmKeepResourceAnno)), + "value": nil, + }) + } + + annoNames := []string{ + helmReleaseNameAnno, + helmReleaseNamespaceAnno, + } + for _, annoName := range annoNames { + if _, exists := vmClass.Annotations[annoName]; exists { + patches = append(patches, map[string]interface{}{ + "op": "remove", + "path": fmt.Sprintf("/metadata/annotations/%s", jsonPatchEscape(annoName)), + "value": nil, + }) + } + } + + if len(patches) == 0 { + return + } + + input.Logger.Info("Patch VirtualMachineClass/generic: remove Helm labels and annotations") + input.PatchCollector.PatchWithJSON( + patches, + vmClass.APIVersion, + vmClass.Kind, + vmClass.Namespace, + vmClass.Name, + ) +} + +func jsonPatchEscape(s string) string { + return strings.NewReplacer("~", "~0", "/", "~1").Replace(s) +} diff --git a/images/hooks/pkg/hooks/install-vmclass-generic/hook_test.go b/images/hooks/pkg/hooks/install-vmclass-generic/hook_test.go new file mode 100644 index 0000000000..ad702941b2 --- /dev/null +++ b/images/hooks/pkg/hooks/install-vmclass-generic/hook_test.go @@ -0,0 +1,382 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package install_vmclass_generic + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/tidwall/gjson" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/deckhouse/deckhouse/pkg/log" + "github.com/deckhouse/module-sdk/pkg" + "github.com/deckhouse/module-sdk/testing/mock" + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +func Test_InstallVMClassGeneric(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Create Generic VMClass Suite") +} + +var _ = Describe("Install VMClass Generic hook", func() { + var ( + snapshots *mock.SnapshotsMock + values *mock.OutputPatchableValuesCollectorMock + patchCollector *mock.PatchCollectorMock + ) + + newInput := func() *pkg.HookInput { + return &pkg.HookInput{ + Snapshots: snapshots, + Values: values, + PatchCollector: patchCollector, + Logger: log.NewNop(), + } + } + + prepareStateValuesEmpty := func() { + values.GetMock.When(vmClassInstallationStateValuesPath).Then(gjson.Result{Type: gjson.Null}) + } + + prepareStateValuesInstalled := func() { + values.GetMock.When(vmClassInstallationStateValuesPath).Then(gjson.Result{ + Type: gjson.String, + Str: `{"installedAt":"2020-01-01T00:00:00Z"}`, + }) + } + + prepareModuleStateSnapshotEmpty := func() { + snapshots.GetMock.When(moduleStateSecretSnapshot).Then([]pkg.Snapshot{}) + } + + prepareModuleStateSnapshotValid := func() { + moduleStateSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "module-state", + Namespace: "d8-virtualization", + }, + Data: map[string][]byte{ + vmClassInstallationStateSecretKey: []byte(`{"installedAt":"2020-01-01T00:00:00Z"}`), + }, + } + + snapshots.GetMock.When(moduleStateSecretSnapshot).Then([]pkg.Snapshot{ + mock.NewSnapshotMock(GinkgoT()).UnmarshalToMock.Set(func(v any) error { + secret, ok := v.(*corev1.Secret) + Expect(ok).To(BeTrue()) + *secret = *moduleStateSecret + return nil + }), + }) + } + + prepareModuleStateSnapshotNoVMClassState := func() { + moduleStateSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "module-state", + Namespace: "d8-virtualization", + }, + Data: map[string][]byte{ + "other-key": []byte(`"other-value"`), + }, + } + + snapshots.GetMock.When(moduleStateSecretSnapshot).Then([]pkg.Snapshot{ + mock.NewSnapshotMock(GinkgoT()).UnmarshalToMock.Set(func(v any) error { + secret, ok := v.(*corev1.Secret) + Expect(ok).To(BeTrue()) + *secret = *moduleStateSecret + return nil + }), + }) + } + + prepareVMClassSnapshotEmpty := func() { + snapshots.GetMock.When(vmClassGenericSnapshot).Then([]pkg.Snapshot{}) + } + + prepareVMClassSnapshotGeneric := func() { + vmClass := vmClassGenericManifest().DeepCopy() + vmClass.Annotations = map[string]string{ + helmKeepResourceAnno: "keep", + } + snapshots.GetMock.When(vmClassGenericSnapshot).Then([]pkg.Snapshot{ + mock.NewSnapshotMock(GinkgoT()).UnmarshalToMock.Set(func(v any) error { + vmClassInSnapshot, ok := v.(*v1alpha2.VirtualMachineClass) + Expect(ok).To(BeTrue()) + *vmClassInSnapshot = *vmClass + return nil + }), + }) + } + + prepareVMClassSnapshotGenericWithoutKeepResource := func() { + vmClass := vmClassGenericManifest().DeepCopy() + snapshots.GetMock.When(vmClassGenericSnapshot).Then([]pkg.Snapshot{ + mock.NewSnapshotMock(GinkgoT()).UnmarshalToMock.Set(func(v any) error { + vmClassInSnapshot, ok := v.(*v1alpha2.VirtualMachineClass) + Expect(ok).To(BeTrue()) + *vmClassInSnapshot = *vmClass + return nil + }), + }) + } + + prepareVMClassSnapshotCustom := func() { + vmClass := vmClassGenericManifest().DeepCopy() + vmClass.Labels = map[string]string{ + "created-by": "user", + } 
+ vmClass.Annotations = nil + snapshots.GetMock.When(vmClassGenericSnapshot).Then([]pkg.Snapshot{ + mock.NewSnapshotMock(GinkgoT()).UnmarshalToMock.Set(func(v any) error { + vmClassInSnapshot, ok := v.(*v1alpha2.VirtualMachineClass) + Expect(ok).To(BeTrue()) + *vmClassInSnapshot = *vmClass + return nil + }), + }) + } + + prepareVMClassSnapshotGenericHelmManaged := func() { + vmClass := vmClassGenericManifest().DeepCopy() + // Keep app, heritage, and module labels. + vmClass.Labels[helmManagedByLabel] = "Helm" + vmClass.Annotations = map[string]string{ + helmReleaseNameAnno: "somename", + helmReleaseNamespaceAnno: "some ns", + } + snapshots.GetMock.When(vmClassGenericSnapshot).Then([]pkg.Snapshot{ + mock.NewSnapshotMock(GinkgoT()).UnmarshalToMock.Set(func(v any) error { + vmClassInSnapshot, ok := v.(*v1alpha2.VirtualMachineClass) + Expect(ok).To(BeTrue()) + *vmClassInSnapshot = *vmClass + return nil + }), + }) + } + + prepareVMClassSnapshotGenericCustomHelmManaged := func() { + vmClass := vmClassGenericManifest().DeepCopy() + vmClass.Labels = map[string]string{ + "created-by": "user", + helmManagedByLabel: "Helm", + } + vmClass.Annotations = map[string]string{ + helmReleaseNameAnno: "somename", + helmReleaseNamespaceAnno: "some ns", + } + snapshots.GetMock.When(vmClassGenericSnapshot).Then([]pkg.Snapshot{ + mock.NewSnapshotMock(GinkgoT()).UnmarshalToMock.Set(func(v any) error { + vmClassInSnapshot, ok := v.(*v1alpha2.VirtualMachineClass) + Expect(ok).To(BeTrue()) + *vmClassInSnapshot = *vmClass + return nil + }), + }) + } + + expectVMClassGeneric := func(obj interface{}) { + GinkgoHelper() + vmClass, ok := obj.(*v1alpha2.VirtualMachineClass) + Expect(ok).To(BeTrue()) + Expect(vmClass.Name).To(Equal("generic")) + Expect(vmClass.Labels).To(Equal(map[string]string{ + "app": "virtualization-controller", + "module": "virtualization", + })) + } + + BeforeEach(func() { + snapshots = mock.NewSnapshotsMock(GinkgoT()) + values = mock.NewPatchableValuesCollectorMock(GinkgoT()) + patchCollector = mock.NewPatchCollectorMock(GinkgoT()) + }) + + AfterEach(func() { + snapshots = nil + values = nil + patchCollector = nil + }) + + When("module-state secret has the vmclass state", func() { + It("should set values and not recreate or patch vmclass/generic", func() { + prepareModuleStateSnapshotValid() + + patchCollector.CreateMock.Optional() + patchCollector.PatchWithJSONMock.Optional() + values.SetMock.Return() + + Expect(Reconcile(context.Background(), newInput())).To(Succeed()) + Expect(patchCollector.CreateMock.Calls()).To(HaveLen(0)) + Expect(patchCollector.PatchWithJSONMock.Calls()).To(HaveLen(0)) + Expect(values.SetMock.Calls()).To(HaveLen(1), "should set values from the Secret") + }) + }) + + When("no module-state secret and no vmclass", func() { + BeforeEach(func() { + prepareModuleStateSnapshotEmpty() + }) + + When("no state in values and no vmclass", func() { + It("should create vmclass/generic and set values", func() { + prepareVMClassSnapshotEmpty() + prepareStateValuesEmpty() + + values.SetMock.Return() + patchCollector.CreateMock.Set(expectVMClassGeneric) + + Expect(Reconcile(context.Background(), newInput())).To(Succeed()) + Expect(patchCollector.CreateMock.Calls()).To(HaveLen(1), "should call Create once") + Expect(values.SetMock.Calls()).To(HaveLen(1), "should call values.Set once") + }) + }) + When("state is present in values", func() { + It("should not create vmclass/generic and not set values", func() { + prepareStateValuesInstalled() + + values.SetMock.Optional() +
patchCollector.CreateMock.Optional() + + Expect(Reconcile(context.Background(), newInput())).To(Succeed()) + Expect(patchCollector.CreateMock.Calls()).To(HaveLen(0)) + Expect(values.SetMock.Calls()).To(HaveLen(0)) + }) + }) + }) + + When("module-state secret is present without vmclass state", func() { + BeforeEach(func() { + prepareModuleStateSnapshotNoVMClassState() + }) + + When("state is in values", func() { + It("should not change vmclass/generic", func() { + prepareStateValuesInstalled() + + values.SetMock.Optional() + patchCollector.CreateMock.Optional() + patchCollector.PatchWithJSONMock.Optional() + + Expect(Reconcile(context.Background(), newInput())).To(Succeed()) + Expect(patchCollector.CreateMock.Calls()).To(HaveLen(0)) + Expect(patchCollector.PatchWithJSONMock.Calls()).To(HaveLen(0)) + Expect(values.SetMock.Calls()).To(HaveLen(0)) + }) + }) + + When("no state in values", func() { + BeforeEach(func() { + prepareStateValuesEmpty() + }) + + When("no vmclass/generic", func() { + It("should create vmclass/generic and set values", func() { + prepareVMClassSnapshotEmpty() + + values.SetMock.Return() + patchCollector.CreateMock.Set(expectVMClassGeneric) + + Expect(Reconcile(context.Background(), newInput())).To(Succeed()) + Expect(patchCollector.CreateMock.Calls()).To(HaveLen(1)) + Expect(values.SetMock.Calls()).To(HaveLen(1)) + }) + }) + + When("vmclass/generic is present", func() { + It("should not change vmclass/generic and set values", func() { + prepareVMClassSnapshotGeneric() + + values.SetMock.Return() + patchCollector.CreateMock.Optional() + patchCollector.PatchWithJSONMock.Optional() + + Expect(Reconcile(context.Background(), newInput())).To(Succeed()) + Expect(patchCollector.CreateMock.Calls()).To(HaveLen(0)) + Expect(patchCollector.PatchWithJSONMock.Calls()).To(HaveLen(0)) + Expect(values.SetMock.Calls()).To(HaveLen(1)) + }) + }) + + When("vmclass/generic without keep-resource annotation is present", func() { + It("should not change vmclass/generic and set values", func() { + prepareVMClassSnapshotGenericWithoutKeepResource() + + values.SetMock.Return() + patchCollector.CreateMock.Optional() + patchCollector.PatchWithJSONMock.Return() + + Expect(Reconcile(context.Background(), newInput())).To(Succeed()) + Expect(patchCollector.CreateMock.Calls()).To(HaveLen(0)) + Expect(patchCollector.PatchWithJSONMock.Calls()).To(HaveLen(1)) + Expect(values.SetMock.Calls()).To(HaveLen(1)) + }) + }) + + When("vmclass/generic has helm label", func() { + It("should set values and remove helm labels", func() { + prepareVMClassSnapshotGenericHelmManaged() + + patchCollector.CreateMock.Optional() + patchCollector.PatchWithJSONMock.Return() + values.SetMock.Return() + + Expect(Reconcile(context.Background(), newInput())).To(Succeed()) + Expect(patchCollector.CreateMock.Calls()).To(HaveLen(0)) + Expect(patchCollector.PatchWithJSONMock.Calls()).To(HaveLen(1)) + Expect(values.SetMock.Calls()).To(HaveLen(1), "should set values from the Secret") + }) + }) + + When("custom vmclass/generic is present", func() { + It("should set values and not patch vmclass/generic", func() { + prepareVMClassSnapshotCustom() + + patchCollector.CreateMock.Optional() + patchCollector.PatchWithJSONMock.Optional() + values.SetMock.Return() + + Expect(Reconcile(context.Background(), newInput())).To(Succeed()) + Expect(patchCollector.CreateMock.Calls()).To(HaveLen(0)) + Expect(patchCollector.PatchWithJSONMock.Calls()).To(HaveLen(0)) + Expect(values.SetMock.Calls()).To(HaveLen(1), "should set values from the Secret") + }) + }) 
+ + When("custom vmclass/generic has helm label", func() { + It("should set values and not remove helm values", func() { + prepareVMClassSnapshotGenericCustomHelmManaged() + + patchCollector.CreateMock.Optional() + patchCollector.PatchWithJSONMock.Optional() + values.SetMock.Return() + + Expect(Reconcile(context.Background(), newInput())).To(Succeed()) + Expect(patchCollector.CreateMock.Calls()).To(HaveLen(0)) + Expect(patchCollector.PatchWithJSONMock.Calls()).To(HaveLen(0)) + Expect(values.SetMock.Calls()).To(HaveLen(1), "should set values from the Secret") + }) + }) + }) + }) +}) diff --git a/images/hooks/pkg/hooks/prevent-default-vmclasses-deletion/hook.go b/images/hooks/pkg/hooks/prevent-default-vmclasses-deletion/hook.go deleted file mode 100644 index a47b422f38..0000000000 --- a/images/hooks/pkg/hooks/prevent-default-vmclasses-deletion/hook.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2025 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package prevent_default_vmclasses_deletion - -import ( - "context" - "fmt" - - "hooks/pkg/settings" - - "github.com/deckhouse/virtualization/api/core" - "github.com/deckhouse/virtualization/api/core/v1alpha2" - - "github.com/deckhouse/module-sdk/pkg" - "github.com/deckhouse/module-sdk/pkg/registry" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - removePassthroughHookName = "Prevent default VirtualMachineClasses deletion" - removePassthroughHookJQFilter = `.metadata` - // see https://helm.sh/docs/howto/charts_tips_and_tricks/#tell-helm-not-to-uninstall-a-resource - helmResourcePolicyKey = "helm.sh/resource-policy" - helmResourcePolicyKeep = "keep" - apiVersion = core.GroupName + "/" + v1alpha2.Version -) - -var _ = registry.RegisterFunc(config, Reconcile) - -var config = &pkg.HookConfig{ - OnBeforeHelm: &pkg.OrderedConfig{Order: 10}, - Kubernetes: []pkg.KubernetesConfig{ - { - Name: removePassthroughHookName, - APIVersion: apiVersion, - Kind: v1alpha2.VirtualMachineClassKind, - JqFilter: removePassthroughHookJQFilter, - - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "module": settings.ModuleName, - }, - }, - }, - }, - - Queue: fmt.Sprintf("modules/%s", settings.ModuleName), -} - -func Reconcile(_ context.Context, input *pkg.HookInput) error { - vmcs := input.Snapshots.Get(removePassthroughHookName) - - if len(vmcs) == 0 { - input.Logger.Info("No VirtualMachineClasses found, nothing to do") - return nil - } - - for _, vmc := range vmcs { - metadata := &metav1.ObjectMeta{} - if err := vmc.UnmarshalTo(metadata); err != nil { - input.Logger.Error(fmt.Sprintf("Failed to unmarshal metadata VirtualMachineClasses %v", err)) - } - - policy := metadata.GetAnnotations()[helmResourcePolicyKey] - if policy == helmResourcePolicyKeep { - input.Logger.Info(fmt.Sprintf("VirtualMachineClass %s already has helm.sh/resource-policy=keep", metadata.Name)) - continue - } - - op := "add" - if policy != "" { - op = "replace" - input.Logger.Info(fmt.Sprintf("VirtualMachineClass %s has helm.sh/resource-policy=%s, will be replaced with 
helm.sh/resource-policy=keep", metadata.Name, policy)) - } - patch := []interface{}{ - map[string]string{ - "op": op, - "path": "/metadata/annotations/helm.sh~1resource-policy", - "value": helmResourcePolicyKeep, - }, - } - input.PatchCollector.JSONPatch(patch, apiVersion, v1alpha2.VirtualMachineClassKind, "", metadata.Name) - input.Logger.Info(fmt.Sprintf("Added helm.sh/resource-policy=keep to VirtualMachineClass %s", metadata.Name)) - } - - return nil -} diff --git a/openapi/values.yaml b/openapi/values.yaml index a4da0203c7..b93ec2c56c 100644 --- a/openapi/values.yaml +++ b/openapi/values.yaml @@ -138,3 +138,7 @@ properties: properties: error: type: string + moduleState: + type: object + default: {} + additionalProperties: true diff --git a/templates/module-state-secret.yaml b/templates/module-state-secret.yaml new file mode 100644 index 0000000000..9397b7862b --- /dev/null +++ b/templates/module-state-secret.yaml @@ -0,0 +1,14 @@ +{{- if hasKey .Values.virtualization.internal "moduleState" }} +--- +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: module-state + namespace: d8-{{ $.Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "d8-control-plane-manager")) | nindent 2 }} +data: +{{- range $k, $v := .Values.virtualization.internal.moduleState }} + {{$k}}: {{$v | toJson | b64enc | quote}} +{{- end }} +{{- end }} diff --git a/templates/virtualization-controller/vmclasses-default.yaml b/templates/virtualization-controller/vmclasses-default.yaml deleted file mode 100644 index f2beb27764..0000000000 --- a/templates/virtualization-controller/vmclasses-default.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: virtualization.deckhouse.io/v1alpha2 -kind: VirtualMachineClass -metadata: - name: generic - {{- include "helm_lib_module_labels" (list . 
(dict "app" "virtualization-controller")) | nindent 2 }} -spec: - nodeSelector: - matchExpressions: - - key: node-role.kubernetes.io/control-plane - operator: DoesNotExist - cpu: - type: Model - model: Nehalem - sizingPolicies: - - cores: - min: 1 - max: 4 - dedicatedCores: [false] - coreFractions: [5, 10, 20, 50, 100] - - cores: - min: 5 - max: 8 - dedicatedCores: [false] - coreFractions: [20, 50, 100] - - cores: - min: 9 - max: 16 - dedicatedCores: [true, false] - coreFractions: [50, 100] - - cores: - min: 17 - max: 1024 - dedicatedCores: [true, false] - coreFractions: [100] diff --git a/tools/kubeconform/fixtures/module-values.yaml b/tools/kubeconform/fixtures/module-values.yaml index c817ddfd6b..9bb986b7c2 100644 --- a/tools/kubeconform/fixtures/module-values.yaml +++ b/tools/kubeconform/fixtures/module-values.yaml @@ -394,6 +394,7 @@ virtualization: - 10.0.10.0/24 - 10.0.20.0/24 - 10.0.30.0/24 + moduleState: {} virtConfig: phase: Deployed parallelMigrationsPerCluster: 2 From 54d0083f878192d7578863a493efc09382ce9c7e Mon Sep 17 00:00:00 2001 From: Pavel Tishkov Date: Tue, 18 Nov 2025 18:31:55 +0300 Subject: [PATCH 7/7] chore(module): switched metrics scraping for the virtualization-controller from scrapeconfig to servicemonitor (#1651) switched metrics scraping for the virtualization-controller from scrapeconfig to servicemonitor --------- Signed-off-by: Pavel Tishkov --- ...irtual-machine.json => propagated-vm.json} | 76 +++++----- ...tual-machines.json => propagated-vms.json} | 136 +++++++++++------- .../virtualization-controller.yaml | 4 +- .../scrape-config.yaml | 22 --- .../service-monitor.yaml | 35 +++++ werf.yaml | 1 + 6 files changed, 161 insertions(+), 113 deletions(-) rename monitoring/grafana-dashboards/main/{virtual-machine.json => propagated-vm.json} (94%) rename monitoring/grafana-dashboards/main/{virtual-machines.json => propagated-vms.json} (95%) delete mode 100644 templates/virtualization-controller/scrape-config.yaml create mode 100644 templates/virtualization-controller/service-monitor.yaml diff --git a/monitoring/grafana-dashboards/main/virtual-machine.json b/monitoring/grafana-dashboards/main/propagated-vm.json similarity index 94% rename from monitoring/grafana-dashboards/main/virtual-machine.json rename to monitoring/grafana-dashboards/main/propagated-vm.json index 9cda40f641..c74d8f2df9 100644 --- a/monitoring/grafana-dashboards/main/virtual-machine.json +++ b/monitoring/grafana-dashboards/main/propagated-vm.json @@ -158,7 +158,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum by (name) ((d8_virtualization_virtualmachine_status_phase{namespace=\"$namespace\", name=\"$name\"} == 1) * on(phase) group_left()\n (\n label_replace(vector(1), \"phase\", \"Degraded\", \"\", \"\") or\n label_replace(vector(2), \"phase\", \"Migrating\", \"\", \"\") or\n label_replace(vector(3), \"phase\", \"Pause\", \"\", \"\") or\n label_replace(vector(4), \"phase\", \"Pending\", \"\", \"\") or\n label_replace(vector(5), \"phase\", \"Running\", \"\", \"\") or\n label_replace(vector(6), \"phase\", \"Starting\", \"\", \"\") or\n label_replace(vector(7), \"phase\", \"Stopped\", \"\", \"\") or\n label_replace(vector(8), \"phase\", \"Stopping\", \"\", \"\") or\n label_replace(vector(9), \"phase\", \"Terminating\", \"\", \"\")\n ))", + "expr": "avg by (name) (\n sum by (name,pod) (\n (d8_virtualization_virtualmachine_status_phase{namespace=\"$namespace\", name=\"$name\"} == 1) * on(phase) group_left()\n (\n label_replace(vector(1), \"phase\", \"Degraded\", \"\", \"\") or\n 
label_replace(vector(2), \"phase\", \"Migrating\", \"\", \"\") or\n label_replace(vector(3), \"phase\", \"Pause\", \"\", \"\") or\n label_replace(vector(4), \"phase\", \"Pending\", \"\", \"\") or\n label_replace(vector(5), \"phase\", \"Running\", \"\", \"\") or\n label_replace(vector(6), \"phase\", \"Starting\", \"\", \"\") or\n label_replace(vector(7), \"phase\", \"Stopped\", \"\", \"\") or\n label_replace(vector(8), \"phase\", \"Stopping\", \"\", \"\") or\n label_replace(vector(9), \"phase\", \"Terminating\", \"\", \"\")\n ))\n)", "instant": true, "legendFormat": "{{name}}", "range": false, @@ -502,7 +502,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum(\n sum by (name, namespace) (\n d8_virtualization_virtualdisk_capacity_bytes{namespace=~\"$namespace\"} * on (name,namespace,uid) d8_virtualization_virtualdisk_status_in_use{virtualmachine=~\"$name\"}\n )\n)\n", + "expr": "sum(\n avg by (name, namespace)(\n d8_virtualization_virtualdisk_capacity_bytes{namespace=~\"$namespace\"} \n * on (name,namespace,pod) d8_virtualization_virtualdisk_status_in_use{virtualmachine=~\"$name\"}\n )\n)\n", "instant": true, "legendFormat": "__auto", "range": false, @@ -574,7 +574,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum by (name, namespace) (\n d8_virtualization_virtualmachine_filesystem_used_bytes{namespace=\"$namespace\", name=\"$name\", type!=\"cloudinit\"})\n/ \nsum by (name, namespace) (d8_virtualization_virtualmachine_filesystem_capacity_bytes / 100)\n\n", + "expr": "avg by (name,namespace) (\n sum by (name, namespace,pod) (\n d8_virtualization_virtualmachine_filesystem_used_bytes{namespace=\"$namespace\", name=\"$name\", type!=\"cloudinit\",mount_point!~\".*(io.containerd.snapshotter.v1.erofs|/var/lib/kubelet/pods).*\"})\n / \n sum by (name, namespace,pod) (d8_virtualization_virtualmachine_filesystem_capacity_bytes / 100)\n)\n", "instant": true, "legendFormat": "{{mount_point}}", "range": false, @@ -637,7 +637,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum by (name, namespace) (\n d8_virtualization_virtualmachine_filesystem_capacity_bytes{namespace=\"$namespace\", name=\"$name\", type!=\"cloudinit\"}\n)\n", + "expr": "avg by (name,namespace) (\n sum by (name, namespace,pod) (\n d8_virtualization_virtualmachine_filesystem_capacity_bytes{namespace=\"$namespace\", name=\"$name\", type!=\"cloudinit\",mount_point!~\".*(io.containerd.snapshotter.v1.erofs|/var/lib/kubelet/pods).*\"}\n )\n)\n", "instant": true, "legendFormat": "Capacity", "range": false, @@ -650,7 +650,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum by (name, namespace) (\n d8_virtualization_virtualmachine_filesystem_used_bytes{namespace=\"$namespace\", name=\"$name\", type!=\"cloudinit\"}\n)\n", + "expr": "avg by (name,namespace) (\n sum by (name, namespace) (\n d8_virtualization_virtualmachine_filesystem_used_bytes{namespace=\"$namespace\", name=\"$name\", type!=\"cloudinit\",mount_point!~\".*(io.containerd.snapshotter.v1.erofs|/var/lib/kubelet/pods).*\"}\n )\n)", "hide": false, "instant": true, "legendFormat": "Used", @@ -734,7 +734,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum by (name) (d8_virtualization_virtualmachine_agent_ready{namespace=\"$namespace\", name=\"$name\"})", + "expr": "avg by (name) (\n count by (name,pod) (\n d8_virtualization_virtualmachine_agent_ready{namespace=\"$namespace\", name=\"$name\"}\n )\n)\n", "instant": true, "legendFormat": "__auto", "range": false, @@ -817,7 +817,7 @@ }, "editorMode": "code", "exemplar": false, - 
"expr": "sum by (name) (\n last_over_time(\n d8_virtualization_virtualmachine_configuration_applied{namespace=\"$namespace\", name=\"$name\"}[1m]\n )\n)", + "expr": "avg by (name) (\n count by (name,pod) (\n d8_virtualization_virtualmachine_configuration_applied{namespace=\"$namespace\", name=\"$name\"}\n )\n)\n", "instant": true, "legendFormat": "__auto", "range": false, @@ -1106,7 +1106,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (namespace,name) (rate(d8_virtualization_virtualmachine_cpu_usage_milliseconds_total{namespace=\"$namespace\", name=\"$name\"}[$__rate_interval])) / 1000 \n/\n(\n sum by (namespace,name) (d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"}) / 100\n)", + "expr": "sum by (namespace,name) (\n rate(\n d8_virtualization_virtualmachine_cpu_usage_milliseconds_total{namespace=\"$namespace\", name=\"$name\"}[$__rate_interval]\n )\n) / 1000 \n/\n(\n avg by (namespace,name) (\n sum by (namespace,name,pod) (d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"}) / 100\n )\n)", "hide": false, "instant": false, "legendFormat": "OS Usage", @@ -1119,7 +1119,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "(\n sum by (namespace,name) (rate(d8_virtualization_virtualmachine_hypervisor_cpu_usage_milliseconds_total{namespace=\"$namespace\", name=\"$name\"}[$__rate_interval])) / 1000 \n)\n/\n(\n sum by (namespace,name) (d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"}) / 100\n)\n", + "expr": "(\n sum by (namespace,name) (\n rate(\n d8_virtualization_virtualmachine_hypervisor_cpu_usage_milliseconds_total{namespace=\"$namespace\", name=\"$name\"}[$__rate_interval]\n )\n ) / 1000 \n)\n/\n(\n avg by (namespace,name) (\n sum by (namespace,name,pod) (\n d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"}\n ) / 100\n )\n)\n", "hide": false, "instant": false, "legendFormat": "Hypervisor overhead", @@ -1132,7 +1132,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "(\n sum by (namespace,name) ((d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"} * d8_virtualization_virtualmachine_cpu_core_fraction) / 100)\n)\n/\n(\n sum by (namespace,name) (d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"}) / 100\n)\n", + "expr": "avg by (namespace,name) (\n (\n sum by (namespace,name,pod) (\n (\n d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"} \n * \n d8_virtualization_virtualmachine_cpu_core_fraction\n ) / 100\n )\n )\n /\n (\n sum by (namespace,name,pod) (\n d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"}\n ) / 100\n )\n)\n\n", "hide": false, "instant": false, "legendFormat": "Reservation", @@ -1656,7 +1656,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (name) (d8_virtualization_virtualmachine_configuration_applied{namespace=\"$namespace\", name=\"$name\"})", + "expr": "avg by (name) (\n count by (name,pod) (\n d8_virtualization_virtualmachine_configuration_applied{namespace=\"$namespace\", name=\"$name\"}\n )\n)\n", "instant": false, "legendFormat": "__auto", "range": true, @@ -1792,7 +1792,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (name) ((d8_virtualization_virtualmachine_status_phase{namespace=\"$namespace\", name=\"$name\"} == 1) * on(phase) group_left()\n (\n label_replace(vector(1), \"phase\", \"Degraded\", 
\"\", \"\") or\n label_replace(vector(2), \"phase\", \"Migrating\", \"\", \"\") or\n label_replace(vector(3), \"phase\", \"Pause\", \"\", \"\") or\n label_replace(vector(4), \"phase\", \"Pending\", \"\", \"\") or\n label_replace(vector(5), \"phase\", \"Running\", \"\", \"\") or\n label_replace(vector(6), \"phase\", \"Starting\", \"\", \"\") or\n label_replace(vector(7), \"phase\", \"Stopped\", \"\", \"\") or\n label_replace(vector(8), \"phase\", \"Stopping\", \"\", \"\") or\n label_replace(vector(9), \"phase\", \"Terminating\", \"\", \"\")\n ))", + "expr": "avg by (name) (\n sum by (name,pod) (\n (d8_virtualization_virtualmachine_status_phase{namespace=\"$namespace\", name=\"$name\"} == 1) * on(phase) group_left()\n (\n label_replace(vector(1), \"phase\", \"Degraded\", \"\", \"\") or\n label_replace(vector(2), \"phase\", \"Migrating\", \"\", \"\") or\n label_replace(vector(3), \"phase\", \"Pause\", \"\", \"\") or\n label_replace(vector(4), \"phase\", \"Pending\", \"\", \"\") or\n label_replace(vector(5), \"phase\", \"Running\", \"\", \"\") or\n label_replace(vector(6), \"phase\", \"Starting\", \"\", \"\") or\n label_replace(vector(7), \"phase\", \"Stopped\", \"\", \"\") or\n label_replace(vector(8), \"phase\", \"Stopping\", \"\", \"\") or\n label_replace(vector(9), \"phase\", \"Terminating\", \"\", \"\")\n ))\n)", "instant": false, "legendFormat": "{{name}}", "range": true, @@ -2052,7 +2052,7 @@ "h": 8, "w": 12, "x": 0, - "y": 23 + "y": 37 }, "id": 69, "options": { @@ -2106,7 +2106,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (namespace,name) ((d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"} * d8_virtualization_virtualmachine_cpu_core_fraction) / 100)\n", + "expr": "avg by (namespace,name) (\n sum by (namespace,name,pod) (\n (\n d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"} \n * \n d8_virtualization_virtualmachine_cpu_core_fraction\n ) / 100\n )\n)\n\n", "hide": false, "instant": false, "legendFormat": "Guaranteed cores", @@ -2119,7 +2119,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (namespace,name) (d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"})\n", + "expr": "avg by (namespace,name) (\n sum by (namespace,name,pod) (\n d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"}\n )\n)", "hide": false, "instant": false, "legendFormat": "Current cores", @@ -2229,7 +2229,7 @@ "h": 8, "w": 12, "x": 12, - "y": 23 + "y": 37 }, "id": 51, "options": { @@ -2257,7 +2257,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (namespace,name) ((d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"} * d8_virtualization_virtualmachine_cpu_core_fraction) / 100)\n", + "expr": "avg by (namespace,name) (\n sum by (namespace,name,pod) (\n (\n d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"} \n * \n d8_virtualization_virtualmachine_cpu_core_fraction\n ) / 100\n )\n)", "hide": false, "instant": false, "legendFormat": "Current cores guaranteed ", @@ -2270,7 +2270,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (namespace,name) ((d8_virtualization_virtualmachine_configuration_cpu_cores{namespace=\"$namespace\", name=\"$name\"} * d8_virtualization_virtualmachine_configuration_cpu_core_fraction) / 100)\n", + "expr": "avg by (namespace,name) (\n sum by (namespace,name,pod) (\n (\n 
d8_virtualization_virtualmachine_configuration_cpu_cores{namespace=\"$namespace\", name=\"$name\"} \n * \n d8_virtualization_virtualmachine_configuration_cpu_core_fraction\n ) / 100\n )\n)", "hide": false, "instant": false, "legendFormat": "Configured cores guaranteed ", @@ -2283,7 +2283,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (namespace,name) (d8_virtualization_virtualmachine_configuration_cpu_cores{namespace=\"$namespace\", name=\"$name\"})\n", + "expr": "avg by (namespace,name) (\n sum by (namespace,name,pod) (\n d8_virtualization_virtualmachine_configuration_cpu_cores{namespace=\"$namespace\", name=\"$name\"}\n )\n)", "hide": false, "instant": false, "legendFormat": "Configured cores", @@ -2296,7 +2296,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (namespace,name) (d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"})\n", + "expr": "avg by (namespace,name) (\n sum by (namespace,name,pod) (\n d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"}\n )\n)", "hide": false, "instant": false, "legendFormat": "Current cores", @@ -2432,7 +2432,7 @@ "h": 8, "w": 24, "x": 0, - "y": 31 + "y": 45 }, "id": 72, "options": { @@ -2486,7 +2486,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (namespace,name) ((d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"} * d8_virtualization_virtualmachine_cpu_core_fraction) / 100)\n", + "expr": "avg by (namespace,name) (\nsum by (namespace,name,pod) ((d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"} * d8_virtualization_virtualmachine_cpu_core_fraction) / 100)\n)", "hide": false, "instant": false, "legendFormat": "Guaranteed cores", @@ -2499,7 +2499,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (namespace,name) (d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"})\n", + "expr": "avg by (namespace,name) (\nsum by (namespace,name,pod) (d8_virtualization_virtualmachine_cpu_cores{namespace=\"$namespace\", name=\"$name\"})\n)", "hide": false, "instant": false, "legendFormat": "Current cores", @@ -3226,7 +3226,7 @@ "h": 12, "w": 24, "x": 0, - "y": 25 + "y": 55 }, "id": 83, "options": { @@ -3252,7 +3252,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "d8_virtualization_virtualdisk_capacity_bytes{namespace=~\"$namespace\"} * on (name,uid,namespace) d8_virtualization_virtualdisk_status_in_use{virtualmachine=~\"$name\"}", + "expr": "avg by (namespace,name) (\nd8_virtualization_virtualdisk_capacity_bytes{namespace=~\"$namespace\"} * on (name,pod,namespace) d8_virtualization_virtualdisk_status_in_use{virtualmachine=~\"$name\"}\n)", "hide": false, "instant": false, "legendFormat": "{{name}}", @@ -3367,7 +3367,7 @@ "h": 14, "w": 12, "x": 0, - "y": 37 + "y": 67 }, "id": 44, "maxPerRow": 2, @@ -3505,7 +3505,7 @@ "h": 8, "w": 12, "x": 0, - "y": 60 + "y": 82 }, "id": 7, "options": { @@ -3607,7 +3607,7 @@ "h": 8, "w": 12, "x": 12, - "y": 60 + "y": 82 }, "id": 8, "options": { @@ -3709,7 +3709,7 @@ "h": 8, "w": 12, "x": 0, - "y": 68 + "y": 90 }, "id": 12, "options": { @@ -3811,7 +3811,7 @@ "h": 8, "w": 12, "x": 12, - "y": 68 + "y": 90 }, "id": 11, "options": { @@ -3913,7 +3913,7 @@ "h": 8, "w": 12, "x": 0, - "y": 76 + "y": 98 }, "id": 10, "options": { @@ -4015,7 +4015,7 @@ "h": 8, "w": 12, "x": 12, - "y": 76 + "y": 98 }, "id": 9, "options": { @@ -4691,8 +4691,8 @@ { "current": { "selected": 
false, - "text": "bunch-00", - "value": "bunch-00" + "text": "default", + "value": "default" }, "datasource": { "type": "prometheus", @@ -4719,8 +4719,8 @@ { "current": { "selected": false, - "text": "vm-001", - "value": "vm-001" + "text": "node", + "value": "node" }, "datasource": { "type": "prometheus", @@ -4781,7 +4781,7 @@ "type": "prometheus", "uid": "${ds_prometheus}" }, - "definition": "label_values(d8_virtualization_virtualmachine_filesystem_capacity_bytes{namespace=\"$namespace\", name=\"$name\"},mount_point)", + "definition": "label_values(d8_virtualization_virtualmachine_filesystem_capacity_bytes{namespace=\"$namespace\", name=\"$name\", mount_point!~\".*(/var/lib/containerd/io.containerd.snapshotter.v1.erofs|/var/lib/kubelet/pods).*\"},mount_point)", "hide": 2, "includeAll": true, "label": "Mountpoint", @@ -4790,7 +4790,7 @@ "options": [], "query": { "qryType": 1, - "query": "label_values(d8_virtualization_virtualmachine_filesystem_capacity_bytes{namespace=\"$namespace\", name=\"$name\"},mount_point)", + "query": "label_values(d8_virtualization_virtualmachine_filesystem_capacity_bytes{namespace=\"$namespace\", name=\"$name\", mount_point!~\".*(/var/lib/containerd/io.containerd.snapshotter.v1.erofs|/var/lib/kubelet/pods).*\"},mount_point)", "refId": "PrometheusVariableQueryEditor-VariableQuery" }, "refresh": 2, diff --git a/monitoring/grafana-dashboards/main/virtual-machines.json b/monitoring/grafana-dashboards/main/propagated-vms.json similarity index 95% rename from monitoring/grafana-dashboards/main/virtual-machines.json rename to monitoring/grafana-dashboards/main/propagated-vms.json index 022743eb06..b375d7d5d8 100644 --- a/monitoring/grafana-dashboards/main/virtual-machines.json +++ b/monitoring/grafana-dashboards/main/propagated-vms.json @@ -128,7 +128,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "count (d8_virtualization_virtualmachine_status_phase{namespace=~\"$namespace\"}==1)", + "expr": "avg by (namespace) (\n count by (namespace,pod) (\n d8_virtualization_virtualmachine_status_phase{namespace=~\"$namespace\"}==1\n )\n) or absent(d8_virtualization_virtualmachine_status_phase{namespace=~\"$namespace\"}==1) * vector(0)", "hide": false, "instant": true, "legendFormat": "Total VM count", @@ -142,7 +142,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "count by (phase) (d8_virtualization_virtualmachine_status_phase{namespace=~\"$namespace\"}==1)", + "expr": "avg by (namespace) (\n count by (namespace,pod) (\n d8_virtualization_virtualmachine_status_phase{namespace=~\"$namespace\"}==1\n )\n) or absent(d8_virtualization_virtualmachine_status_phase{namespace=~\"$namespace\"}==1) * vector(0)\n\n", "instant": true, "legendFormat": "__auto", "range": false, @@ -155,7 +155,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "count (d8_virtualization_virtualmachine_agent_ready{namespace=~\"$namespace\"}==1) or vector(0)", + "expr": "avg by (namespace) (\n count by (namespace,pod) (\n d8_virtualization_virtualmachine_agent_ready{namespace=~\"$namespace\"}==1\n ) \n) or absent(d8_virtualization_virtualmachine_agent_ready{namespace=~\"$namespace\"}==1) * vector(0)\n", "hide": false, "instant": true, "legendFormat": "Agent ready", @@ -287,7 +287,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "count (d8_virtualization_virtualdisk_status_phase{namespace=~\"$namespace\"}==1)", + "expr": "avg by (namespace) (\n count by (namespace,pod) (\n d8_virtualization_virtualdisk_status_phase{namespace=~\"$namespace\"}==1\n )\n)", "hide": false, "instant": 
true, "legendFormat": "Total disk count", @@ -301,7 +301,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "count by (phase) (d8_virtualization_virtualdisk_status_phase{namespace=~\"$namespace\"}==1)", + "expr": "avg by (namespace) (\n count by (namespace,pod) (\n d8_virtualization_virtualdisk_status_phase{namespace=~\"$namespace\"}==1\n )\n)", "instant": true, "legendFormat": "__auto", "range": false, @@ -581,7 +581,7 @@ "values": false }, "showPercentChange": false, - "textMode": "auto", + "textMode": "value_and_name", "wideLayout": true }, "pluginVersion": "10.4.19", @@ -593,7 +593,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum (\nd8_virtualization_virtualimage_status_phase{namespace=~\"$namespace\"}==1\n) or vector(0)\n", + "expr": "avg by (namespace) (\n count by (namespace,pod) (\n d8_virtualization_virtualimage_status_phase{namespace=~\"$namespace\"}==1\n )\n) or absent(d8_virtualization_virtualimage_status_phase{namespace=~\"$namespace\"}==1) * vector(0)", "hide": false, "instant": true, "legendFormat": "VirtualImages", @@ -607,7 +607,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum (\nd8_virtualization_virtualdisksnapshot_status_phase{namespace=~\"$namespace\"}==1\n) or vector(0)\n", + "expr": "avg by (namespace) (\n count by (namespace,pod) (\n d8_virtualization_virtualdisksnapshot_status_phase{namespace=~\"$namespace\"}==1\n )\n) or absent(d8_virtualization_virtualdisksnapshot_status_phase{namespace=~\"$namespace\"}==1) * vector(0)", "hide": false, "instant": true, "legendFormat": "VirtualDiskSnapshots", @@ -621,7 +621,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum (\nd8_virtualization_virtualmachinesnapshot_status_phase{namespace=~\"$namespace\"}==1\n) or vector(0)\n", + "expr": "avg by (namespace) (\n count by (namespace,pod) (\n d8_virtualization_virtualmachinesnapshot_status_phase{namespace=~\"$namespace\"}==1\n )\n) or absent(d8_virtualization_virtualmachinesnapshot_status_phase{namespace=~\"$namespace\"}==1) * vector(0)", "hide": false, "instant": true, "legendFormat": "VirtualMachineSnapshots", @@ -733,7 +733,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "sum (\nkube_resourcequota{namespace=~\"$namespace\", resource=\"requests.storage\", type=\"hard\"}\n) \n", + "expr": "sum (\nkube_resourcequota{namespace=~\"$namespace\", resource=\"requests.storage\", type=\"hard\"}\n) or vector(0)\n", "hide": false, "instant": true, "legendFormat": "Storage", @@ -1398,7 +1398,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "max by (namespace, name, phase) (d8_virtualization_virtualmachine_status_phase{namespace=~\"$namespace\"})==1", + "expr": "avg by (namespace, name, phase) (d8_virtualization_virtualmachine_status_phase{namespace=~\"$namespace\"})==1", "format": "table", "hide": false, "instant": true, @@ -1413,7 +1413,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "max by (namespace, name, phase) (d8_virtualization_virtualmachine_agent_ready{namespace=~\"$namespace\"})", + "expr": "avg by (namespace, name, phase) (d8_virtualization_virtualmachine_agent_ready{namespace=~\"$namespace\"})", "format": "table", "hide": false, "instant": true, @@ -1445,7 +1445,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (namespace, name) (d8_virtualization_virtualmachine_cpu_cores{namespace=~\"$namespace\"})", + "expr": "avg by (namespace, name) (d8_virtualization_virtualmachine_cpu_cores{namespace=~\"$namespace\"})", "format": "table", "hide": false, "instant": true, 
@@ -1611,7 +1611,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "count by (namespace, name) (\n label_replace(\n d8_virtualization_virtualdisk_status_in_use{namespace=~\"$namespace\"}, \n \"name\", \n \"$1\", \n \"virtualmachine\", \n \"(.+)\"\n )\n) * on(namespace, name) group_left() (\n d8_virtualization_virtualmachine_status_phase{namespace=~\"$namespace\"} == 1\n)", + "expr": "avg by (namespace, name) (\n count by (namespace, name,pod) (\n label_replace(\n d8_virtualization_virtualdisk_status_in_use{namespace=~\"$namespace\"}, \n \"name\", \n \"$1\", \n \"virtualmachine\", \n \"(.+)\"\n )\n ) * on(namespace, name,pod) group_left() (\n d8_virtualization_virtualmachine_status_phase{namespace=~\"$namespace\"} == 1\n )\n)", "format": "table", "hide": false, "instant": true, @@ -1628,7 +1628,7 @@ }, "editorMode": "code", "exemplar": false, - "expr": "count by (namespace, name) (\n label_replace(\n d8_virtualization_virtualmachinesnapshot_info{namespace=~\"$namespace\"}, \n \"name\", \n \"$1\", \n \"virtualmachine\", \n \"(.+)\"\n )\n) * on(namespace, name) group_left() (\n d8_virtualization_virtualmachine_status_phase{namespace=~\"$namespace\"} == 1\n)", + "expr": "avg by (namespace, name) (\n count by (namespace, name, pod) (\n label_replace(\n d8_virtualization_virtualmachinesnapshot_info{namespace=~\"$namespace\"}, \n \"name\", \n \"$1\", \n \"virtualmachine\", \n \"(.+)\"\n )\n ) * on(namespace, name,pod) group_left() (\n d8_virtualization_virtualmachine_status_phase{namespace=~\"$namespace\"} == 1\n )\n)", "format": "table", "hide": false, "instant": true, @@ -1773,8 +1773,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" } ] }, @@ -1811,7 +1810,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum by (name,namespace) ((d8_virtualization_virtualmachine_status_phase{namespace=~\"$namespace\"} == 1) * on(phase) group_left()\n (\n label_replace(vector(1), \"phase\", \"Degraded\", \"\", \"\") or\n label_replace(vector(2), \"phase\", \"Migrating\", \"\", \"\") or\n label_replace(vector(3), \"phase\", \"Pause\", \"\", \"\") or\n label_replace(vector(4), \"phase\", \"Pending\", \"\", \"\") or\n label_replace(vector(5), \"phase\", \"Running\", \"\", \"\") or\n label_replace(vector(6), \"phase\", \"Starting\", \"\", \"\") or\n label_replace(vector(7), \"phase\", \"Stopped\", \"\", \"\") or\n label_replace(vector(8), \"phase\", \"Stopping\", \"\", \"\") or\n label_replace(vector(9), \"phase\", \"Terminating\", \"\", \"\")\n ))", + "expr": "avg by (name,namespace) (\n sum by (name,namespace,pod) ((d8_virtualization_virtualmachine_status_phase{namespace=~\"$namespace\"} == 1) * on(phase) group_left()\n (\n label_replace(vector(1), \"phase\", \"Degraded\", \"\", \"\") or\n label_replace(vector(2), \"phase\", \"Migrating\", \"\", \"\") or\n label_replace(vector(3), \"phase\", \"Pause\", \"\", \"\") or\n label_replace(vector(4), \"phase\", \"Pending\", \"\", \"\") or\n label_replace(vector(5), \"phase\", \"Running\", \"\", \"\") or\n label_replace(vector(6), \"phase\", \"Starting\", \"\", \"\") or\n label_replace(vector(7), \"phase\", \"Stopped\", \"\", \"\") or\n label_replace(vector(8), \"phase\", \"Stopping\", \"\", \"\") or\n label_replace(vector(9), \"phase\", \"Terminating\", \"\", \"\")\n ))\n)", "instant": false, "legendFormat": "{{ name }}", "range": true, @@ -2053,7 +2052,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum ((d8_virtualization_virtualmachine_cpu_cores{namespace=~\"$namespace\"} * 
d8_virtualization_virtualmachine_cpu_core_fraction) / 100)\n", + "expr": "avg by (namespace) (\n sum by (namespace,pod) (\n (\n d8_virtualization_virtualmachine_cpu_cores{namespace=~\"$namespace\"} \n * \n d8_virtualization_virtualmachine_cpu_core_fraction\n ) / 100\n )\n) ", "hide": false, "instant": false, "legendFormat": "Guaranteed cores", @@ -2066,7 +2065,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum (d8_virtualization_virtualmachine_cpu_cores{namespace=~\"$namespace\"})\n", + "expr": "avg by (namespace) (\n sum by (namespace,pod) (\n d8_virtualization_virtualmachine_cpu_cores{namespace=~\"$namespace\"}\n )\n)\n\n", "hide": false, "instant": false, "legendFormat": "Current cores", @@ -2180,7 +2179,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2330,7 +2330,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum (d8_virtualization_virtualmachine_memory_size_bytes{namespace=~\"$namespace\"})", + "expr": "avg by (namespace) (\n sum by (namespace,pod) (\n d8_virtualization_virtualmachine_memory_size_bytes{namespace=~\"$namespace\"}\n )\n) ", "hide": false, "instant": false, "legendFormat": "Configured memory", @@ -2382,7 +2382,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "sum (d8_virtualization_virtualmachine_configuration_memory_runtime_overhead_bytes{namespace=~\"$namespace\"})\n", + "expr": "avg by (namespace) (\n sum by (namespace,pod) (\n d8_virtualization_virtualmachine_configuration_memory_runtime_overhead_bytes{namespace=~\"$namespace\"}\n )\n) ", "hide": false, "instant": false, "legendFormat": "Hypervisor Memory Overhead", @@ -2632,7 +2632,7 @@ "h": 12, "w": 12, "x": 0, - "y": 34 + "y": 23 }, "id": 104, "maxPerRow": 6, @@ -2649,8 +2649,8 @@ "showHeader": true, "sortBy": [ { - "desc": true, - "displayName": "Phase" + "desc": false, + "displayName": "VirtualDisk" } ] }, @@ -2846,6 +2846,30 @@ } } ] + }, + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "Quota" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] } ] }, @@ -2853,7 +2877,7 @@ "h": 12, "w": 12, "x": 12, - "y": 34 + "y": 23 }, "id": 100, "options": { @@ -2866,8 +2890,8 @@ "displayMode": "table", "placement": "right", "showLegend": true, - "sortBy": "Last", - "sortDesc": true + "sortBy": "Name", + "sortDesc": false }, "tooltip": { "mode": "multi", @@ -2881,7 +2905,7 @@ "uid": "${ds_prometheus}" }, "editorMode": "code", - "expr": "d8_virtualization_virtualdisk_capacity_bytes{namespace=~\"$namespace\"}", + "expr": "avg by (name,namespace) (\n sum by (name,namespace,pod) (\n d8_virtualization_virtualdisk_capacity_bytes{namespace=~\"$namespace\"}\n )\n) ", "hide": false, "instant": false, "legendFormat": "{{ name }}", @@ -3011,7 +3035,7 @@ "h": 8, "w": 12, "x": 0, - "y": 46 + "y": 35 }, "id": 44, "maxPerRow": 2, @@ -3177,7 +3201,7 @@ "h": 8, "w": 12, "x": 12, - "y": 46 + "y": 35 }, "id": 90, "maxPerRow": 2, @@ -3281,7 +3305,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3297,7 +3322,7 @@ "h": 8, "w": 12, "x": 0, - "y": 23 + "y": 24 }, "id": 7, "options": { @@ -3383,7 +3408,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3399,7 +3425,7 @@ "h": 8, "w": 
12, "x": 12, - "y": 23 + "y": 24 }, "id": 8, "options": { @@ -3485,7 +3511,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3501,7 +3528,7 @@ "h": 8, "w": 12, "x": 0, - "y": 31 + "y": 32 }, "id": 12, "options": { @@ -3587,7 +3614,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3603,7 +3631,7 @@ "h": 8, "w": 12, "x": 12, - "y": 31 + "y": 32 }, "id": 9, "options": { @@ -3689,7 +3717,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3705,7 +3734,7 @@ "h": 8, "w": 12, "x": 0, - "y": 39 + "y": 40 }, "id": 10, "options": { @@ -3791,7 +3820,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -3807,7 +3837,7 @@ "h": 8, "w": 12, "x": 12, - "y": 39 + "y": 40 }, "id": 11, "options": { @@ -3908,7 +3938,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4060,7 +4091,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4199,7 +4231,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4338,7 +4371,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4546,8 +4580,8 @@ { "current": { "selected": false, - "text": "virtlab-pt-0", - "value": "virtlab-pt-0" + "text": "node", + "value": "node" }, "datasource": { "type": "prometheus", @@ -4581,6 +4615,6 @@ "timezone": "browser", "title": "Namespace / Virtual Machines", "uid": "aenrewb6nk0sgf", - "version": 1, + "version": 5, "weekStart": "" } diff --git a/monitoring/prometheus-rules/virtualization-controller.yaml b/monitoring/prometheus-rules/virtualization-controller.yaml index c5fb9aa1ca..bbbe3b1930 100644 --- a/monitoring/prometheus-rules/virtualization-controller.yaml +++ b/monitoring/prometheus-rules/virtualization-controller.yaml @@ -1,7 +1,7 @@ - name: kubernetes.virtualization.controller_state rules: - alert: D8VirtualizationControllerTargetDown - expr: max by (job) (up{job="scrapeconfig/d8-monitoring/virtualization-controller"}) == 0 + expr: max by (job) (up{job="virtualization-controller-metrics"}) == 0 for: 1m labels: severity_level: "6" @@ -19,7 +19,7 @@ 2. Or check the Pod logs: `kubectl -n d8-virtualization logs deploy/virtualization-controller` - alert: D8VirtualizationControllerTargetAbsent - expr: absent(up{job="scrapeconfig/d8-monitoring/virtualization-controller"}) == 1 + expr: absent(up{job="virtualization-controller-metrics"}) == 1 labels: severity_level: "6" tier: cluster diff --git a/templates/virtualization-controller/scrape-config.yaml b/templates/virtualization-controller/scrape-config.yaml deleted file mode 100644 index 68e8e4ebf9..0000000000 --- a/templates/virtualization-controller/scrape-config.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if (.Values.global.enabledModules | has "operator-prometheus-crd") }} ---- -apiVersion: monitoring.coreos.com/v1alpha1 -kind: ScrapeConfig -metadata: - name: virtualization-controller - namespace: d8-monitoring - {{- include "helm_lib_module_labels" (list . 
(dict "app" "virtualization-controller" "prometheus" "main")) | nindent 2 }} -spec: - honorLabels: true - authorization: - credentials: - key: token - name: prometheus-token - scheme: HTTPS - tlsConfig: - insecureSkipVerify: true - staticConfigs: - - targets: ['virtualization-controller-metrics.d8-{{ .Chart.Name }}.svc.{{ .Values.global.discovery.clusterDomain }}.:8080'] - metricsPath: '/metrics' - -{{- end }} diff --git a/templates/virtualization-controller/service-monitor.yaml b/templates/virtualization-controller/service-monitor.yaml new file mode 100644 index 0000000000..ba4d780375 --- /dev/null +++ b/templates/virtualization-controller/service-monitor.yaml @@ -0,0 +1,35 @@ +{{- if (.Values.global.enabledModules | has "operator-prometheus-crd") }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: virtualization-controller + namespace: d8-monitoring + {{- include "helm_lib_module_labels" (list . (dict "app" "virtualization-controller" "prometheus" "main")) | nindent 2 }} +spec: + endpoints: + - bearerTokenSecret: + key: token + name: prometheus-token + path: /metrics + port: metrics + scheme: https + tlsConfig: + insecureSkipVerify: true + metricRelabelings: + # exported_namespace -> namespace (only for d8_virtualization.+) + - action: replace + sourceLabels: + - __name__ + - exported_namespace + regex: 'd8_virtualization.+;(.*)' + targetLabel: namespace + replacement: '${1}' + namespaceSelector: + matchNames: + - d8-{{ .Chart.Name }} + selector: + matchLabels: + app: "virtualization-controller" + +{{- end }} diff --git a/werf.yaml b/werf.yaml index 9b5e733b53..2a7c4d5570 100644 --- a/werf.yaml +++ b/werf.yaml @@ -73,6 +73,7 @@ shell: --- image: bundle fromImage: builder/scratch +cacheVersion: 2025-11-18 import: - image: images-digests add: /