From b0f5a1d863a0b81a8073e81556f9538e9b2291e8 Mon Sep 17 00:00:00 2001
From: subhamkrai
Date: Tue, 12 Sep 2023 15:24:48 +0530
Subject: [PATCH] rook: generate rook csv using local script

Generate the rook CSV with a local bash script that lives in the
ocs-operator code, instead of fetching the CSV from the rook image.
This removes our dependency on the rook image for the CSV, and lets
us pass a branch or commit hash to generate the CSV for a specific
version. It also makes little sense to keep CSV code/files in rook
upstream, since the rook CSV is used only downstream. So, in the
future, rook will remove the CSV files from the rook image.

Signed-off-by: subhamkrai
---
 Makefile                                  |   4 +
 .../rook_olm_files/metadata-common.yaml   | 440 ++++++++++++++++++
 .../rook_olm_files/metadata-ocp.yaml      |  19 +
 hack/generate-rook-csv.sh                 |  91 ++++
 4 files changed, 554 insertions(+)
 create mode 100644 deploy/csv-templates/rook_olm_files/metadata-common.yaml
 create mode 100644 deploy/csv-templates/rook_olm_files/metadata-ocp.yaml
 create mode 100644 hack/generate-rook-csv.sh

diff --git a/Makefile b/Makefile
index 44a6139090..29186093df 100644
--- a/Makefile
+++ b/Makefile
@@ -62,6 +62,10 @@ gen-release-csv: operator-sdk manifests kustomize
 	@echo "Generating unified CSV from sourced component-level operators"
 	hack/generate-unified-csv.sh
 
+gen-rook-csv:
+	@echo "Generating rook CSV"
+	hack/generate-rook-csv.sh
+
 gen-latest-deploy-yaml:
 	@echo "Generating latest deployment yaml file"
 	hack/gen-deployment-yaml.sh
diff --git a/deploy/csv-templates/rook_olm_files/metadata-common.yaml b/deploy/csv-templates/rook_olm_files/metadata-common.yaml
new file mode 100644
index 0000000000..d590491a3d
--- /dev/null
+++ b/deploy/csv-templates/rook_olm_files/metadata-common.yaml
@@ -0,0 +1,440 @@
+spec:
+  replaces: rook-ceph.v1.1.1
+  customresourcedefinitions:
+    owned:
+      - kind: CephCluster
+        name: cephclusters.ceph.rook.io
+        version: v1
+        displayName: Ceph Cluster
+        description: Represents a Ceph cluster.
+      - kind: CephBlockPool
+        name: cephblockpools.ceph.rook.io
+        version: v1
+        displayName: Ceph Block Pool
+        description: Represents a Ceph Block Pool.
+      - kind: CephObjectStore
+        name: cephobjectstores.ceph.rook.io
+        version: v1
+        displayName: Ceph Object Store
+        description: Represents a Ceph Object Store.
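+        # Each specDescriptor below maps a CephObjectStore spec field to an
+        # OLM console form widget: the x-descriptors URNs select the widget
+        # type (number, text, resourceRequirements, ...) and the fieldGroup
+        # the field is rendered under.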
+        specDescriptors:
+          - description: Coding Chunks
+            displayName: Coding Chunks
+            path: dataPool.erasureCoded.codingChunks
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:dataPool"
+              - "urn:alm:descriptor:com.tectonic.ui:number"
+          - description: Data Chunks
+            displayName: Data Chunks
+            path: dataPool.erasureCoded.dataChunks
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:dataPool"
+              - "urn:alm:descriptor:com.tectonic.ui:number"
+          - description: failureDomain
+            displayName: failureDomain
+            path: dataPool.failureDomain
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:dataPool"
+              - "urn:alm:descriptor:com.tectonic.ui:text"
+          - description: Size
+            displayName: Size
+            path: dataPool.replicated.size
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:dataPool"
+              - "urn:alm:descriptor:com.tectonic.ui:number"
+          - description: Annotations
+            displayName: Annotations
+            path: gateway.annotations
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:gateway"
+              - "urn:alm:descriptor:io.kubernetes:annotations"
+          - description: Instances
+            displayName: Instances
+            path: gateway.instances
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:gateway"
+              - "urn:alm:descriptor:com.tectonic.ui:number"
+          - description: Resources
+            displayName: Resources
+            path: gateway.resources
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:gateway"
+              - "urn:alm:descriptor:com.tectonic.ui:resourceRequirements"
+          - description: placement
+            displayName: placement
+            path: gateway.placement
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:gateway"
+              - "urn:alm:descriptor:io.kubernetes:placement"
+          - description: securePort
+            displayName: securePort
+            path: gateway.securePort
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:gateway"
+              - "urn:alm:descriptor:io.kubernetes:securePort"
+          - description: sslCertificateRef
+            displayName: sslCertificateRef
+            path: gateway.sslCertificateRef
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:gateway"
+              - "urn:alm:descriptor:io.kubernetes:sslCertificateRef"
+          - description: Type
+            displayName: Type
+            path: gateway.type
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:gateway"
+              - "urn:alm:descriptor:com.tectonic.ui:text"
+          - description: Coding Chunks
+            displayName: Coding Chunks
+            path: metadataPool.erasureCoded.codingChunks
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:metadataPool"
+              - "urn:alm:descriptor:com.tectonic.ui:number"
+          - description: Data Chunks
+            displayName: Data Chunks
+            path: metadataPool.erasureCoded.dataChunks
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:metadataPool"
+              - "urn:alm:descriptor:com.tectonic.ui:number"
+          - description: failureDomain
+            displayName: failureDomain
+            path: metadataPool.failureDomain
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:metadataPool"
+              - "urn:alm:descriptor:com.tectonic.ui:text"
+          - description: Size
+            displayName: Size
+            path: metadataPool.replicated.size
+            x-descriptors:
+              - "urn:alm:descriptor:com.tectonic.ui:fieldGroup:metadataPool"
+              - "urn:alm:descriptor:com.tectonic.ui:number"
+      - kind: CephObjectStoreUser
+        name: cephobjectstoreusers.ceph.rook.io
+        version: v1
+        displayName: Ceph Object Store User
+        description: Represents a Ceph Object Store User.
+      - kind: CephNFS
+        name: cephnfses.ceph.rook.io
+        version: v1
+        displayName: Ceph NFS
+        description: Represents a cluster of Ceph NFS Ganesha gateways.
+      - kind: CephClient
+        name: cephclients.ceph.rook.io
+        version: v1
+        displayName: Ceph Client
+        description: Represents a Ceph User.
+      - kind: CephFilesystem
+        name: cephfilesystems.ceph.rook.io
+        version: v1
+        displayName: Ceph Filesystem
+        description: Represents a Ceph Filesystem.
+      - kind: CephFilesystemMirror
+        name: cephfilesystemmirrors.ceph.rook.io
+        version: v1
+        displayName: Ceph Filesystem Mirror
+        description: Represents a Ceph Filesystem Mirror.
+      - kind: CephRBDMirror
+        name: cephrbdmirrors.ceph.rook.io
+        version: v1
+        displayName: Ceph RBD Mirror
+        description: Represents a Ceph RBD Mirror.
+      - kind: CephObjectRealm
+        name: cephobjectrealms.ceph.rook.io
+        version: v1
+        displayName: Ceph Object Store Realm
+        description: Represents a Ceph Object Store Realm.
+      - kind: CephObjectZoneGroup
+        name: cephobjectzonegroups.ceph.rook.io
+        version: v1
+        displayName: Ceph Object Store Zone Group
+        description: Represents a Ceph Object Store Zone Group.
+      - kind: CephObjectZone
+        name: cephobjectzones.ceph.rook.io
+        version: v1
+        displayName: Ceph Object Store Zone
+        description: Represents a Ceph Object Store Zone.
+      - kind: CephBucketNotification
+        name: cephbucketnotifications.ceph.rook.io
+        version: v1
+        displayName: Ceph Bucket Notification
+        description: Represents a Ceph Bucket Notification.
+      - kind: CephBucketTopic
+        name: cephbuckettopics.ceph.rook.io
+        version: v1
+        displayName: Ceph Bucket Topic
+        description: Represents a Ceph Bucket Topic.
+      - kind: CephFilesystemSubVolumeGroup
+        name: cephfilesystemsubvolumegroups.ceph.rook.io
+        version: v1
+        displayName: Ceph Filesystem SubVolumeGroup
+        description: Represents a Ceph Filesystem SubVolumeGroup.
+      - kind: CephBlockPoolRadosNamespace
+        name: cephblockpoolradosnamespaces.ceph.rook.io
+        version: v1
+        displayName: Ceph BlockPool Rados Namespace
+        description: Represents a Ceph BlockPool Rados Namespace.
+      - kind: CephCOSIDriver
+        name: cephcosidrivers.ceph.rook.io
+        version: v1
+        displayName: Ceph COSI Driver
+        description: Represents a Ceph COSI Driver.
+  displayName: Rook-Ceph
+  description: |
+
+    The Rook-Ceph storage operator packages, deploys, manages, upgrades and scales Ceph storage for providing persistent storage to infrastructure services (Logging, Metrics, Registry) as well as stateful applications in Kubernetes clusters.
+
+    ## Rook-Ceph Storage Operator
+
+    Rook runs as a cloud-native service in Kubernetes clusters for optimal integration with applications in need of storage, and handles the heavy lifting behind the scenes, such as provisioning and management.
+    Rook orchestrates the battle-tested open-source storage technology Ceph, which has years of production deployments and runs some of the world's largest clusters.
+
+    Ceph is a massively scalable, software-defined, cloud-native storage platform that offers block, file and object storage services.
+    Ceph can be used to back a wide variety of applications, including relational databases, NoSQL databases, CI/CD tool-sets, messaging, AI/ML and analytics applications.
+    Ceph is a proven storage platform that backs some of the world's largest storage deployments and has a large, vibrant open-source community backing the project.
+
+    ## Supported features
+    * **High Availability and resiliency** - Ceph has no single point of failure (SPOF), and all of its components work natively in a highly available fashion
+    * **Data Protection** - Ceph periodically scrubs for inconsistent objects and repairs them if necessary, making sure your replicas are always coherent
+    * **Consistent storage platform across hybrid cloud** - Ceph can be deployed anywhere (on-premises or bare metal) and thus offers a similar experience regardless
+    * **Block, File & Object storage service** - Ceph can expose your data through several storage interfaces, solving all the application use cases
+    * **Scale up/down** - addition and removal of storage are fully covered by the operator.
+    * **Dashboard** - The Operator deploys a dashboard for monitoring and introspecting your cluster.
+
+    ## Before you start
+    https://rook.io/docs/rook/v1.0/k8s-pre-reqs.html
+
+  keywords:
+    [
+      "rook",
+      "ceph",
+      "storage",
+      "object storage",
+      "open source",
+      "block storage",
+      "shared filesystem",
+    ]
+  minKubeVersion: 1.10.0
+  labels:
+    alm-owner-etcd: rookoperator
+    operated-by: rookoperator
+  selector:
+    matchLabels:
+      alm-owner-etcd: rookoperator
+      operated-by: rookoperator
+  links:
+    - name: Blog
+      url: https://blog.rook.io
+    - name: Documentation
+      url: https://rook.github.io/docs/rook/v1.0/
+  icon:
+    - base64data: PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0idXRmLTgiPz4KPCEtLSBHZW5lcmF0b3I6IEFkb2JlIElsbHVzdHJhdG9yIDIzLjAuMiwgU1ZHIEV4cG9ydCBQbHVnLUluIC4gU1ZHIFZlcnNpb246IDYuMDAgQnVpbGQgMCkgIC0tPgo8c3ZnIHZlcnNpb249IjEuMSIgaWQ9IkxheWVyXzEiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgeG1sbnM6eGxpbms9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkveGxpbmsiIHg9IjBweCIgeT0iMHB4IgoJIHZpZXdCb3g9IjAgMCA3MCA3MCIgc3R5bGU9ImVuYWJsZS1iYWNrZ3JvdW5kOm5ldyAwIDAgNzAgNzA7IiB4bWw6c3BhY2U9InByZXNlcnZlIj4KPHN0eWxlIHR5cGU9InRleHQvY3NzIj4KCS5zdDB7ZmlsbDojMkIyQjJCO30KPC9zdHlsZT4KPGc+Cgk8Zz4KCQk8Zz4KCQkJPHBhdGggY2xhc3M9InN0MCIgZD0iTTUwLjUsNjcuNkgxOS45Yy04LDAtMTQuNS02LjUtMTQuNS0xNC41VjI5LjJjMC0xLjEsMC45LTIuMSwyLjEtMi4xaDU1LjRjMS4xLDAsMi4xLDAuOSwyLjEsMi4xdjIzLjkKCQkJCUM2NSw2MS4xLDU4LjUsNjcuNiw1MC41LDY3LjZ6IE05LjYsMzEuMnYyMS45YzAsNS43LDQuNiwxMC4zLDEwLjMsMTAuM2gzMC42YzUuNywwLDEwLjMtNC42LDEwLjMtMTAuM1YzMS4ySDkuNnoiLz4KCQk8L2c+CgkJPGc+CgkJCTxwYXRoIGNsYXNzPSJzdDAiIGQ9Ik00Mi40LDU2LjdIMjhjLTEuMSwwLTIuMS0wLjktMi4xLTIuMXYtNy4yYzAtNS4xLDQuMi05LjMsOS4zLTkuM3M5LjMsNC4yLDkuMyw5LjN2Ny4yCgkJCQlDNDQuNSw1NS43LDQzLjYsNTYuNyw0Mi40LDU2Ljd6IE0zMCw1Mi41aDEwLjN2LTUuMmMwLTIuOS0yLjMtNS4yLTUuMi01LjJjLTIuOSwwLTUuMiwyLjMtNS4yLDUuMlY1Mi41eiIvPgoJCTwvZz4KCQk8Zz4KCQkJPHBhdGggY2xhc3M9InN0MCIgZD0iTTYyLjksMjMuMkM2Mi45LDIzLjIsNjIuOSwyMy4yLDYyLjksMjMuMmwtMTEuMSwwYy0xLjEsMC0yLjEtMC45LTIuMS0yLjFjMC0xLjEsMC45LTIuMSwyLjEtMi4xCgkJCQljMCwwLDAsMCwwLDBsOS4xLDBWNi43aC02Ljl2My41YzAsMC41LTAuMiwxLjEtMC42LDEuNWMtMC40LDAuNC0wLjksMC42LTEuNSwwLjZsMCwwbC0xMS4xLDBjLTEuMSwwLTIuMS0wLjktMi4xLTIuMVY2LjdoLTYuOQoJCQkJdjMuNWMwLDEuMS0wLjksMi4xLTIuMSwyLjFsLTExLjEsMGMtMC41LDAtMS4xLTAuMi0xLjUtMC42Yy0wLjQtMC40LTAuNi0wLjktMC42LTEuNVY2LjdIOS42djEyLjRoOWMxLjEsMCwyLjEsMC45LDIuMSwyLjEKCQkJCXMtMC45LDIuMS0yLjEsMi4xaC0xMWMtMS4xLDAtMi4xLTAuOS0yLjEtMi4xVjQuNmMwLTEuMSwwLjktMi4xLDIuMS0yLjFoMTEuMWMxLjEsMCwyLjEsMC45LDIuMSwyLjF2My41bDcsMFY0LjYKCQkJCWMwLTEuMSwwLjktMi4xLDIuMS0yLjFoMTEuMWMxLjEsMCwyLjEsMC45LDIuMSwyLjF2My41bDYuOSwwVjQuNmMwLTEuMSwwLjktMi4xLDIuMS0yLjFoMTEuMUM2NCwyLjYsNjUsMy41LDY1LDQuNnYxNi41CgkJCQljMCwwLjUtMC4yLDEuMS0wLjYsMS41QzY0LDIzLDYzLjQsMjMuMiw2Mi45LDIzLjJ6Ii8+CgkJPC9nPgoJPC9nPgo8L2c+Cjwvc3ZnPg==
+      mediatype: image/svg+xml
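+  # The install modes below advertise where OLM may deploy this operator:
+  # only OwnNamespace and SingleNamespace installs are supported; watching
+  # multiple or all namespaces is not.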
+  installModes:
+    - type: OwnNamespace
+      supported: true
+    - type: SingleNamespace
+      supported: true
+    - type: MultiNamespace
+      supported: false
+    - type: AllNamespaces
+      supported: false
+
+metadata:
+  annotations:
+    tectonic-visibility: ocs
+    repository: https://github.com/rook/rook
+    containerImage: "{{.RookOperatorImage}}"
+    alm-examples: |-
+      [
+        {
+          "apiVersion": "ceph.rook.io/v1",
+          "kind": "CephCluster",
+          "metadata": {
+            "name": "my-rook-ceph",
+            "namespace": "my-rook-ceph"
+          },
+          "spec": {
+            "cephVersion": {
+              "image": "quay.io/ceph/ceph:v17.2.6"
+            },
+            "dataDirHostPath": "/var/lib/rook",
+            "mon": {
+              "count": 3
+            },
+            "dashboard": {
+              "enabled": true
+            },
+            "network": {
+              "hostNetwork": false
+            },
+            "rbdMirroring": {
+              "workers": 0
+            },
+            "storage": {
+              "useAllNodes": true,
+              "useAllDevices": true
+            }
+          }
+        },
+        {
+          "apiVersion": "ceph.rook.io/v1",
+          "kind": "CephBlockPool",
+          "metadata": {
+            "name": "replicapool",
+            "namespace": "my-rook-ceph"
+          },
+          "spec": {
+            "failureDomain": "host",
+            "replicated": {
+              "size": 3
+            },
+            "annotations": null
+          }
+        },
+        {
+          "apiVersion": "ceph.rook.io/v1",
+          "kind": "CephObjectStore",
+          "metadata": {
+            "name": "my-store",
+            "namespace": "my-rook-ceph"
+          },
+          "spec": {
+            "metadataPool": {
+              "failureDomain": "host",
+              "replicated": {
+                "size": 3
+              }
+            },
+            "dataPool": {
+              "failureDomain": "host",
+              "replicated": {
+                "size": 3
+              }
+            },
+            "gateway": {
+              "type": "s3",
+              "sslCertificateRef": null,
+              "port": 8080,
+              "securePort": null,
+              "instances": 1,
+              "placement": null,
+              "annotations": null,
+              "resources": null
+            }
+          }
+        },
+        {
+          "apiVersion": "ceph.rook.io/v1",
+          "kind": "CephObjectStoreUser",
+          "metadata": {
+            "name": "my-user",
+            "namespace": "my-rook-ceph"
+          },
+          "spec": {
+            "store": "my-store",
+            "displayName": "my display name"
+          }
+        },
+        {
+          "apiVersion": "ceph.rook.io/v1",
+          "kind": "CephNFS",
+          "metadata": {
+            "name": "my-nfs",
+            "namespace": "rook-ceph"
+          },
+          "spec": {
+            "rados": {
+              "pool": "myfs-data0",
+              "namespace": "nfs-ns"
+            },
+            "server": {
+              "active": 3,
+              "placement": null,
+              "annotations": null,
+              "resources": null
+            }
+          }
+        },
+        {
+          "apiVersion": "ceph.rook.io/v1",
+          "kind": "CephClient",
+          "metadata": {
+            "name": "cinder",
+            "namespace": "rook-ceph"
+          },
+          "spec": {
+            "caps": {
+              "mon": "profile rbd",
+              "osd": "profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images"
+            }
+          }
+        },
+        {
+          "apiVersion": "ceph.rook.io/v1",
+          "kind": "CephFilesystem",
+          "metadata": {
+            "name": "myfs",
+            "namespace": "rook-ceph"
+          },
+          "spec": {
+            "dataPools": [
+              {
+                "compressionMode": "",
+                "crushRoot": "",
+                "deviceClass": "",
+                "erasureCoded": {
+                  "algorithm": "",
+                  "codingChunks": 0,
+                  "dataChunks": 0
+                },
+                "failureDomain": "host",
+                "replicated": {
+                  "requireSafeReplicaSize": false,
+                  "size": 1,
+                  "targetSizeRatio": 0.5
+                }
+              }
+            ],
+            "metadataPool": {
+              "compressionMode": "",
+              "crushRoot": "",
+              "deviceClass": "",
+              "erasureCoded": {
+                "algorithm": "",
+                "codingChunks": 0,
+                "dataChunks": 0
+              },
+              "failureDomain": "",
+              "replicated": {
+                "requireSafeReplicaSize": false,
+                "size": 1,
+                "targetSizeRatio": 0
+              }
+            },
+            "metadataServer": {
+              "activeCount": 1,
+              "activeStandby": true,
+              "placement": {},
+              "resources": {}
+            },
+            "preservePoolsOnDelete": false,
+            "preserveFilesystemOnDelete": false
+          }
+        },
+        {
+          "apiVersion": "ceph.rook.io/v1",
+          "kind": "CephRBDMirror",
+          "metadata": {
+            "name": "my-rbd-mirror",
+            "namespace": "rook-ceph"
+          },
+          "spec": {
+            "annotations": null,
+            "count": 1,
+            "placement": {
+              "topologyKey": "kubernetes.io/hostname"
+            },
"resources": null + } + } + ] diff --git a/deploy/csv-templates/rook_olm_files/metadata-ocp.yaml b/deploy/csv-templates/rook_olm_files/metadata-ocp.yaml new file mode 100644 index 0000000000..97472c9239 --- /dev/null +++ b/deploy/csv-templates/rook_olm_files/metadata-ocp.yaml @@ -0,0 +1,19 @@ +spec: + install: + spec: + clusterPermissions: + - rules: + - verbs: + - use + apiGroups: + - security.openshift.io + resources: + - securitycontextconstraints + resourceNames: + - privileged + serviceAccountName: rook-ceph-system + maintainers: + - name: Red Hat, Inc. + email: customerservice@redhat.com + provider: + name: Red Hat, Inc. diff --git a/hack/generate-rook-csv.sh b/hack/generate-rook-csv.sh new file mode 100644 index 0000000000..e621a17a4f --- /dev/null +++ b/hack/generate-rook-csv.sh @@ -0,0 +1,91 @@ +#!/usr/bin/env bash +set -e + +############# +# VARIABLES # +############# + +operator_sdk="${OPERATOR_SDK:-operator-sdk}" +yq="${YQv3:-yq}" +PLATFORM=$(go env GOARCH) + +YQ_CMD_DELETE=("$yq" delete -i) +YQ_CMD_MERGE_OVERWRITE=("$yq" merge --inplace --overwrite --prettyPrint) +YQ_CMD_MERGE=("$yq" merge --arrays=append --inplace) +YQ_CMD_WRITE=("$yq" write --inplace -P) +CSV_FILE_NAME="deploy/csv-templates/crds/rook/manifests/rook-ceph.clusterserviceversion.yaml" +CEPH_EXTERNAL_SCRIPT_FILE="https://raw.githubusercontent.com/rook/rook/master/deploy/examples/create-external-cluster-resources.py" +wget --no-verbose $CEPH_EXTERNAL_SCRIPT_FILE +ASSEMBLE_FILE_COMMON="deploy/csv-templates/rook_olm_files/metadata-common.yaml" +ASSEMBLE_FILE_OCP="deploy/csv-templates/rook_olm_files/metadata-ocp.yaml" +############# +# FUNCTIONS # +############# + +function generate_csv() { + # If you want to use specific branch, tags or commit has refer to this doc https://github.com/kubernetes-sigs/kustomize/blob/master/examples/remoteBuild.md#remote-directories + kubectl kustomize https://github.com/rook/rook//deploy/examples | "$operator_sdk" generate bundle --package="rook-ceph" --output-dir="deploy/csv-templates/crds/rook/" --extra-service-accounts=rook-ceph-system,rook-csi-rbd-provisioner-sa,rook-csi-rbd-plugin-sa,rook-csi-cephfs-provisioner-sa,rook-csi-nfs-provisioner-sa,rook-csi-nfs-plugin-sa,rook-csi-cephfs-plugin-sa,rook-ceph-system,rook-ceph-rgw,rook-ceph-purge-osd,rook-ceph-osd,rook-ceph-mgr,rook-ceph-cmd-reporter + + directory="deploy/csv-templates/crds/rook/manifests" + filename=$(basename "$CSV_FILE_NAME") + + # Below line removes the ObjectBucket from owned CRD section and converts yaml to json file + yq read --prettyPrint --tojson "$CSV_FILE_NAME" | jq 'del(.spec.customresourcedefinitions.owned[] | select(.kind | test("ObjectBucket.*")))' >"$directory"/temp.json + # Belo lines convert the json to yaml + yq read --prettyPrint "$directory"/temp.json >"$CSV_FILE_NAME" + # Below lines remove the extra json file + rm -f "$directory"/temp.json + + for file in "$directory"/*; do + if [ -f "$file" ]; then + filename=$(basename "$file") + # We want to remove the description from rook-crd files only + if [[ "$filename" == "ceph.rook.io"* ]]; then + # Below lines converts the file to json format and then remove the description field from the file. + yq read --prettyPrint --tojson "$file" | jq 'walk(if type == "object" then with_entries(select(.key != "description")) else . 
+    kubectl kustomize https://github.com/rook/rook//deploy/examples | "$operator_sdk" generate bundle --package="rook-ceph" --output-dir="deploy/csv-templates/crds/rook/" --extra-service-accounts=rook-ceph-system,rook-csi-rbd-provisioner-sa,rook-csi-rbd-plugin-sa,rook-csi-cephfs-provisioner-sa,rook-csi-nfs-provisioner-sa,rook-csi-nfs-plugin-sa,rook-csi-cephfs-plugin-sa,rook-ceph-rgw,rook-ceph-purge-osd,rook-ceph-osd,rook-ceph-mgr,rook-ceph-cmd-reporter
+
+    directory="deploy/csv-templates/crds/rook/manifests"
+    filename=$(basename "$CSV_FILE_NAME")
+
+    # The line below removes ObjectBucket* kinds from the owned CRD section and converts the YAML to JSON
+    yq read --prettyPrint --tojson "$CSV_FILE_NAME" | jq 'del(.spec.customresourcedefinitions.owned[] | select(.kind | test("ObjectBucket.*")))' >"$directory"/temp.json
+    # The line below converts the JSON back to YAML
+    yq read --prettyPrint "$directory"/temp.json >"$CSV_FILE_NAME"
+    # The line below removes the temporary JSON file
+    rm -f "$directory"/temp.json
+
+    for file in "$directory"/*; do
+        if [ -f "$file" ]; then
+            filename=$(basename "$file")
+            # We want to remove the description fields from the rook CRD files only
+            if [[ "$filename" == "ceph.rook.io"* ]]; then
+                # The line below converts the file to JSON and recursively removes every description field
+                yq read --prettyPrint --tojson "$file" | jq 'walk(if type == "object" then with_entries(select(.key != "description")) else . end)' >"$directory"/temp.json
+                # The line below converts the JSON back to YAML
+                yq read --prettyPrint "$directory"/temp.json >"$directory"/"$filename"
+                # The line below removes the temporary JSON file
+                rm -f "$directory"/temp.json
+            fi
+        fi
+    done
+
+    # Clean up to get the expected state before merging the real data from the assemble files
+    "${YQ_CMD_DELETE[@]}" "$CSV_FILE_NAME" 'spec.installModes[*]'
+    "${YQ_CMD_DELETE[@]}" "$CSV_FILE_NAME" 'spec.keywords[0]'
+    "${YQ_CMD_DELETE[@]}" "$CSV_FILE_NAME" 'spec.maintainers[0]'
+
+    "${YQ_CMD_MERGE_OVERWRITE[@]}" "$CSV_FILE_NAME" <(cat "$ASSEMBLE_FILE_COMMON")
+    "${YQ_CMD_WRITE[@]}" "$CSV_FILE_NAME" metadata.annotations.externalClusterScript "$(base64