From 2c4ab46957f1e3efafd8df9d6ae6e508859d05ef Mon Sep 17 00:00:00 2001
From: Sid Shukla
Date: Wed, 15 Mar 2023 19:35:48 +0100
Subject: [PATCH] Propagate project to the NutanixMachineTemplate

---
 pkg/providers/nutanix/client.go               |   2 +
 pkg/providers/nutanix/config/cp-template.yaml |  10 +
 pkg/providers/nutanix/config/md-template.yaml |  10 +
 pkg/providers/nutanix/mocks/client.go         |  30 +++
 pkg/providers/nutanix/template.go             |  12 ++
 pkg/providers/nutanix/template_test.go        |  43 ++++
 .../testdata/eksa-cluster-project.yaml        |  79 +++++++
 .../testdata/expected_results_project.yaml    | 197 ++++++++++++++++++
 .../testdata/expected_results_project_md.yaml |  84 ++++++++
 .../testdata/machineConfig_project.yaml       |  27 +++
 pkg/providers/nutanix/validator.go            |  47 +++++
 11 files changed, 541 insertions(+)
 create mode 100644 pkg/providers/nutanix/testdata/eksa-cluster-project.yaml
 create mode 100644 pkg/providers/nutanix/testdata/expected_results_project.yaml
 create mode 100644 pkg/providers/nutanix/testdata/expected_results_project_md.yaml
 create mode 100644 pkg/providers/nutanix/testdata/machineConfig_project.yaml

diff --git a/pkg/providers/nutanix/client.go b/pkg/providers/nutanix/client.go
index bf98de9e89857..897b616b82582 100644
--- a/pkg/providers/nutanix/client.go
+++ b/pkg/providers/nutanix/client.go
@@ -13,5 +13,7 @@ type Client interface {
 	ListImage(ctx context.Context, getEntitiesRequest *v3.DSMetadata) (*v3.ImageListIntentResponse, error)
 	GetCluster(ctx context.Context, uuid string) (*v3.ClusterIntentResponse, error)
 	ListCluster(ctx context.Context, getEntitiesRequest *v3.DSMetadata) (*v3.ClusterListIntentResponse, error)
+	GetProject(ctx context.Context, uuid string) (*v3.Project, error)
+	ListProject(ctx context.Context, getEntitiesRequest *v3.DSMetadata) (*v3.ProjectListResponse, error)
 	GetCurrentLoggedInUser(ctx context.Context) (*v3.UserIntentResponse, error)
 }
diff --git a/pkg/providers/nutanix/config/cp-template.yaml b/pkg/providers/nutanix/config/cp-template.yaml
index f2a24b662b357..4176dc64e5ddd 100644
--- a/pkg/providers/nutanix/config/cp-template.yaml
+++ b/pkg/providers/nutanix/config/cp-template.yaml
@@ -257,6 +257,16 @@ spec:
         - type: uuid
           uuid: "{{.subnetUUID}}"
 {{ end }}
+{{- if .projectIDType}}
+      project:
+{{- if (eq .projectIDType "name") }}
+        type: name
+        name: "{{.projectName}}"
+{{- else if (eq .projectIDType "uuid") }}
+        type: uuid
+        uuid: "{{.projectUUID}}"
+{{ end }}
+{{ end }}
 ---
 {{- if .registryAuth }}
 apiVersion: v1
diff --git a/pkg/providers/nutanix/config/md-template.yaml b/pkg/providers/nutanix/config/md-template.yaml
index f183443ea81c9..dea95e13d571d 100644
--- a/pkg/providers/nutanix/config/md-template.yaml
+++ b/pkg/providers/nutanix/config/md-template.yaml
@@ -69,6 +69,16 @@ spec:
         - type: uuid
           uuid: "{{.subnetUUID}}"
 {{ end }}
+{{- if .projectIDType}}
+      project:
+{{- if (eq .projectIDType "name") }}
+        type: name
+        name: "{{.projectName}}"
+{{- else if (eq .projectIDType "uuid") }}
+        type: uuid
+        uuid: "{{.projectUUID}}"
+{{ end }}
+{{ end }}
 ---
 apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
 kind: KubeadmConfigTemplate
diff --git a/pkg/providers/nutanix/mocks/client.go b/pkg/providers/nutanix/mocks/client.go
index 18688c03d095f..c1ce5886fb3a9 100644
--- a/pkg/providers/nutanix/mocks/client.go
+++ b/pkg/providers/nutanix/mocks/client.go
@@ -80,6 +80,21 @@ func (mr *MockClientMockRecorder) GetImage(ctx, uuid interface{}) *gomock.Call {
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImage", reflect.TypeOf((*MockClient)(nil).GetImage), ctx, uuid)
 }

+// GetProject mocks base method.
+func (m *MockClient) GetProject(ctx context.Context, uuid string) (*v3.Project, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "GetProject", ctx, uuid)
+	ret0, _ := ret[0].(*v3.Project)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// GetProject indicates an expected call of GetProject.
+func (mr *MockClientMockRecorder) GetProject(ctx, uuid interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProject", reflect.TypeOf((*MockClient)(nil).GetProject), ctx, uuid)
+}
+
 // GetSubnet mocks base method.
 func (m *MockClient) GetSubnet(ctx context.Context, uuid string) (*v3.SubnetIntentResponse, error) {
 	m.ctrl.T.Helper()
@@ -125,6 +140,21 @@ func (mr *MockClientMockRecorder) ListImage(ctx, getEntitiesRequest interface{})
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImage", reflect.TypeOf((*MockClient)(nil).ListImage), ctx, getEntitiesRequest)
 }

+// ListProject mocks base method.
+func (m *MockClient) ListProject(ctx context.Context, getEntitiesRequest *v3.DSMetadata) (*v3.ProjectListResponse, error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ListProject", ctx, getEntitiesRequest)
+	ret0, _ := ret[0].(*v3.ProjectListResponse)
+	ret1, _ := ret[1].(error)
+	return ret0, ret1
+}
+
+// ListProject indicates an expected call of ListProject.
+func (mr *MockClientMockRecorder) ListProject(ctx, getEntitiesRequest interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProject", reflect.TypeOf((*MockClient)(nil).ListProject), ctx, getEntitiesRequest)
+}
+
 // ListSubnet mocks base method.
 func (m *MockClient) ListSubnet(ctx context.Context, getEntitiesRequest *v3.DSMetadata) (*v3.SubnetListIntentResponse, error) {
 	m.ctrl.T.Helper()
diff --git a/pkg/providers/nutanix/template.go b/pkg/providers/nutanix/template.go
index ad79899a5bfc8..acfdf69f3f852 100644
--- a/pkg/providers/nutanix/template.go
+++ b/pkg/providers/nutanix/template.go
@@ -220,6 +220,12 @@ func buildTemplateMapCP(
 		values["etcdSshUsername"] = etcdMachineSpec.Users[0].Name
 	}

+	if controlPlaneMachineSpec.Project != nil {
+		values["projectIDType"] = controlPlaneMachineSpec.Project.Type
+		values["projectName"] = controlPlaneMachineSpec.Project.Name
+		values["projectUUID"] = controlPlaneMachineSpec.Project.UUID
+	}
+
 	return values, nil
 }

@@ -277,6 +283,12 @@ func buildTemplateMapMD(clusterSpec *cluster.Spec, workerNodeGroupMachineSpec v1
 		}
 	}

+	if workerNodeGroupMachineSpec.Project != nil {
+		values["projectIDType"] = workerNodeGroupMachineSpec.Project.Type
+		values["projectName"] = workerNodeGroupMachineSpec.Project.Name
+		values["projectUUID"] = workerNodeGroupMachineSpec.Project.UUID
+	}
+
 	return values, nil
 }

diff --git a/pkg/providers/nutanix/template_test.go b/pkg/providers/nutanix/template_test.go
index 4837bb45f547a..870c6a62d0701 100644
--- a/pkg/providers/nutanix/template_test.go
+++ b/pkg/providers/nutanix/template_test.go
@@ -25,6 +25,9 @@ var nutanixDatacenterConfigSpec string
 //go:embed testdata/machineConfig.yaml
 var nutanixMachineConfigSpec string

+//go:embed testdata/machineConfig_project.yaml
+var nutanixMachineConfigSpecWithProject string
+
 func fakemarshal(v interface{}) ([]byte, error) {
 	return []byte{}, errors.New("marshalling failed")
 }
@@ -200,6 +203,46 @@ func TestNewNutanixTemplateBuilderRegistryMirrorConfigNoRegistryCredsSet(t *test
 	assert.Error(t, err)
 }

+func TestNewNutanixTemplateBuilderProject(t *testing.T) {
+	t.Setenv(constants.EksaNutanixUsernameKey, "admin")
+	t.Setenv(constants.EksaNutanixPasswordKey, "password")
+	creds := GetCredsFromEnv()
+
+	dcConf, _, _ := minimalNutanixConfigSpec(t)
+	machineConf := &anywherev1.NutanixMachineConfig{}
+	err := yaml.Unmarshal([]byte(nutanixMachineConfigSpecWithProject), machineConf)
+	require.NoError(t, err)
+
+	workerConfs := map[string]anywherev1.NutanixMachineConfigSpec{
+		"eksa-unit-test": machineConf.Spec,
+	}
+	builder := NewNutanixTemplateBuilder(&dcConf.Spec, &machineConf.Spec, &machineConf.Spec, workerConfs, creds, time.Now)
+	assert.NotNil(t, builder)
+
+	buildSpec := test.NewFullClusterSpec(t, "testdata/eksa-cluster-project.yaml")
+	cpSpec, err := builder.GenerateCAPISpecControlPlane(buildSpec)
+	assert.NoError(t, err)
+	assert.NotNil(t, cpSpec)
+
+	expectedControlPlaneSpec, err := os.ReadFile("testdata/expected_results_project.yaml")
+	require.NoError(t, err)
+	assert.Equal(t, expectedControlPlaneSpec, cpSpec)
+
+	workloadTemplateNames := map[string]string{
+		"eksa-unit-test": "eksa-unit-test",
+	}
+	kubeadmconfigTemplateNames := map[string]string{
+		"eksa-unit-test": "eksa-unit-test",
+	}
+	workerSpec, err := builder.GenerateCAPISpecWorkers(buildSpec, workloadTemplateNames, kubeadmconfigTemplateNames)
+	assert.NoError(t, err)
+	assert.NotNil(t, workerSpec)
+
+	expectedWorkersSpec, err := os.ReadFile("testdata/expected_results_project_md.yaml")
+	require.NoError(t, err)
+	assert.Equal(t, expectedWorkersSpec, workerSpec)
+}
+
 func minimalNutanixConfigSpec(t *testing.T) (*anywherev1.NutanixDatacenterConfig, *anywherev1.NutanixMachineConfig, map[string]anywherev1.NutanixMachineConfigSpec) {
 	dcConf := &anywherev1.NutanixDatacenterConfig{}
 	err := yaml.Unmarshal([]byte(nutanixDatacenterConfigSpec), dcConf)
diff --git a/pkg/providers/nutanix/testdata/eksa-cluster-project.yaml b/pkg/providers/nutanix/testdata/eksa-cluster-project.yaml
new file mode 100644
index 0000000000000..dab5041fe0b16
--- /dev/null
+++ b/pkg/providers/nutanix/testdata/eksa-cluster-project.yaml
@@ -0,0 +1,79 @@
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: Cluster
+metadata:
+  name: eksa-unit-test
+  namespace: default
+spec:
+  kubernetesVersion: "1.19"
+  controlPlaneConfiguration:
+    name: eksa-unit-test
+    count: 3
+    endpoint:
+      host: test-ip
+    machineGroupRef:
+      name: eksa-unit-test
+      kind: NutanixMachineConfig
+  workerNodeGroupConfigurations:
+    - count: 4
+      name: eksa-unit-test
+      machineGroupRef:
+        name: eksa-unit-test
+        kind: NutanixMachineConfig
+  externalEtcdConfiguration:
+    name: eksa-unit-test
+    count: 3
+    machineGroupRef:
+      name: eksa-unit-test
+      kind: NutanixMachineConfig
+  datacenterRef:
+    kind: NutanixDatacenterConfig
+    name: eksa-unit-test
+  clusterNetwork:
+    cni: "cilium"
+    pods:
+      cidrBlocks:
+        - 192.168.0.0/16
+    services:
+      cidrBlocks:
+        - 10.96.0.0/12
+---
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: NutanixDatacenterConfig
+metadata:
+  name: eksa-unit-test
+  namespace: default
+spec:
+  endpoint: "prism.nutanix.com"
+  port: 9440
+  credentialRef:
+    kind: Secret
+    name: "nutanix-credentials"
+---
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: NutanixMachineConfig
+metadata:
+  name: eksa-unit-test
+  namespace: default
+spec:
+  vcpusPerSocket: 1
+  vcpuSockets: 4
+  memorySize: 8Gi
+  image:
+    type: "name"
+    name: "prism-image"
+  cluster:
+    type: "name"
+    name: "prism-cluster"
+  subnet:
+    type: "name"
+    name: "prism-subnet"
+  project:
+    type: "name"
+    name: "prism-project"
+  systemDiskSize: 40Gi
+  osFamily: "ubuntu"
+  users:
+    - name: "mySshUsername"
+      sshAuthorizedKeys:
+        - "mySshAuthorizedKey"
+---
diff --git a/pkg/providers/nutanix/testdata/expected_results_project.yaml b/pkg/providers/nutanix/testdata/expected_results_project.yaml
new file mode 100644
index 0000000000000..0245a744e420a
--- /dev/null
+++ b/pkg/providers/nutanix/testdata/expected_results_project.yaml
@@ -0,0 +1,197 @@
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixCluster
+metadata:
+  name: "eksa-unit-test"
+  namespace: "eksa-system"
+spec:
+  prismCentral:
+    address: "prism.nutanix.com"
+    port: 9440
+    insecure: false
+    credentialRef:
+      name: "capx-eksa-unit-test"
+      kind: Secret
+  controlPlaneEndpoint:
+    host: "test-ip"
+    port: 6443
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  labels:
+    cluster.x-k8s.io/cluster-name: "eksa-unit-test"
+  name: "eksa-unit-test"
+  namespace: "eksa-system"
+spec:
+  clusterNetwork:
+    services:
+      cidrBlocks: [10.96.0.0/12]
+    pods:
+      cidrBlocks: [192.168.0.0/16]
+    serviceDomain: "cluster.local"
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+    kind: KubeadmControlPlane
+    name: "eksa-unit-test"
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+    kind: NutanixCluster
+    name: "eksa-unit-test"
+---
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+kind: KubeadmControlPlane
+metadata:
+  name: "eksa-unit-test"
+  namespace: "eksa-system"
+spec:
+  replicas: 3
+  version: "v1.19.8-eks-1-19-4"
+  machineTemplate:
+    infrastructureRef:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+      kind: NutanixMachineTemplate
+      name: ""
+  kubeadmConfigSpec:
+    clusterConfiguration:
+      imageRepository: "public.ecr.aws/eks-distro/kubernetes"
+      apiServer:
+        certSANs:
+          - localhost
+          - 127.0.0.1
+          - 0.0.0.0
+      controllerManager:
+        extraArgs:
+          enable-hostpath-provisioner: "true"
+      dns:
+        imageRepository: public.ecr.aws/eks-distro/coredns
+        imageTag: v1.8.0-eks-1-19-4
+      etcd:
+        external:
+          endpoints: []
+          caFile: "/etc/kubernetes/pki/etcd/ca.crt"
+          certFile: "/etc/kubernetes/pki/apiserver-etcd-client.crt"
+          keyFile: "/etc/kubernetes/pki/apiserver-etcd-client.key"
+    files:
+      - content: |
+          apiVersion: v1
+          kind: Pod
+          metadata:
+            creationTimestamp: null
+            name: kube-vip
+            namespace: kube-system
+          spec:
+            containers:
+              - name: kube-vip
+                image:
+                imagePullPolicy: IfNotPresent
+                args:
+                  - manager
+                env:
+                  - name: vip_arp
+                    value: "true"
+                  - name: address
+                    value: "test-ip"
+                  - name: port
+                    value: "6443"
+                  - name: vip_cidr
+                    value: "32"
+                  - name: cp_enable
+                    value: "true"
+                  - name: cp_namespace
+                    value: kube-system
+                  - name: vip_ddns
+                    value: "false"
+                  - name: vip_leaderelection
+                    value: "true"
+                  - name: vip_leaseduration
+                    value: "15"
+                  - name: vip_renewdeadline
+                    value: "10"
+                  - name: vip_retryperiod
+                    value: "2"
+                  - name: svc_enable
+                    value: "false"
+                  - name: lb_enable
+                    value: "false"
+                securityContext:
+                  capabilities:
+                    add:
+                      - NET_ADMIN
+                      - SYS_TIME
+                      - NET_RAW
+                volumeMounts:
+                  - mountPath: /etc/kubernetes/admin.conf
+                    name: kubeconfig
+                resources: {}
+            hostNetwork: true
+            volumes:
+              - name: kubeconfig
+                hostPath:
+                  type: FileOrCreate
+                  path: /etc/kubernetes/admin.conf
+          status: {}
+        owner: root:root
+        path: /etc/kubernetes/manifests/kube-vip.yaml
+    initConfiguration:
+      nodeRegistration:
+        kubeletExtraArgs:
+          # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
+          # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
+          #cgroup-driver: cgroupfs
+          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+    joinConfiguration:
+      nodeRegistration:
+        criSocket: /var/run/containerd/containerd.sock
+        kubeletExtraArgs:
+          cloud-provider: external
+          read-only-port: "0"
+          anonymous-auth: "false"
+          tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+        name: "{{ ds.meta_data.hostname }}"
+    users:
+      - name: "mySshUsername"
+        lockPassword: false
+        sudo: ALL=(ALL) NOPASSWD:ALL
+        sshAuthorizedKeys:
+          - "mySshAuthorizedKey"
+    preKubeadmCommands:
+      - hostnamectl set-hostname "{{ ds.meta_data.hostname }}"
+      - echo "::1 ipv6-localhost ipv6-loopback" >/etc/hosts
+      - echo "127.0.0.1 localhost" >>/etc/hosts
+      - echo "127.0.0.1 {{ ds.meta_data.hostname }}" >> /etc/hosts
+      # This section should be removed once these packages are added to the image builder process
+      - apt update
+      - apt install -y nfs-common open-iscsi
+      - systemctl enable --now iscsid
+    postKubeadmCommands:
+      - echo export KUBECONFIG=/etc/kubernetes/admin.conf >> /root/.bashrc
+    useExperimentalRetryJoin: true
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixMachineTemplate
+metadata:
+  name: ""
+  namespace: "eksa-system"
+spec:
+  template:
+    spec:
+      providerID: "nutanix://eksa-unit-test-m1"
+      vcpusPerSocket: 1
+      vcpuSockets: 4
+      memorySize: 8Gi
+      systemDiskSize: 40Gi
+      image:
+        type: name
+        name: "prism-image"
+
+      cluster:
+        type: name
+        name: "prism-cluster"
+      subnet:
+        - type: name
+          name: "prism-subnet"
+      project:
+        type: name
+        name: "prism-project"
+
+---
diff --git a/pkg/providers/nutanix/testdata/expected_results_project_md.yaml b/pkg/providers/nutanix/testdata/expected_results_project_md.yaml
new file mode 100644
index 0000000000000..a22a38d6e4292
--- /dev/null
+++ b/pkg/providers/nutanix/testdata/expected_results_project_md.yaml
@@ -0,0 +1,84 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+  labels:
+    cluster.x-k8s.io/cluster-name: "eksa-unit-test"
+  name: "eksa-unit-test-eksa-unit-test"
+  namespace: "eksa-system"
+spec:
+  clusterName: "eksa-unit-test"
+  replicas: 4
+  selector:
+    matchLabels: {}
+  template:
+    metadata:
+      labels:
+        cluster.x-k8s.io/cluster-name: "eksa-unit-test"
+    spec:
+      bootstrap:
+        configRef:
+          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+          kind: KubeadmConfigTemplate
+          name: "eksa-unit-test"
+      clusterName: "eksa-unit-test"
+      infrastructureRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        kind: NutanixMachineTemplate
+        name: "eksa-unit-test"
+      version: "v1.19.8-eks-1-19-4"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: NutanixMachineTemplate
+metadata:
+  name: "eksa-unit-test"
+  namespace: "eksa-system"
+spec:
+  template:
+    spec:
+      providerID: "nutanix://eksa-unit-test-m1"
+      vcpusPerSocket: 1
+      vcpuSockets: 4
+      memorySize: 8Gi
+      systemDiskSize: 40Gi
+      image:
+        type: name
+        name: "prism-image"
+
+      cluster:
+        type: name
+        name: "prism-cluster"
+      subnet:
+        - type: name
+          name: "prism-subnet"
+      project:
+        type: name
+        name: "prism-project"
+
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+  name: "eksa-unit-test"
+  namespace: "eksa-system"
+spec:
+  template:
+    spec:
+      preKubeadmCommands:
+        - hostnamectl set-hostname "{{ ds.meta_data.hostname }}"
+      joinConfiguration:
+        nodeRegistration:
+          kubeletExtraArgs:
+            # We have to pin the cgroupDriver to cgroupfs as kubeadm >=1.21 defaults to systemd
+            # kind will implement systemd support in: https://github.com/kubernetes-sigs/kind/issues/1726
+            #cgroup-driver: cgroupfs
+            eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+            tls-cipher-suites: TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+          name: '{{ ds.meta_data.hostname }}'
+      users:
+        - name: "mySshUsername"
+          lockPassword: false
+          sudo: ALL=(ALL) NOPASSWD:ALL
+          sshAuthorizedKeys:
+            - "mySshAuthorizedKey"
+
+---
diff --git a/pkg/providers/nutanix/testdata/machineConfig_project.yaml b/pkg/providers/nutanix/testdata/machineConfig_project.yaml
new file mode 100644
index 0000000000000..2099573906dd0
--- /dev/null
+++ b/pkg/providers/nutanix/testdata/machineConfig_project.yaml
@@ -0,0 +1,27 @@
+apiVersion: anywhere.eks.amazonaws.com/v1alpha1
+kind: NutanixMachineConfig
+metadata:
+  name: eksa-unit-test
+  namespace: default
+spec:
+  vcpusPerSocket: 1
+  vcpuSockets: 4
+  memorySize: 8Gi
+  image:
+    type: "name"
+    name: "prism-image"
+  cluster:
+    type: "name"
+    name: "prism-cluster"
+  subnet:
+    type: "name"
+    name: "prism-subnet"
+  project:
+    type: "name"
+    name: "prism-project"
+  systemDiskSize: 40Gi
+  osFamily: "ubuntu"
+  users:
+    - name: "mySshUsername"
+      sshAuthorizedKeys:
+        - "mySshAuthorizedKey"
diff --git a/pkg/providers/nutanix/validator.go b/pkg/providers/nutanix/validator.go
index b3f3ddd91b896..a9b71d62ed9ce 100644
--- a/pkg/providers/nutanix/validator.go
+++ b/pkg/providers/nutanix/validator.go
@@ -164,6 +164,12 @@ func (v *Validator) ValidateMachineConfig(ctx context.Context, config *anywherev
 		return err
 	}

+	if config.Spec.Project != nil {
+		if err := v.validateProjectConfig(ctx, *config.Spec.Project); err != nil {
+			return err
+		}
+	}
+
 	if err := v.validateImageConfig(ctx, config.Spec.Image); err != nil {
 		return err
 	}
@@ -252,6 +258,31 @@ func (v *Validator) validateSubnetConfig(ctx context.Context, identifier anywher
 	return nil
 }

+func (v *Validator) validateProjectConfig(ctx context.Context, identifier anywherev1.NutanixResourceIdentifier) error {
+	switch identifier.Type {
+	case anywherev1.NutanixIdentifierName:
+		if identifier.Name == nil || *identifier.Name == "" {
+			return fmt.Errorf("missing project name")
+		}
+		projectName := *identifier.Name
+		if _, err := findProjectUUIDByName(ctx, v.client, projectName); err != nil {
+			return fmt.Errorf("failed to find project with name %q: %v", projectName, err)
+		}
+	case anywherev1.NutanixIdentifierUUID:
+		if identifier.UUID == nil || *identifier.UUID == "" {
+			return fmt.Errorf("missing project uuid")
+		}
+		projectUUID := *identifier.UUID
+		if _, err := v.client.GetProject(ctx, projectUUID); err != nil {
+			return fmt.Errorf("failed to find project with uuid %s: %v", projectUUID, err)
+		}
+	default:
+		return fmt.Errorf("invalid project identifier type: %s; valid types are: %q and %q", identifier.Type, anywherev1.NutanixIdentifierName, anywherev1.NutanixIdentifierUUID)
+	}
+
+	return nil
+}
+
 // findSubnetUUIDByName retrieves the subnet uuid by the given subnet name.
 func findSubnetUUIDByName(ctx context.Context, v3Client Client, subnetName string) (*string, error) {
 	res, err := v3Client.ListSubnet(ctx, &v3.DSMetadata{
@@ -319,6 +350,22 @@ func findImageUUIDByName(ctx context.Context, v3Client Client, imageName string)
 	return res.Entities[0].Metadata.UUID, nil
 }

+// findProjectUUIDByName retrieves the project uuid by the given project name.
+func findProjectUUIDByName(ctx context.Context, v3Client Client, projectName string) (*string, error) {
+	res, err := v3Client.ListProject(ctx, &v3.DSMetadata{
+		Filter: utils.StringPtr(fmt.Sprintf("name==%s", projectName)),
+	})
+	if err != nil || len(res.Entities) == 0 {
+		return nil, fmt.Errorf("failed to find project by name %q: %v", projectName, err)
+	}
+
+	if len(res.Entities) > 1 {
+		return nil, fmt.Errorf("found more than one (%v) project with name %q", len(res.Entities), projectName)
+	}
+
+	return res.Entities[0].Metadata.UUID, nil
+}
+
 func (v *Validator) validateUpgradeRolloutStrategy(clusterSpec *cluster.Spec) error {
 	if clusterSpec.Cluster.Spec.ControlPlaneConfiguration.UpgradeRolloutStrategy != nil {
 		return fmt.Errorf("Upgrade rollout strategy customization is not supported for nutanix provider")
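
Note for reviewers (not part of the patch): a minimal sketch of how the new ListProject/GetProject mocks can exercise validateProjectConfig. It assumes the test sits in the nutanix package (so the unexported Validator field and method are reachable), that validateProjectConfig only needs the client field populated, and that the import paths mirror the repo's existing prism-go-client usage; the test name, UUID strings, and error value are illustrative, not taken from this patch.

package nutanix

import (
	"context"
	"errors"
	"testing"

	"github.com/golang/mock/gomock"
	"github.com/stretchr/testify/assert"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/providers/nutanix/mocks"
	"github.com/nutanix-cloud-native/prism-go-client/utils"
	v3 "github.com/nutanix-cloud-native/prism-go-client/v3"
)

func TestValidateProjectConfigSketch(t *testing.T) {
	ctrl := gomock.NewController(t)
	client := mocks.NewMockClient(ctrl)
	v := &Validator{client: client} // assumes client is the only field the method touches

	// A name identifier resolves through ListProject; exactly one match passes validation.
	client.EXPECT().ListProject(gomock.Any(), gomock.Any()).Return(&v3.ProjectListResponse{
		Entities: []*v3.Project{
			{Metadata: &v3.Metadata{UUID: utils.StringPtr("hypothetical-project-uuid")}},
		},
	}, nil)
	err := v.validateProjectConfig(context.Background(), anywherev1.NutanixResourceIdentifier{
		Type: anywherev1.NutanixIdentifierName,
		Name: utils.StringPtr("prism-project"),
	})
	assert.NoError(t, err)

	// A uuid identifier resolves through GetProject; a lookup error fails validation.
	client.EXPECT().GetProject(gomock.Any(), "missing-uuid").Return(nil, errors.New("not found"))
	err = v.validateProjectConfig(context.Background(), anywherev1.NutanixResourceIdentifier{
		Type: anywherev1.NutanixIdentifierUUID,
		UUID: utils.StringPtr("missing-uuid"),
	})
	assert.Error(t, err)
}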