From bda21a00c57e8cd2055c433ac97f28ee82c33c86 Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Mon, 27 Jun 2022 11:36:21 -0500 Subject: [PATCH 01/22] Support Cloudstack multiple endpoint for preflight checks --- go.mod | 1 + internal/test/cleanup/cleanup.go | 20 +- pkg/dependencies/factory.go | 20 +- pkg/dependencies/factory_test.go | 7 + .../cloudstack_config_multiple_profiles.ini | 11 + pkg/executables/builder.go | 4 +- pkg/executables/cmk.go | 135 +- pkg/executables/cmk_test.go | 95 +- pkg/executables/config/cmk.ini | 15 +- pkg/providers/cloudstack/cloudstack.go | 62 +- pkg/providers/cloudstack/cloudstack_test.go | 18 +- pkg/providers/cloudstack/decoder/decoder.go | 71 +- .../cloudstack/decoder/decoder_test.go | 251 ++- pkg/providers/cloudstack/mocks/client.go | 30 +- .../cloudstack_config_invalid_format.ini | 5 + .../cloudstack_config_invalid_verifyssl.ini | 5 + .../cloudstack_config_missing_apikey.ini | 4 + .../cloudstack_config_missing_apiurl.ini | 4 + .../cloudstack_config_missing_secretkey.ini | 4 + .../cloudstack_config_missing_verifyssl.ini | 4 + .../cloudstack_config_multiple_profiles.ini | 11 + .../cloudstack_config_no_sections.ini | 4 + .../testdata/cloudstack_config_valid.ini | 5 + .../cluster_main_with_availability_zones.yaml | 139 ++ pkg/providers/cloudstack/validator.go | 189 +- pkg/providers/cloudstack/validator_test.go | 194 +- release/pkg/generate_spec.go | 1 - .../testdata/release-0.9-bundle-release.yaml | 1646 +++++++++++++++++ 28 files changed, 2497 insertions(+), 458 deletions(-) create mode 100644 pkg/dependencies/testdata/cloudstack_config_multiple_profiles.ini create mode 100644 pkg/providers/cloudstack/testdata/cloudstack_config_invalid_format.ini create mode 100644 pkg/providers/cloudstack/testdata/cloudstack_config_invalid_verifyssl.ini create mode 100644 pkg/providers/cloudstack/testdata/cloudstack_config_missing_apikey.ini create mode 100644 pkg/providers/cloudstack/testdata/cloudstack_config_missing_apiurl.ini create mode 100644 pkg/providers/cloudstack/testdata/cloudstack_config_missing_secretkey.ini create mode 100644 pkg/providers/cloudstack/testdata/cloudstack_config_missing_verifyssl.ini create mode 100644 pkg/providers/cloudstack/testdata/cloudstack_config_multiple_profiles.ini create mode 100644 pkg/providers/cloudstack/testdata/cloudstack_config_no_sections.ini create mode 100644 pkg/providers/cloudstack/testdata/cloudstack_config_valid.ini create mode 100644 pkg/providers/cloudstack/testdata/cluster_main_with_availability_zones.yaml create mode 100644 release/pkg/test/testdata/release-0.9-bundle-release.yaml diff --git a/go.mod b/go.mod index ed2a5edb1a6d..a4f504d65e01 100644 --- a/go.mod +++ b/go.mod @@ -19,6 +19,7 @@ require ( github.com/golang/mock v1.6.0 github.com/google/go-github/v35 v35.3.0 github.com/google/uuid v1.3.0 + github.com/hashicorp/go-multierror v1.1.1 github.com/mrajashree/etcdadm-controller v1.0.0-rc3 github.com/onsi/gomega v1.19.0 github.com/pkg/errors v0.9.1 diff --git a/internal/test/cleanup/cleanup.go b/internal/test/cleanup/cleanup.go index 4bf0b12e7229..aa999b367640 100644 --- a/internal/test/cleanup/cleanup.go +++ b/internal/test/cleanup/cleanup.go @@ -6,6 +6,7 @@ import ( "strconv" "github.com/aws/aws-sdk-go/aws/session" + "github.com/hashicorp/go-multierror" "github.com/aws/eks-anywhere/internal/pkg/ec2" "github.com/aws/eks-anywhere/internal/pkg/s3" @@ -85,7 +86,7 @@ func VsphereRmVms(ctx context.Context, clusterName string, opts ...executables.G return govc.CleanupVms(ctx, clusterName, false) } -func 
CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dryRun bool) error { +func CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dryRun bool) (retErr error) { executableBuilder, close, err := executables.NewExecutableBuilder(ctx, executables.DefaultEksaImage()) if err != nil { return fmt.Errorf("unable to initialize executables: %v", err) @@ -99,12 +100,23 @@ func CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dry if err != nil { return fmt.Errorf("building cmk executable: %v", err) } - cmk := executableBuilder.BuildCmkExecutable(tmpWriter, *execConfig) - defer cmk.Close(ctx) + for _, config := range execConfig.Profiles { + cmk := executableBuilder.BuildCmkExecutable(tmpWriter, config) + if err := cleanupCloudStackVms(ctx, cmk, clusterName, dryRun); err != nil { + retErr = multierror.Append(retErr, err) + } + cmk.Close(ctx) + } + return retErr +} +func cleanupCloudStackVms(ctx context.Context, cmk *executables.Cmk, clusterName string, dryRun bool) error { if err := cmk.ValidateCloudStackConnection(ctx); err != nil { return fmt.Errorf("validating cloudstack connection with cloudmonkey: %v", err) } - return cmk.CleanupVms(ctx, clusterName, dryRun) + if err := cmk.CleanupVms(ctx, clusterName, dryRun); err != nil { + return fmt.Errorf("cleaning up VMs with cloudmonkey: %v", err) + } + return nil } diff --git a/pkg/dependencies/factory.go b/pkg/dependencies/factory.go index 49309cfc719c..be5dcdda5937 100644 --- a/pkg/dependencies/factory.go +++ b/pkg/dependencies/factory.go @@ -50,7 +50,7 @@ type Dependencies struct { DockerClient *executables.Docker Kubectl *executables.Kubectl Govc *executables.Govc - Cmk *executables.Cmk + Cmks map[string]*executables.Cmk SnowAwsClient aws.Clients SnowConfigManager *snow.ConfigManager Writer filewriter.FileWriter @@ -266,12 +266,17 @@ func (f *Factory) WithProvider(clusterConfigFile string, clusterConfig *v1alpha1 return fmt.Errorf("unable to get machine config from file %s: %v", clusterConfigFile, err) } + cmkClientMap := cloudstack.CmkClientMap{} + for name, cmk := range f.dependencies.Cmks { + cmkClientMap[name] = cmk + } + f.dependencies.Provider = cloudstack.NewProvider( datacenterConfig, machineConfigs, clusterConfig, f.dependencies.Kubectl, - f.dependencies.Cmk, + cmkClientMap, f.dependencies.Writer, time.Now, skipIpCheck, @@ -414,16 +419,21 @@ func (f *Factory) WithCmk() *Factory { f.WithExecutableBuilder().WithWriter() f.buildSteps = append(f.buildSteps, func(ctx context.Context) error { - if f.dependencies.Cmk != nil { + if f.dependencies.Cmks != nil { return nil } + f.dependencies.Cmks = map[string]*executables.Cmk{} + execConfig, err := decoder.ParseCloudStackSecret() if err != nil { return fmt.Errorf("building cmk executable: %v", err) } - f.dependencies.Cmk = f.executableBuilder.BuildCmkExecutable(f.dependencies.Writer, *execConfig) - f.dependencies.closers = append(f.dependencies.closers, f.dependencies.Cmk) + for _, profileConfig := range execConfig.Profiles { + cmk := f.executableBuilder.BuildCmkExecutable(f.dependencies.Writer, profileConfig) + f.dependencies.Cmks[profileConfig.Name] = cmk + f.dependencies.closers = append(f.dependencies.closers, cmk) + } return nil }) diff --git a/pkg/dependencies/factory_test.go b/pkg/dependencies/factory_test.go index 8385df10a729..fe12b41eb0de 100644 --- a/pkg/dependencies/factory_test.go +++ b/pkg/dependencies/factory_test.go @@ -2,6 +2,7 @@ package dependencies_test import ( "context" + "encoding/base64" "os" "testing" @@ -13,6 +14,7 
@@ import ( "github.com/aws/eks-anywhere/pkg/cluster" "github.com/aws/eks-anywhere/pkg/config" "github.com/aws/eks-anywhere/pkg/dependencies" + "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder" "github.com/aws/eks-anywhere/pkg/version" "github.com/aws/eks-anywhere/release/api/v1alpha1" ) @@ -78,6 +80,10 @@ func TestFactoryBuildWithClusterManagerWithoutCliConfig(t *testing.T) { } func TestFactoryBuildWithMultipleDependencies(t *testing.T) { + configString := test.ReadFile(t, "testdata/cloudstack_config_multiple_profiles.ini") + encodedConfig := base64.StdEncoding.EncodeToString([]byte(configString)) + t.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, encodedConfig) + tt := newTest(t) deps, err := dependencies.NewFactory(). UseExecutableImage("image:1"). @@ -96,6 +102,7 @@ func TestFactoryBuildWithMultipleDependencies(t *testing.T) { WithCAPIManager(). WithManifestReader(). WithUnAuthKubeClient(). + WithCmk(). Build(context.Background()) tt.Expect(err).To(BeNil()) diff --git a/pkg/dependencies/testdata/cloudstack_config_multiple_profiles.ini b/pkg/dependencies/testdata/cloudstack_config_multiple_profiles.ini new file mode 100644 index 000000000000..ead54c30f37c --- /dev/null +++ b/pkg/dependencies/testdata/cloudstack_config_multiple_profiles.ini @@ -0,0 +1,11 @@ +[Global] +verify-ssl = false +api-key = test-key1 +secret-key = test-secret1 +api-url = http://127.16.0.1:8080/client/api + +[Instance2] +verify-ssl = true +api-key = test-key2 +secret-key = test-secret2 +api-url = http://127.16.0.2:8080/client/api diff --git a/pkg/executables/builder.go b/pkg/executables/builder.go index 5c94c66ef9b7..37bdcee06539 100644 --- a/pkg/executables/builder.go +++ b/pkg/executables/builder.go @@ -41,8 +41,8 @@ func (b *ExecutableBuilder) BuildGovcExecutable(writer filewriter.FileWriter, op return NewGovc(b.buildExecutable(govcPath), writer, opts...) 
} -func (b *ExecutableBuilder) BuildCmkExecutable(writer filewriter.FileWriter, execConfig decoder.CloudStackExecConfig) *Cmk { - return NewCmk(b.buildExecutable(cmkPath), writer, execConfig) +func (b *ExecutableBuilder) BuildCmkExecutable(writer filewriter.FileWriter, config decoder.CloudStackProfileConfig) *Cmk { + return NewCmk(b.buildExecutable(cmkPath), writer, config) } func (b *ExecutableBuilder) BuildAwsCli() *AwsCli { diff --git a/pkg/executables/cmk.go b/pkg/executables/cmk.go index 53cce0553f89..5940a32bd351 100644 --- a/pkg/executables/cmk.go +++ b/pkg/executables/cmk.go @@ -23,7 +23,7 @@ var cmkConfigTemplate string const ( cmkPath = "cmk" - cmkConfigFileName = "cmk_tmp.ini" + cmkConfigFileNameTemplate = "cmk_%s.ini" Shared = "Shared" defaultCloudStackPreflightTimeout = "30" rootDomain = "ROOT" @@ -34,15 +34,7 @@ const ( type Cmk struct { writer filewriter.FileWriter executable Executable - config decoder.CloudStackExecConfig -} - -type cmkExecConfig struct { - CloudStackApiKey string - CloudStackSecretKey string - CloudStackManagementUrl string - CloudMonkeyVerifyCert string - CloudMonkeyTimeout string + config decoder.CloudStackProfileConfig } func (c *Cmk) Close(ctx context.Context) error { @@ -195,39 +187,34 @@ func (c *Cmk) ValidateAffinityGroupsPresent(ctx context.Context, domainId string return nil } -func (c *Cmk) ValidateZonesPresent(ctx context.Context, zones []v1alpha1.CloudStackZone) ([]v1alpha1.CloudStackResourceIdentifier, error) { - var zoneIdentifiers []v1alpha1.CloudStackResourceIdentifier - for _, zone := range zones { - command := newCmkCommand("list zones") - if len(zone.Id) > 0 { - applyCmkArgs(&command, withCloudStackId(zone.Id)) - } else { - applyCmkArgs(&command, withCloudStackName(zone.Name)) - } - result, err := c.exec(ctx, command...) - if err != nil { - return nil, fmt.Errorf("getting zones info - %s: %v", result.String(), err) - } - if result.Len() == 0 { - return nil, fmt.Errorf("zone %s not found", zone) - } +func (c *Cmk) ValidateZonePresent(ctx context.Context, zone v1alpha1.CloudStackZone) (string, error) { + command := newCmkCommand("list zones") + if len(zone.Id) > 0 { + applyCmkArgs(&command, withCloudStackId(zone.Id)) + } else { + applyCmkArgs(&command, withCloudStackName(zone.Name)) + } + result, err := c.exec(ctx, command...) 
+ if err != nil { + return "", fmt.Errorf("getting zones info - %s: %v", result.String(), err) + } + if result.Len() == 0 { + return "", fmt.Errorf("zone %s not found", zone) + } - response := struct { - CmkZones []cmkResourceIdentifier `json:"zone"` - }{} - if err = json.Unmarshal(result.Bytes(), &response); err != nil { - return nil, fmt.Errorf("parsing response into json: %v", err) - } - cmkZones := response.CmkZones - if len(cmkZones) > 1 { - return nil, fmt.Errorf("duplicate zone %s found", zone) - } else if len(zones) == 0 { - return nil, fmt.Errorf("zone %s not found", zone) - } else { - zoneIdentifiers = append(zoneIdentifiers, v1alpha1.CloudStackResourceIdentifier{Name: cmkZones[0].Name, Id: cmkZones[0].Id}) - } + response := struct { + CmkZones []cmkResourceIdentifier `json:"zone"` + }{} + if err = json.Unmarshal(result.Bytes(), &response); err != nil { + return "", fmt.Errorf("parsing response into json: %v", err) + } + cmkZones := response.CmkZones + if len(cmkZones) > 1 { + return "", fmt.Errorf("duplicate zone %s found", zone) + } else if len(cmkZones) == 0 { + return "", fmt.Errorf("zone %s not found", zone) } - return zoneIdentifiers, nil + return cmkZones[0].Id, nil } func (c *Cmk) ValidateDomainPresent(ctx context.Context, domain string) (v1alpha1.CloudStackResourceIdentifier, error) { @@ -273,10 +260,10 @@ func (c *Cmk) ValidateDomainPresent(ctx context.Context, domain string) (v1alpha return domainIdentifier, nil } -func (c *Cmk) ValidateNetworkPresent(ctx context.Context, domainId string, zone v1alpha1.CloudStackZone, zones []v1alpha1.CloudStackResourceIdentifier, account string, multipleZone bool) error { +func (c *Cmk) ValidateNetworkPresent(ctx context.Context, domainId string, network v1alpha1.CloudStackResourceIdentifier, zoneId string, account string, multipleZone bool) error { command := newCmkCommand("list networks") - if len(zone.Network.Id) > 0 { - applyCmkArgs(&command, withCloudStackId(zone.Network.Id)) + if len(network.Id) > 0 { + applyCmkArgs(&command, withCloudStackId(network.Id)) } if multipleZone { applyCmkArgs(&command, withCloudStackNetworkType(Shared)) @@ -289,16 +276,6 @@ func (c *Cmk) ValidateNetworkPresent(ctx context.Context, domainId string, zone applyCmkArgs(&command, withCloudStackAccount(account)) } } - var zoneId string - var err error - if len(zone.Id) > 0 { - zoneId = zone.Id - } else { - zoneId, err = getZoneIdByName(zones, zone.Name) - if err != nil { - return fmt.Errorf("getting zone id by name %s: %v", zone.Name, err) - } - } applyCmkArgs(&command, withCloudStackZoneId(zoneId)) result, err := c.exec(ctx, command...) if err != nil { @@ -306,9 +283,9 @@ func (c *Cmk) ValidateNetworkPresent(ctx context.Context, domainId string, zone } if result.Len() == 0 { if multipleZone { - return fmt.Errorf("%s network %s not found in zone %s", Shared, zone.Network, zone) + return fmt.Errorf("%s network %s not found in zone %s", Shared, network, zoneId) } else { - return fmt.Errorf("network %s not found in zone %s", zone.Network, zone) + return fmt.Errorf("network %s not found in zone %s", network, zoneId) } } @@ -324,37 +301,33 @@ func (c *Cmk) ValidateNetworkPresent(ctx context.Context, domainId string, zone // if network id and name are both provided, the following code is to confirm name matches return value retrieved by id. // if only name is provided, the following code is to only get networks with specified name. 
- if len(zone.Network.Name) > 0 { + if len(network.Name) > 0 { networks = []cmkResourceIdentifier{} for _, net := range response.CmkNetworks { - if net.Name == zone.Network.Name { + if net.Name == network.Name { networks = append(networks, net) } } } if len(networks) > 1 { - return fmt.Errorf("duplicate network %s found", zone.Network) + return fmt.Errorf("duplicate network %s found", network) } else if len(networks) == 0 { if multipleZone { - return fmt.Errorf("%s network %s not found in zoneRef %s", Shared, zone.Network, zone) + return fmt.Errorf("%s network %s not found in zoneRef %s", Shared, network, zoneId) } else { - return fmt.Errorf("network %s not found in zoneRef %s", zone.Network, zone) + return fmt.Errorf("network %s not found in zoneRef %s", network, zoneId) } } return nil } -func getZoneIdByName(zones []v1alpha1.CloudStackResourceIdentifier, zoneName string) (string, error) { - for _, zoneIdentifier := range zones { - if zoneName == zoneIdentifier.Name { - return zoneIdentifier.Id, nil - } +func (c *Cmk) ValidateAccountPresent(ctx context.Context, account string, domainId string) error { + // If account is not specified then no need to check its presence + if len(account) == 0 { + return nil } - return "", fmt.Errorf("zoneId not found for zone %s", zoneName) -} -func (c *Cmk) ValidateAccountPresent(ctx context.Context, account string, domainId string) error { command := newCmkCommand("list accounts") applyCmkArgs(&command, withCloudStackName(account), withCloudStackDomainId(domainId)) result, err := c.exec(ctx, command...) @@ -380,7 +353,7 @@ func (c *Cmk) ValidateAccountPresent(ctx context.Context, account string, domain return nil } -func NewCmk(executable Executable, writer filewriter.FileWriter, config decoder.CloudStackExecConfig) *Cmk { +func NewCmk(executable Executable, writer filewriter.FileWriter, config decoder.CloudStackProfileConfig) *Cmk { return &Cmk{ writer: writer, executable: executable, @@ -388,6 +361,10 @@ func NewCmk(executable Executable, writer filewriter.FileWriter, config decoder. } } +func (c *Cmk) GetManagementApiEndpoint() string { + return c.config.ManagementUrl +} + // ValidateCloudStackConnection Calls `cmk sync` to ensure that the endpoint and credentials + domain are valid func (c *Cmk) ValidateCloudStackConnection(ctx context.Context) error { command := newCmkCommand("sync") @@ -440,14 +417,15 @@ func (c *Cmk) CleanupVms(ctx context.Context, clusterName string, dryRun bool) e func (c *Cmk) exec(ctx context.Context, args ...string) (stdout bytes.Buffer, err error) { if err != nil { - return bytes.Buffer{}, fmt.Errorf("failed get environment map: %v", err) + return stdout, fmt.Errorf("failed get environment map: %v", err) } + configFile, err := c.buildCmkConfigFile() if err != nil { - return bytes.Buffer{}, fmt.Errorf("failed cmk validations: %v", err) + return stdout, fmt.Errorf("failed cmk validations: %v", err) } - argsWithConfigFile := append([]string{"-c", configFile}, args...) + argsWithConfigFile := append([]string{"-c", configFile}, args...) return c.executable.Execute(ctx, argsWithConfigFile...) 
} @@ -461,15 +439,8 @@ func (c *Cmk) buildCmkConfigFile() (configFile string, err error) { } cloudstackPreflightTimeout = timeout } - - cmkConfig := &cmkExecConfig{ - CloudStackApiKey: c.config.ApiKey, - CloudStackSecretKey: c.config.SecretKey, - CloudStackManagementUrl: c.config.ManagementUrl, - CloudMonkeyVerifyCert: c.config.VerifySsl, - CloudMonkeyTimeout: cloudstackPreflightTimeout, - } - writtenFileName, err := t.WriteToFile(cmkConfigTemplate, cmkConfig, cmkConfigFileName) + c.config.Timeout = cloudstackPreflightTimeout + writtenFileName, err := t.WriteToFile(cmkConfigTemplate, c.config, fmt.Sprintf(cmkConfigFileNameTemplate, c.config.Name)) if err != nil { return "", fmt.Errorf("creating file for cmk config: %v", err) } diff --git a/pkg/executables/cmk_test.go b/pkg/executables/cmk_test.go index 05eb31d73c28..21feeb79350a 100644 --- a/pkg/executables/cmk_test.go +++ b/pkg/executables/cmk_test.go @@ -19,23 +19,41 @@ import ( ) const ( - cmkConfigFileName = "cmk_tmp.ini" - accountName = "account1" - rootDomain = "ROOT" - rootDomainId = "5300cdac-74d5-11ec-8696-c81f66d3e965" - domain = "foo/domain1" - domainName = "domain1" - domainId = "7700cdac-74d5-11ec-8696-c81f66d3e965" - domain2 = "foo/bar/domain1" - domain2Name = "domain1" - domain2Id = "8800cdac-74d5-11ec-8696-c81f66d3e965" - zoneId = "4e3b338d-87a6-4189-b931-a1747edeea8f" + cmkConfigFileName = "cmk_test_name.ini" + cmkConfigFileName2 = "cmk_test_name_2.ini" + accountName = "account1" + rootDomain = "ROOT" + rootDomainId = "5300cdac-74d5-11ec-8696-c81f66d3e965" + domain = "foo/domain1" + domainName = "domain1" + domainId = "7700cdac-74d5-11ec-8696-c81f66d3e965" + domain2 = "foo/bar/domain1" + domain2Name = "domain1" + domain2Id = "8800cdac-74d5-11ec-8696-c81f66d3e965" + zoneId = "4e3b338d-87a6-4189-b931-a1747edeea8f" ) var execConfig = decoder.CloudStackExecConfig{ - ApiKey: "test", - SecretKey: "test", - ManagementUrl: "http://1.1.1.1:8080/client/api", + Profiles: []decoder.CloudStackProfileConfig{ + { + Name: "test_name", + ApiKey: "test", + SecretKey: "test", + ManagementUrl: "http://1.1.1.1:8080/client/api", + }, + }, +} + +var execConfigWithMultipleProfiles = decoder.CloudStackExecConfig{ + Profiles: []decoder.CloudStackProfileConfig{ + execConfig.Profiles[0], + { + Name: "test_name_2", + ApiKey: "test_2", + SecretKey: "test_2", + ManagementUrl: "http://1.1.1.1:8080/client/api_2", + }, + }, } var zones = []v1alpha1.CloudStackZone{ @@ -93,11 +111,36 @@ func TestValidateCloudStackConnectionSuccess(t *testing.T) { configFilePath, _ := filepath.Abs(filepath.Join(writer.Dir(), "generated", cmkConfigFileName)) expectedArgs := []string{"-c", configFilePath, "sync"} executable.EXPECT().Execute(ctx, expectedArgs).Return(bytes.Buffer{}, nil) - c := executables.NewCmk(executable, writer, execConfig) + c := executables.NewCmk(executable, writer, execConfig.Profiles[0]) + err := c.ValidateCloudStackConnection(ctx) + if err != nil { + t.Fatalf("Cmk.ValidateCloudStackConnection() error = %v, want nil", err) + } +} + +func TestValidateMultipleCloudStackProfiles(t *testing.T) { + _, writer := test.NewWriter(t) + ctx := context.Background() + mockCtrl := gomock.NewController(t) + + executable := mockexecutables.NewMockExecutable(mockCtrl) + configFilePath, _ := filepath.Abs(filepath.Join(writer.Dir(), "generated", cmkConfigFileName)) + expectedArgs := []string{"-c", configFilePath, "sync"} + executable.EXPECT().Execute(ctx, expectedArgs).Return(bytes.Buffer{}, nil) + configFilePath2, _ := filepath.Abs(filepath.Join(writer.Dir(), 
"generated", cmkConfigFileName2)) + expectedArgs2 := []string{"-c", configFilePath2, "sync"} + executable.EXPECT().Execute(ctx, expectedArgs2).Return(bytes.Buffer{}, nil) + + c := executables.NewCmk(executable, writer, execConfigWithMultipleProfiles.Profiles[0]) err := c.ValidateCloudStackConnection(ctx) if err != nil { t.Fatalf("Cmk.ValidateCloudStackConnection() error = %v, want nil", err) } + c = executables.NewCmk(executable, writer, execConfigWithMultipleProfiles.Profiles[1]) + err = c.ValidateCloudStackConnection(ctx) + if err != nil { + t.Fatalf("Cmk.ValidateCloudStackConnection() error = %v, want nil", err) + } } func TestValidateCloudStackConnectionError(t *testing.T) { @@ -109,7 +152,7 @@ func TestValidateCloudStackConnectionError(t *testing.T) { configFilePath, _ := filepath.Abs(filepath.Join(writer.Dir(), "generated", cmkConfigFileName)) expectedArgs := []string{"-c", configFilePath, "sync"} executable.EXPECT().Execute(ctx, expectedArgs).Return(bytes.Buffer{}, errors.New("cmk test error")) - c := executables.NewCmk(executable, writer, execConfig) + c := executables.NewCmk(executable, writer, execConfig.Profiles[0]) err := c.ValidateCloudStackConnection(ctx) if err == nil { t.Fatalf("Cmk.ValidateCloudStackConnection() didn't throw expected error") @@ -223,7 +266,7 @@ func TestCmkCleanupVms(t *testing.T) { executable.EXPECT().Execute(ctx, argsList). Return(*bytes.NewBufferString(fileContent), tt.cmkResponseError) } - cmk := executables.NewCmk(executable, writer, execConfig) + cmk := executables.NewCmk(executable, writer, execConfig.Profiles[0]) err := tt.cmkFunc(*cmk, ctx) if tt.wantErr && err != nil || !tt.wantErr && err == nil { return @@ -404,7 +447,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonesPresent(ctx, []v1alpha1.CloudStackZone{zones[0]}) + _, err := cmk.ValidateZonePresent(ctx, zones[0]) return err }, cmkResponseError: nil, @@ -420,7 +463,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("id=\"%s\"", resourceId.Id), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonesPresent(ctx, []v1alpha1.CloudStackZone{zones[2]}) + _, err := cmk.ValidateZonePresent(ctx, zones[2]) return err }, cmkResponseError: nil, @@ -436,7 +479,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonesPresent(ctx, zones) + _, err := cmk.ValidateZonePresent(ctx, zones[0]) return err }, cmkResponseError: nil, @@ -452,7 +495,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonesPresent(ctx, zones) + _, err := cmk.ValidateZonePresent(ctx, zones[0]) return err }, cmkResponseError: nil, @@ -468,7 +511,7 @@ func TestCmkListOperations(t *testing.T) { "list", "networks", fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateNetworkPresent(ctx, domainId, zones[2], []v1alpha1.CloudStackResourceIdentifier{}, accountName, false) + return cmk.ValidateNetworkPresent(ctx, domainId, zones[2].Network, zones[2].Id, 
accountName, false) }, cmkResponseError: nil, wantErr: false, @@ -483,7 +526,7 @@ func TestCmkListOperations(t *testing.T) { "list", "networks", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateNetworkPresent(ctx, domainId, zones[3], []v1alpha1.CloudStackResourceIdentifier{}, accountName, false) + return cmk.ValidateNetworkPresent(ctx, domainId, zones[3].Network, zones[3].Id, accountName, false) }, cmkResponseError: nil, wantErr: false, @@ -498,7 +541,7 @@ func TestCmkListOperations(t *testing.T) { "list", "networks", fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateNetworkPresent(ctx, domainId, zones[2], []v1alpha1.CloudStackResourceIdentifier{}, accountName, false) + return cmk.ValidateNetworkPresent(ctx, domainId, zones[2].Network, zones[2].Id, accountName, false) }, cmkResponseError: nil, wantErr: true, @@ -513,7 +556,7 @@ func TestCmkListOperations(t *testing.T) { "list", "networks", fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateNetworkPresent(ctx, domainId, zones[2], []v1alpha1.CloudStackResourceIdentifier{}, accountName, false) + return cmk.ValidateNetworkPresent(ctx, domainId, zones[2].Network, zones[2].Id, accountName, false) }, cmkResponseError: nil, wantErr: true, @@ -851,7 +894,7 @@ func TestCmkListOperations(t *testing.T) { executable := mockexecutables.NewMockExecutable(mockCtrl) executable.EXPECT().Execute(ctx, tt.argumentsExecCall). 
Return(*bytes.NewBufferString(fileContent), tt.cmkResponseError) - cmk := executables.NewCmk(executable, writer, execConfig) + cmk := executables.NewCmk(executable, writer, execConfig.Profiles[0]) err := tt.cmkFunc(*cmk, ctx) if tt.wantErr && err != nil || !tt.wantErr && err == nil { return diff --git a/pkg/executables/config/cmk.ini b/pkg/executables/config/cmk.ini index ce1dc8326209..86fe4af55cfa 100644 --- a/pkg/executables/config/cmk.ini +++ b/pkg/executables/config/cmk.ini @@ -1,13 +1,12 @@ prompt = CMK asyncblock = true -timeout = {{ .CloudMonkeyTimeout }} +timeout = {{ .Timeout }} output = json -verifycert = {{ .CloudMonkeyVerifyCert }} -profile = cluster +verifycert = {{ .VerifySsl }} +profile = {{ .Name }} autocomplete = true - -[cluster] -url = {{ .CloudStackManagementUrl }} -apikey = {{ .CloudStackApiKey }} -secretkey = {{ .CloudStackSecretKey }} +[{{ .Name }}] +url = {{ .ManagementUrl }} +apikey = {{ .ApiKey }} +secretkey = {{ .SecretKey }} \ No newline at end of file diff --git a/pkg/providers/cloudstack/cloudstack.go b/pkg/providers/cloudstack/cloudstack.go index 8dcb12c7d6bb..1288483856cc 100644 --- a/pkg/providers/cloudstack/cloudstack.go +++ b/pkg/providers/cloudstack/cloudstack.go @@ -201,20 +201,20 @@ type ProviderKubectlClient interface { SetEksaControllerEnvVar(ctx context.Context, envVar, envVarVal, kubeconfig string) error } -func NewProvider(datacenterConfig *v1alpha1.CloudStackDatacenterConfig, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig, clusterConfig *v1alpha1.Cluster, providerKubectlClient ProviderKubectlClient, providerCmkClient ProviderCmkClient, writer filewriter.FileWriter, now types.NowFunc, skipIpCheck bool) *cloudstackProvider { +func NewProvider(datacenterConfig *v1alpha1.CloudStackDatacenterConfig, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig, clusterConfig *v1alpha1.Cluster, providerKubectlClient ProviderKubectlClient, providerCmkClients CmkClientMap, writer filewriter.FileWriter, now types.NowFunc, skipIpCheck bool) *cloudstackProvider { return NewProviderCustomNet( datacenterConfig, machineConfigs, clusterConfig, providerKubectlClient, - providerCmkClient, + providerCmkClients, writer, now, skipIpCheck, ) } -func NewProviderCustomNet(datacenterConfig *v1alpha1.CloudStackDatacenterConfig, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig, clusterConfig *v1alpha1.Cluster, providerKubectlClient ProviderKubectlClient, providerCmkClient ProviderCmkClient, writer filewriter.FileWriter, now types.NowFunc, skipIpCheck bool) *cloudstackProvider { +func NewProviderCustomNet(datacenterConfig *v1alpha1.CloudStackDatacenterConfig, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig, clusterConfig *v1alpha1.Cluster, providerKubectlClient ProviderKubectlClient, providerCmkClients CmkClientMap, writer filewriter.FileWriter, now types.NowFunc, skipIpCheck bool) *cloudstackProvider { var controlPlaneMachineSpec, etcdMachineSpec *v1alpha1.CloudStackMachineConfigSpec workerNodeGroupMachineSpecs := make(map[string]v1alpha1.CloudStackMachineConfigSpec, len(machineConfigs)) if clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef != nil && machineConfigs[clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef.Name] != nil { @@ -240,7 +240,7 @@ func NewProviderCustomNet(datacenterConfig *v1alpha1.CloudStackDatacenterConfig, now: now, }, skipIpCheck: skipIpCheck, - validator: NewValidator(providerCmkClient), + validator: NewValidator(providerCmkClients), } } @@ -361,12 +361,17 @@ func (p *cloudstackProvider) 
validateEnv(ctx context.Context) error { if err != nil { return fmt.Errorf("failed to parse environment variable exec config: %v", err) } - if len(execConfig.ManagementUrl) <= 0 { - return errors.New("cloudstack management api url is not set or is empty") + if len(execConfig.Profiles) <= 0 { + return errors.New("cloudstack instances are not defined") } - if err := p.validateManagementApiEndpoint(execConfig.ManagementUrl); err != nil { - return errors.New("CloudStackDatacenterConfig managementApiEndpoint is invalid") + + for _, instance := range execConfig.Profiles { + if err := p.validateManagementApiEndpoint(instance.ManagementUrl); err != nil { + return fmt.Errorf("CloudStack instance %s's managementApiEndpoint %s is invalid", + instance.Name, instance.ManagementUrl) + } } + if _, ok := os.LookupEnv(eksaLicense); !ok { if err := os.Setenv(eksaLicense, ""); err != nil { return fmt.Errorf("unable to set %s: %v", eksaLicense, err) @@ -375,23 +380,27 @@ func (p *cloudstackProvider) validateEnv(ctx context.Context) error { return nil } -func (p *cloudstackProvider) SetupAndValidateCreateCluster(ctx context.Context, clusterSpec *cluster.Spec) error { - err := p.validateEnv(ctx) - if err != nil { - return fmt.Errorf("failed setup and validations: %v", err) - } - - cloudStackClusterSpec := NewSpec(clusterSpec, p.machineConfigs, p.datacenterConfig) - - if err := p.validator.validateCloudStackAccess(ctx); err != nil { +func (p *cloudstackProvider) validateClusterSpec(ctx context.Context, clusterSpec *cluster.Spec) (err error) { + if err := p.validator.validateCloudStackAccess(ctx, p.datacenterConfig); err != nil { return err } if err := p.validator.ValidateCloudStackDatacenterConfig(ctx, p.datacenterConfig); err != nil { return err } - if err := p.validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec); err != nil { + if err := p.validator.ValidateClusterMachineConfigs(ctx, NewSpec(clusterSpec, p.machineConfigs, p.datacenterConfig)); err != nil { return err } + return nil +} + +func (p *cloudstackProvider) SetupAndValidateCreateCluster(ctx context.Context, clusterSpec *cluster.Spec) error { + if err := p.validateEnv(ctx); err != nil { + return fmt.Errorf("failed setup and validations: %v", err) + } + + if err := p.validateClusterSpec(ctx, clusterSpec); err != nil { + return fmt.Errorf("failed cluster spec validation: %v", err) + } if err := p.setupSSHAuthKeysForCreate(); err != nil { return fmt.Errorf("failed setup and validations: %v", err) @@ -424,28 +433,19 @@ func (p *cloudstackProvider) SetupAndValidateCreateCluster(ctx context.Context, } func (p *cloudstackProvider) SetupAndValidateUpgradeCluster(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error { - err := p.validateEnv(ctx) - if err != nil { + if err := p.validateEnv(ctx); err != nil { return fmt.Errorf("failed setup and validations: %v", err) } - cloudStackClusterSpec := NewSpec(clusterSpec, p.machineConfigs, p.datacenterConfig) - if err := p.validator.validateCloudStackAccess(ctx); err != nil { - return err - } - if err := p.validator.ValidateCloudStackDatacenterConfig(ctx, p.datacenterConfig); err != nil { - return err - } - if err := p.validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec); err != nil { - return err + if err := p.validateClusterSpec(ctx, clusterSpec); err != nil { + return fmt.Errorf("failed cluster spec validation: %v", err) } if err := p.setupSSHAuthKeysForUpgrade(); err != nil { return fmt.Errorf("failed setup and validations: %v", err) } - err = 
p.validateMachineConfigsNameUniqueness(ctx, cluster, clusterSpec) - if err != nil { + if err := p.validateMachineConfigsNameUniqueness(ctx, cluster, clusterSpec); err != nil { return fmt.Errorf("failed validate machineconfig uniqueness: %v", err) } return nil diff --git a/pkg/providers/cloudstack/cloudstack_test.go b/pkg/providers/cloudstack/cloudstack_test.go index e51875e904f5..2956d054a288 100644 --- a/pkg/providers/cloudstack/cloudstack_test.go +++ b/pkg/providers/cloudstack/cloudstack_test.go @@ -33,12 +33,12 @@ const ( /* Generated from ini file (like the following) then b64 encoded: `cat fake-cloud-config.ini | base64 | tr -d '\n'` [Global] - api-key = test-key - secret-key = test-secret - api-url = http://127.16.0.1:8080/client/api - verify-ssl = true + verify-ssl = false + api-key = test-key1 + secret-key = test-secret1 + api-url = http://127.16.0.1:8080/client/api */ - expectedCloudStackCloudConfig = "W0dsb2JhbF0KYXBpLWtleSAgICA9IHRlc3Qta2V5CnNlY3JldC1rZXkgPSB0ZXN0LXNlY3JldAphcGktdXJsICAgID0gaHR0cDovLzEyNy4xNi4wLjE6ODA4MC9jbGllbnQvYXBpCnZlcmlmeS1zc2wgPSB0cnVlCg==" + expectedCloudStackCloudConfig = "W0dsb2JhbF0KdmVyaWZ5LXNzbCA9IGZhbHNlCmFwaS1rZXkgPSB0ZXN0LWtleTEKc2VjcmV0LWtleSA9IHRlc3Qtc2VjcmV0MQphcGktdXJsID0gaHR0cDovLzEyNy4xNi4wLjE6ODA4MC9jbGllbnQvYXBpCg==" ) func givenClusterConfig(t *testing.T, fileName string) *v1alpha1.Cluster { @@ -62,12 +62,13 @@ func givenWildcardCmk(mockCtrl *gomock.Controller) ProviderCmkClient { cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateZonesPresent(gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateZonePresent(gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateCloudStackConnection(gomock.Any()).AnyTimes() cmk.EXPECT().ValidateDomainPresent(gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateAccountPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateNetworkPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().GetManagementApiEndpoint().AnyTimes().Return("http://127.16.0.1:8080/client/api") return cmk } @@ -193,12 +194,15 @@ func newProviderWithKubectl(t *testing.T, datacenterConfig *v1alpha1.CloudStackD func newProvider(t *testing.T, datacenterConfig *v1alpha1.CloudStackDatacenterConfig, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig, clusterConfig *v1alpha1.Cluster, kubectl ProviderKubectlClient, cmk ProviderCmkClient) *cloudstackProvider { _, writer := test.NewWriter(t) + cmks := CmkClientMap{} + cmks["Global"] = cmk + return NewProviderCustomNet( datacenterConfig, machineConfigs, clusterConfig, kubectl, - cmk, + cmks, writer, test.FakeNow, false, diff --git a/pkg/providers/cloudstack/decoder/decoder.go b/pkg/providers/cloudstack/decoder/decoder.go index 4bcab1c6c21b..f6bbd6969afd 100644 --- a/pkg/providers/cloudstack/decoder/decoder.go +++ b/pkg/providers/cloudstack/decoder/decoder.go @@ -13,6 +13,7 @@ const ( EksacloudStackCloudConfigB64SecretKey = "EKSA_CLOUDSTACK_B64ENCODED_SECRET" CloudStackCloudConfigB64SecretKey = "CLOUDSTACK_B64ENCODED_SECRET" EksaCloudStackHostPathToMount = "EKSA_CLOUDSTACK_HOST_PATHS_TO_MOUNT" + 
CloudStackGlobalAZ = "Global" ) // ParseCloudStackSecret parses the input b64 string into the ini object to extract out the api key, secret key, and url @@ -29,39 +30,57 @@ func ParseCloudStackSecret() (*CloudStackExecConfig, error) { if err != nil { return nil, fmt.Errorf("failed to extract values from %s with ini: %v", EksacloudStackCloudConfigB64SecretKey, err) } - section, err := cfg.GetSection("Global") - if err != nil { - return nil, fmt.Errorf("failed to extract section 'Global' from %s: %v", EksacloudStackCloudConfigB64SecretKey, err) - } - apiKey, err := section.GetKey("api-key") - if err != nil { - return nil, fmt.Errorf("failed to extract value of 'api-key' from %s: %v", EksacloudStackCloudConfigB64SecretKey, err) - } - secretKey, err := section.GetKey("secret-key") - if err != nil { - return nil, fmt.Errorf("failed to extract value of 'secret-key' from %s: %v", EksacloudStackCloudConfigB64SecretKey, err) - } - apiUrl, err := section.GetKey("api-url") - if err != nil { - return nil, fmt.Errorf("failed to extract value of 'api-url' from %s: %v", EksacloudStackCloudConfigB64SecretKey, err) - } - verifySsl, err := section.GetKey("verify-ssl") - verifySslValue := "true" - if err == nil { - verifySslValue = verifySsl.Value() - if _, err := strconv.ParseBool(verifySslValue); err != nil { - return nil, fmt.Errorf("'verify-ssl' has invalid boolean string %s: %v", verifySslValue, err) + + cloudstackProfiles := []CloudStackProfileConfig{} + sections := cfg.Sections() + for _, section := range sections { + if section.Name() == "DEFAULT" { + continue } + + apiKey, err := section.GetKey("api-key") + if err != nil { + return nil, fmt.Errorf("failed to extract value of 'api-key' from %s: %v", section.Name(), err) + } + secretKey, err := section.GetKey("secret-key") + if err != nil { + return nil, fmt.Errorf("failed to extract value of 'secret-key' from %s: %v", EksacloudStackCloudConfigB64SecretKey, err) + } + apiUrl, err := section.GetKey("api-url") + if err != nil { + return nil, fmt.Errorf("failed to extract value of 'api-url' from %s: %v", EksacloudStackCloudConfigB64SecretKey, err) + } + verifySslValue := "true" + if verifySsl, err := section.GetKey("verify-ssl"); err == nil { + verifySslValue = verifySsl.Value() + if _, err := strconv.ParseBool(verifySslValue); err != nil { + return nil, fmt.Errorf("'verify-ssl' has invalid boolean string %s: %v", verifySslValue, err) + } + } + cloudstackProfiles = append(cloudstackProfiles, CloudStackProfileConfig{ + Name: section.Name(), + ApiKey: apiKey.Value(), + SecretKey: secretKey.Value(), + ManagementUrl: apiUrl.Value(), + VerifySsl: verifySslValue, + }) } + + if len(cloudstackProfiles) == 0 { + return nil, fmt.Errorf("no instance found from %s", EksacloudStackCloudConfigB64SecretKey) + } + return &CloudStackExecConfig{ - ApiKey: apiKey.Value(), - SecretKey: secretKey.Value(), - ManagementUrl: apiUrl.Value(), - VerifySsl: verifySslValue, + Profiles: cloudstackProfiles, }, nil } type CloudStackExecConfig struct { + Profiles []CloudStackProfileConfig +} + +type CloudStackProfileConfig struct { + Name string ApiKey string SecretKey string ManagementUrl string diff --git a/pkg/providers/cloudstack/decoder/decoder_test.go b/pkg/providers/cloudstack/decoder/decoder_test.go index b57945be23d1..2dca15dc66d1 100644 --- a/pkg/providers/cloudstack/decoder/decoder_test.go +++ b/pkg/providers/cloudstack/decoder/decoder_test.go @@ -2,31 +2,17 @@ package decoder_test import ( _ "embed" + "encoding/base64" "os" + "reflect" "testing" . 
"github.com/onsi/gomega" + "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder" ) -const ( - apiKey = "test-key" - secretKey = "test-secret" - apiUrl = "http://127.16.0.1:8080/client/api" - verifySsl = "false" - defaultVerifySsl = "true" - validCloudStackCloudConfig = "W0dsb2JhbF0KYXBpLWtleSA9IHRlc3Qta2V5CnNlY3JldC1rZXkgPSB0ZXN0LXNlY3JldAphcGktdXJsID0gaHR0cDovLzEyNy4xNi4wLjE6ODA4MC9jbGllbnQvYXBpCnZlcmlmeS1zc2wgPSBmYWxzZQo=" - missingApiKey = "W0dsb2JhbF0Kc2VjcmV0LWtleSA9IHRlc3Qtc2VjcmV0CmFwaS11cmwgPSBodHRwOi8vMTI3LjE2LjAuMTo4MDgwL2NsaWVudC9hcGkKdmVyaWZ5LXNzbCA9IGZhbHNlCg==" - missingSecretKey = "W0dsb2JhbF0KYXBpLWtleSA9IHRlc3Qta2V5CmFwaS11cmwgPSBodHRwOi8vMTI3LjE2LjAuMTo4MDgwL2NsaWVudC9hcGkKdmVyaWZ5LXNzbCA9IGZhbHNlCg==" - missingApiUrl = "W0dsb2JhbF0KYXBpLWtleSA9IHRlc3Qta2V5CnNlY3JldC1rZXkgPSB0ZXN0LXNlY3JldAp2ZXJpZnktc3NsID0gZmFsc2UK" - missingVerifySsl = "W0dsb2JhbF0KYXBpLWtleSA9IHRlc3Qta2V5CnNlY3JldC1rZXkgPSB0ZXN0LXNlY3JldAphcGktdXJsID0gaHR0cDovLzEyNy4xNi4wLjE6ODA4MC9jbGllbnQvYXBpCg==" - invalidVerifySslValue = "W0dsb2JhbF0KYXBpLWtleSA9IHRlc3Qta2V5CnNlY3JldC1rZXkgPSB0ZXN0LXNlY3JldAphcGktdXJsID0gaHR0cDovLzEyNy4xNi4wLjE6ODA4MC9jbGllbnQvYXBpCnZlcmlmeS1zc2wgPSBUVFRUVAo=" - missingGlobalSection = "YXBpLWtleSA9IHRlc3Qta2V5CnNlY3JldC1rZXkgPSB0ZXN0LXNlY3JldAphcGktdXJsID0gaHR0cDovLzEyNy4xNi4wLjE6ODA4MC9jbGllbnQvYXBpCnZlcmlmeS1zc2wgPSBmYWxzZQo=" - invalidINI = "W0dsb2JhbF0KYXBpLWtleSA7IHRlc3Qta2V5CnNlY3JldC1rZXkgOyB0ZXN0LXNlY3JldAphcGktdXJsIDsgaHR0cDovLzEyNy4xNi4wLjE6ODA4MC9jbGllbnQvYXBpCnZlcmlmeS1zc2wgOyBmYWxzZQo=" - invalidEncoding = "=====W0dsb2JhbF0KYXBpLWtleSA7IHRlc3Qta2V5CnNlY3JldC1rZXkgOyB0ZXN0LXNlY3JldAphcGktdXJsIDsgaHR0cDovLzEyNy4xNi4wLjE6ODA4MC9jbGllbnQvYXBpCnZlcmlmeS1zc2wgOyBmYWxzZQo======" -) - type testContext struct { oldCloudStackCloudConfigSecret string isCloudStackCloudConfigSecretSet bool @@ -42,130 +28,139 @@ func (tctx *testContext) restoreContext() { } } -func TestValidConfigShouldSucceedtoParse(t *testing.T) { - var tctx testContext - tctx.backupContext() - - g := NewWithT(t) - os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, validCloudStackCloudConfig) - execConfig, err := decoder.ParseCloudStackSecret() - g.Expect(err).To(BeNil(), "An error occurred when parsing a valid secret") - g.Expect(execConfig.ApiKey).To(Equal(apiKey)) - g.Expect(execConfig.SecretKey).To(Equal(secretKey)) - g.Expect(execConfig.ManagementUrl).To(Equal(apiUrl)) - g.Expect(execConfig.VerifySsl).To(Equal(verifySsl)) - - tctx.restoreContext() -} - -func TestMissingApiKeyShouldFailToParse(t *testing.T) { - var tctx testContext - tctx.backupContext() - - g := NewWithT(t) - os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, missingApiKey) - _, err := decoder.ParseCloudStackSecret() - g.Expect(err).ToNot(BeNil()) - - tctx.restoreContext() -} - -func TestMissingSecretKeyShouldFailToParse(t *testing.T) { - var tctx testContext - tctx.backupContext() - - g := NewWithT(t) - os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, missingSecretKey) - _, err := decoder.ParseCloudStackSecret() - g.Expect(err).ToNot(BeNil()) - - tctx.restoreContext() -} - -func TestMissingApiUrlShouldFailToParse(t *testing.T) { - var tctx testContext - tctx.backupContext() - - g := NewWithT(t) - os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, missingApiUrl) - _, err := decoder.ParseCloudStackSecret() - g.Expect(err).ToNot(BeNil()) - - tctx.restoreContext() -} - -func TestMissingVerifySslShouldSetDefaultValue(t *testing.T) { - var tctx testContext - 
tctx.backupContext() - - g := NewWithT(t) - os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, missingVerifySsl) - execConfig, err := decoder.ParseCloudStackSecret() - g.Expect(err).To(BeNil(), "An error occurred when parsing a valid secret") - g.Expect(execConfig.ApiKey).To(Equal(apiKey)) - g.Expect(execConfig.SecretKey).To(Equal(secretKey)) - g.Expect(execConfig.ManagementUrl).To(Equal(apiUrl)) - g.Expect(execConfig.VerifySsl).To(Equal(defaultVerifySsl)) +func TestCloudStackConfigDecoder(t *testing.T) { + tests := []struct { + name string + configFile string + wantErr bool + wantConfig *decoder.CloudStackExecConfig + }{ + { + name: "Valid config", + configFile: "../testdata/cloudstack_config_valid.ini", + wantErr: false, + wantConfig: &decoder.CloudStackExecConfig{ + Profiles: []decoder.CloudStackProfileConfig{ + { + Name: decoder.CloudStackGlobalAZ, + ApiKey: "test-key1", + SecretKey: "test-secret1", + ManagementUrl: "http://127.16.0.1:8080/client/api", + VerifySsl: "false", + Timeout: "", + }, + }, + }, + }, + { + name: "Multiple profiles config", + configFile: "../testdata/cloudstack_config_multiple_profiles.ini", + wantErr: false, + wantConfig: &decoder.CloudStackExecConfig{ + Profiles: []decoder.CloudStackProfileConfig{ + { + Name: decoder.CloudStackGlobalAZ, + ApiKey: "test-key1", + SecretKey: "test-secret1", + ManagementUrl: "http://127.16.0.1:8080/client/api", + VerifySsl: "false", + }, + { + Name: "Instance2", + ApiKey: "test-key2", + SecretKey: "test-secret2", + ManagementUrl: "http://127.16.0.2:8080/client/api", + VerifySsl: "true", + Timeout: "", + }, + }, + }, + }, + { + name: "Missing apikey", + configFile: "../testdata/cloudstack_config_missing_apikey.ini", + wantErr: true, + }, + { + name: "Missing secretkey", + configFile: "../testdata/cloudstack_config_missing_secretkey.ini", + wantErr: true, + }, + { + name: "Missing apiurl", + configFile: "../testdata/cloudstack_config_missing_apiurl.ini", + wantErr: true, + }, + { + name: "Missing verifyssl", + configFile: "../testdata/cloudstack_config_missing_verifyssl.ini", + wantErr: false, + wantConfig: &decoder.CloudStackExecConfig{ + Profiles: []decoder.CloudStackProfileConfig{ + { + Name: decoder.CloudStackGlobalAZ, + ApiKey: "test-key1", + SecretKey: "test-secret1", + ManagementUrl: "http://127.16.0.1:8080/client/api", + VerifySsl: "true", + Timeout: "", + }, + }, + }, + }, + { + name: "Invalid INI format", + configFile: "../testdata/cloudstack_config_invalid_format.ini", + wantErr: true, + }, + { + name: "Invalid veryfyssl value", + configFile: "../testdata/cloudstack_config_invalid_verifyssl.ini", + wantErr: true, + }, + { + name: "No sections", + configFile: "../testdata/cloudstack_config_no_sections.ini", + wantErr: true, + }, + } - tctx.restoreContext() + for _, tc := range tests { + t.Run(tc.name, func(tt *testing.T) { + g := NewWithT(t) + configString := test.ReadFile(t, tc.configFile) + encodedConfig := base64.StdEncoding.EncodeToString([]byte(configString)) + tt.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, encodedConfig) + + gotConfig, err := decoder.ParseCloudStackSecret() + if tc.wantErr { + g.Expect(err).NotTo(BeNil()) + } else { + g.Expect(err).To(BeNil()) + if !reflect.DeepEqual(tc.wantConfig, gotConfig) { + t.Errorf("%v got = %v, want %v", tc.name, gotConfig, tc.wantConfig) + } + } + }) + } } -func TestInvalidVerifySslShouldFailToParse(t *testing.T) { - var tctx testContext - tctx.backupContext() - +func TestCloudStackConfigDecoderInvalidEncoding(t *testing.T) { g := NewWithT(t) - 
os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, invalidVerifySslValue) - _, err := decoder.ParseCloudStackSecret() - g.Expect(err).ToNot(BeNil()) + t.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, "xxx") - tctx.restoreContext() -} - -func TestMissingGlobalSectionShouldFailToParse(t *testing.T) { - var tctx testContext - tctx.backupContext() - - g := NewWithT(t) - os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, missingGlobalSection) _, err := decoder.ParseCloudStackSecret() - g.Expect(err).ToNot(BeNil()) - - tctx.restoreContext() + g.Expect(err).NotTo(BeNil()) } -func TestInvalidINIShouldFailToParse(t *testing.T) { +func TestCloudStackConfigDecoderNoEnvVariable(t *testing.T) { var tctx testContext tctx.backupContext() + os.Clearenv() g := NewWithT(t) - os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, invalidINI) - _, err := decoder.ParseCloudStackSecret() - g.Expect(err).ToNot(BeNil()) - - tctx.restoreContext() -} - -func TestMissingEnvVariableShouldFailToParse(t *testing.T) { - var tctx testContext - tctx.backupContext() - - g := NewWithT(t) - os.Unsetenv(decoder.EksacloudStackCloudConfigB64SecretKey) - _, err := decoder.ParseCloudStackSecret() - g.Expect(err).ToNot(BeNil()) - tctx.restoreContext() -} - -func TestInvalidEncodingShouldFailToParse(t *testing.T) { - var tctx testContext - tctx.backupContext() - - g := NewWithT(t) - os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, invalidEncoding) _, err := decoder.ParseCloudStackSecret() - g.Expect(err).ToNot(BeNil()) - + g.Expect(err).NotTo(BeNil()) tctx.restoreContext() } diff --git a/pkg/providers/cloudstack/mocks/client.go b/pkg/providers/cloudstack/mocks/client.go index afd957431e58..e95dc89d4f53 100644 --- a/pkg/providers/cloudstack/mocks/client.go +++ b/pkg/providers/cloudstack/mocks/client.go @@ -41,6 +41,20 @@ func (m *MockProviderCmkClient) EXPECT() *MockProviderCmkClientMockRecorder { return m.recorder } +// GetManagementApiEndpoint mocks base method. +func (m *MockProviderCmkClient) GetManagementApiEndpoint() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetManagementApiEndpoint") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetManagementApiEndpoint indicates an expected call of GetManagementApiEndpoint. +func (mr *MockProviderCmkClientMockRecorder) GetManagementApiEndpoint() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetManagementApiEndpoint", reflect.TypeOf((*MockProviderCmkClient)(nil).GetManagementApiEndpoint)) +} + // ValidateAccountPresent mocks base method. func (m *MockProviderCmkClient) ValidateAccountPresent(arg0 context.Context, arg1, arg2 string) error { m.ctrl.T.Helper() @@ -113,7 +127,7 @@ func (mr *MockProviderCmkClientMockRecorder) ValidateDomainPresent(arg0, arg1 in } // ValidateNetworkPresent mocks base method. 
-func (m *MockProviderCmkClient) ValidateNetworkPresent(arg0 context.Context, arg1 string, arg2 v1alpha1.CloudStackZone, arg3 []v1alpha1.CloudStackResourceIdentifier, arg4 string, arg5 bool) error { +func (m *MockProviderCmkClient) ValidateNetworkPresent(arg0 context.Context, arg1 string, arg2 v1alpha1.CloudStackResourceIdentifier, arg3, arg4 string, arg5 bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ValidateNetworkPresent", arg0, arg1, arg2, arg3, arg4, arg5) ret0, _ := ret[0].(error) @@ -154,19 +168,19 @@ func (mr *MockProviderCmkClientMockRecorder) ValidateTemplatePresent(arg0, arg1, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateTemplatePresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateTemplatePresent), arg0, arg1, arg2, arg3, arg4) } -// ValidateZonesPresent mocks base method. -func (m *MockProviderCmkClient) ValidateZonesPresent(arg0 context.Context, arg1 []v1alpha1.CloudStackZone) ([]v1alpha1.CloudStackResourceIdentifier, error) { +// ValidateZonePresent mocks base method. +func (m *MockProviderCmkClient) ValidateZonePresent(arg0 context.Context, arg1 v1alpha1.CloudStackZone) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateZonesPresent", arg0, arg1) - ret0, _ := ret[0].([]v1alpha1.CloudStackResourceIdentifier) + ret := m.ctrl.Call(m, "ValidateZonePresent", arg0, arg1) + ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } -// ValidateZonesPresent indicates an expected call of ValidateZonesPresent. -func (mr *MockProviderCmkClientMockRecorder) ValidateZonesPresent(arg0, arg1 interface{}) *gomock.Call { +// ValidateZonePresent indicates an expected call of ValidateZonePresent. +func (mr *MockProviderCmkClientMockRecorder) ValidateZonePresent(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateZonesPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateZonesPresent), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateZonePresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateZonePresent), arg0, arg1) } // MockProviderKubectlClient is a mock of ProviderKubectlClient interface. 
diff --git a/pkg/providers/cloudstack/testdata/cloudstack_config_invalid_format.ini b/pkg/providers/cloudstack/testdata/cloudstack_config_invalid_format.ini new file mode 100644 index 000000000000..39efacf46d27 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cloudstack_config_invalid_format.ini @@ -0,0 +1,5 @@ +[Global] +verify-ssl ; false +api-key ; test-key1 +secret-key ; test-secret1 +api-url ; http://127.16.0.1:8080/client/api diff --git a/pkg/providers/cloudstack/testdata/cloudstack_config_invalid_verifyssl.ini b/pkg/providers/cloudstack/testdata/cloudstack_config_invalid_verifyssl.ini new file mode 100644 index 000000000000..f4b16eafe5a9 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cloudstack_config_invalid_verifyssl.ini @@ -0,0 +1,5 @@ +[Global] +verify-ssl = xxx +api-key = test-key1 +secret-key = test-secret1 +api-url = http://127.16.0.1:8080/client/api diff --git a/pkg/providers/cloudstack/testdata/cloudstack_config_missing_apikey.ini b/pkg/providers/cloudstack/testdata/cloudstack_config_missing_apikey.ini new file mode 100644 index 000000000000..b87524efbe0c --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cloudstack_config_missing_apikey.ini @@ -0,0 +1,4 @@ +[Global] +verify-ssl = false +secret-key = test-secret1 +api-url = http://127.16.0.1:8080/client/api diff --git a/pkg/providers/cloudstack/testdata/cloudstack_config_missing_apiurl.ini b/pkg/providers/cloudstack/testdata/cloudstack_config_missing_apiurl.ini new file mode 100644 index 000000000000..97944ad77d9c --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cloudstack_config_missing_apiurl.ini @@ -0,0 +1,4 @@ +[Global] +verify-ssl = false +api-key = test-key1 +secret-key = test-secret1 diff --git a/pkg/providers/cloudstack/testdata/cloudstack_config_missing_secretkey.ini b/pkg/providers/cloudstack/testdata/cloudstack_config_missing_secretkey.ini new file mode 100644 index 000000000000..30f825c18049 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cloudstack_config_missing_secretkey.ini @@ -0,0 +1,4 @@ +[Global] +verify-ssl = false +api-key = test-key1 +api-url = http://127.16.0.1:8080/client/api diff --git a/pkg/providers/cloudstack/testdata/cloudstack_config_missing_verifyssl.ini b/pkg/providers/cloudstack/testdata/cloudstack_config_missing_verifyssl.ini new file mode 100644 index 000000000000..8be593bb22df --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cloudstack_config_missing_verifyssl.ini @@ -0,0 +1,4 @@ +[Global] +api-key = test-key1 +secret-key = test-secret1 +api-url = http://127.16.0.1:8080/client/api diff --git a/pkg/providers/cloudstack/testdata/cloudstack_config_multiple_profiles.ini b/pkg/providers/cloudstack/testdata/cloudstack_config_multiple_profiles.ini new file mode 100644 index 000000000000..ead54c30f37c --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cloudstack_config_multiple_profiles.ini @@ -0,0 +1,11 @@ +[Global] +verify-ssl = false +api-key = test-key1 +secret-key = test-secret1 +api-url = http://127.16.0.1:8080/client/api + +[Instance2] +verify-ssl = true +api-key = test-key2 +secret-key = test-secret2 +api-url = http://127.16.0.2:8080/client/api diff --git a/pkg/providers/cloudstack/testdata/cloudstack_config_no_sections.ini b/pkg/providers/cloudstack/testdata/cloudstack_config_no_sections.ini new file mode 100644 index 000000000000..3266d4477b71 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cloudstack_config_no_sections.ini @@ -0,0 +1,4 @@ +verify-ssl = false +api-key = test-key1 +secret-key = test-secret1 +api-url = 
http://127.16.0.1:8080/client/api diff --git a/pkg/providers/cloudstack/testdata/cloudstack_config_valid.ini b/pkg/providers/cloudstack/testdata/cloudstack_config_valid.ini new file mode 100644 index 000000000000..87243f4b0d30 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cloudstack_config_valid.ini @@ -0,0 +1,5 @@ +[Global] +verify-ssl = false +api-key = test-key1 +secret-key = test-secret1 +api-url = http://127.16.0.1:8080/client/api diff --git a/pkg/providers/cloudstack/testdata/cluster_main_with_availability_zones.yaml b/pkg/providers/cloudstack/testdata/cluster_main_with_availability_zones.yaml new file mode 100644 index 000000000000..48a983213f9b --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cluster_main_with_availability_zones.yaml @@ -0,0 +1,139 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test-namespace +spec: + clusterNetwork: + cni: cilium + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 3 + endpoint: + host: 1.2.3.4 + machineGroupRef: + kind: CloudStackMachineConfig + name: test-cp + datacenterRef: + kind: CloudStackDatacenterConfig + name: test + externalEtcdConfiguration: + count: 3 + machineGroupRef: + kind: CloudStackMachineConfig + name: test-etcd + kubernetesVersion: "1.21" + workerNodeGroupConfigurations: + - count: 3 + machineGroupRef: + kind: CloudStackMachineConfig + name: test +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackDatacenterConfig +metadata: + name: test + namespace: test-namespace +spec: + account: "admin" + domain: "domain1" + zones: + - name: "zone1" + network: + name: "net1" + managementApiEndpoint: "http://127.16.0.1:8080/client/api" + availabilityZones: + - credentialsRef: "zone2" + account: "admin" + domain: "domain2" + managementApiEndpoint: "http://127.16.0.2:8080/client/api" + zone: + name: "zone2" + network: + name: "net2" + +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackMachineConfig +metadata: + name: test-cp + namespace: test-namespace +spec: + computeOffering: + name: "m4-large" + users: + - name: "mySshUsername" + sshAuthorizedKeys: # The key below was manually generated and not used in any production systems + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" + template: + name: "centos7-k8s-118" + diskOffering: + name: "Small" + mountPath: "/data-small" + device: "/dev/vdb" + filesystem: "ext4" + label: "data_disk" + symlinks: + /var/log/kubernetes: /data-small/var/log/kubernetes + affinityGroupIds: + - control-plane-anti-affinity +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackMachineConfig +metadata: + name: test + namespace: test-namespace +spec: + computeOffering: + name: "m4-large" + users: + - name: "mySshUsername" + sshAuthorizedKeys: 
# The key below was manually generated and not used in any production systems + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" + template: + name: "centos7-k8s-118" + diskOffering: + name: "Small" + mountPath: "/data-small" + device: "/dev/vdb" + filesystem: "ext4" + label: "data_disk" + symlinks: + /var/log/pods: /data-small/var/log/pods + /var/log/containers: /data-small/var/log/containers + affinityGroupIds: + - worker-affinity + userCustomDetails: + foo: bar +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackMachineConfig +metadata: + name: test-etcd + namespace: test-namespace +spec: + computeOffering: + name: "m4-large" + users: + - name: "mySshUsername" + sshAuthorizedKeys: # The key below was manually generated and not used in any production systems + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" + template: + name: "centos7-k8s-118" + diskOffering: + name: "Small" + mountPath: "/data-small" + device: "/dev/vdb" + filesystem: "ext4" + label: "data_disk" + symlinks: + /var/lib/: /data-small/var/lib + affinityGroupIds: + - etcd-affinity + +--- diff --git a/pkg/providers/cloudstack/validator.go b/pkg/providers/cloudstack/validator.go index c4b59e37e9ee..18b309710bc3 100644 --- a/pkg/providers/cloudstack/validator.go +++ b/pkg/providers/cloudstack/validator.go @@ -2,6 +2,7 @@ package cloudstack import ( "context" + "errors" "fmt" "net" "strconv" @@ -13,7 +14,8 @@ import ( ) type Validator struct { - cmk ProviderCmkClient + cmks CmkClientMap + availabilityZones []localAvailabilityZone } // Taken from https://github.com/shapeblue/cloudstack/blob/08bb4ad9fea7e422c3d3ac6d52f4670b1e89eed7/api/src/main/java/com/cloud/vm/VmDetailConstants.java @@ -28,71 +30,88 @@ var restrictedUserCustomDetails = [...]string{ "keypairnames", "controlNodeLoginUser", } -var domainId string - -func NewValidator(cmk ProviderCmkClient) *Validator { +func NewValidator(cmks CmkClientMap) *Validator { return &Validator{ - cmk: cmk, + cmks: cmks, + availabilityZones: []localAvailabilityZone{}, } } +type localAvailabilityZone struct { + *anywherev1.CloudStackAvailabilityZone + ZoneId string + DomainId string +} + type 
ProviderCmkClient interface { + GetManagementApiEndpoint() string ValidateCloudStackConnection(ctx context.Context) error ValidateServiceOfferingPresent(ctx context.Context, zoneId string, serviceOffering anywherev1.CloudStackResourceIdentifier) error ValidateDiskOfferingPresent(ctx context.Context, zoneId string, diskOffering anywherev1.CloudStackResourceDiskOffering) error ValidateTemplatePresent(ctx context.Context, domainId string, zoneId string, account string, template anywherev1.CloudStackResourceIdentifier) error ValidateAffinityGroupsPresent(ctx context.Context, domainId string, account string, affinityGroupIds []string) error - ValidateZonesPresent(ctx context.Context, zones []anywherev1.CloudStackZone) ([]anywherev1.CloudStackResourceIdentifier, error) - ValidateNetworkPresent(ctx context.Context, domainId string, zoneRef anywherev1.CloudStackZone, zones []anywherev1.CloudStackResourceIdentifier, account string, multipleZone bool) error + ValidateZonePresent(ctx context.Context, zone anywherev1.CloudStackZone) (string, error) + ValidateNetworkPresent(ctx context.Context, domainId string, network anywherev1.CloudStackResourceIdentifier, zoneId string, account string, multipleZone bool) error ValidateDomainPresent(ctx context.Context, domain string) (anywherev1.CloudStackResourceIdentifier, error) ValidateAccountPresent(ctx context.Context, account string, domainId string) error } -func (v *Validator) validateCloudStackAccess(ctx context.Context) error { - if err := v.cmk.ValidateCloudStackConnection(ctx); err != nil { - return fmt.Errorf("failed validating connection to cloudstack: %v", err) +type CmkClientMap map[string]ProviderCmkClient + +func (v *Validator) validateCloudStackAccess(ctx context.Context, datacenterConfig *anywherev1.CloudStackDatacenterConfig) error { + azNamesToCheck := []string{} + if len(datacenterConfig.Spec.Domain) > 0 { + azNamesToCheck = append(azNamesToCheck, decoder.CloudStackGlobalAZ) + } + for _, az := range datacenterConfig.Spec.AvailabilityZones { + azNamesToCheck = append(azNamesToCheck, az.CredentialsRef) + } + + for _, azName := range azNamesToCheck { + cmk, ok := v.cmks[azName] + if !ok { + return fmt.Errorf("cannot find CloudStack profile for availability zone %s", azName) + } + if err := cmk.ValidateCloudStackConnection(ctx); err != nil { + return fmt.Errorf("failed validating connection to cloudstack %s: %v", azName, err) + } } - logger.MarkPass("Connected to server") + logger.MarkPass("Connected to", "servers", azNamesToCheck) return nil } func (v *Validator) ValidateCloudStackDatacenterConfig(ctx context.Context, datacenterConfig *anywherev1.CloudStackDatacenterConfig) error { - if len(datacenterConfig.Spec.Domain) <= 0 { - return fmt.Errorf("CloudStackDatacenterConfig domain is not set or is empty") - } - if datacenterConfig.Spec.ManagementApiEndpoint == "" { - return fmt.Errorf("CloudStackDatacenterConfig managementApiEndpoint is not set or is empty") - } - _, err := getHostnameFromUrl(datacenterConfig.Spec.ManagementApiEndpoint) - if err != nil { - return fmt.Errorf("checking management api endpoint: %v", err) - } - execConfig, err := decoder.ParseCloudStackSecret() - if err != nil { - return fmt.Errorf("parsing cloudstack secret: %v", err) - } - if execConfig.ManagementUrl != datacenterConfig.Spec.ManagementApiEndpoint { - return fmt.Errorf("cloudstack secret management url (%s) differs from cluster spec management url (%s)", - execConfig.ManagementUrl, datacenterConfig.Spec.ManagementApiEndpoint) - } - - if err := 
v.validateDomainAndAccount(ctx, datacenterConfig); err != nil { + if err := v.generateLocalAvailabilityZones(ctx, datacenterConfig); err != nil { return err } - zones, errZone := v.cmk.ValidateZonesPresent(ctx, datacenterConfig.Spec.Zones) - if errZone != nil { - return fmt.Errorf("checking zones %v", errZone) - } + for _, az := range v.availabilityZones { + _, err := getHostnameFromUrl(az.ManagementApiEndpoint) + if err != nil { + return fmt.Errorf("checking management api endpoint: %v", err) + } - for _, zone := range datacenterConfig.Spec.Zones { - if len(zone.Network.Id) == 0 && len(zone.Network.Name) == 0 { - return fmt.Errorf("zone network is not set or is empty") + cmk, ok := v.cmks[az.CredentialsRef] + if !ok { + return fmt.Errorf("cannot find CloudStack profile for availability zone %s", az.CredentialsRef) } - err := v.cmk.ValidateNetworkPresent(ctx, domainId, zone, zones, datacenterConfig.Spec.Account, len(zones) > 1) + endpoint := cmk.GetManagementApiEndpoint() + if endpoint != az.ManagementApiEndpoint { + return fmt.Errorf("cloudstack secret management url (%s) differs from cluster spec management url (%s)", + endpoint, az.ManagementApiEndpoint) + } + + zoneId, err := cmk.ValidateZonePresent(ctx, az.CloudStackAvailabilityZone.Zone) if err != nil { - return fmt.Errorf("checking network %v", err) + return err + } + az.CloudStackAvailabilityZone.Zone.Id = zoneId + if len(az.CloudStackAvailabilityZone.Zone.Network.Id) == 0 && len(az.CloudStackAvailabilityZone.Zone.Network.Name) == 0 { + return fmt.Errorf("zone network is not set or is empty") + } + if err := cmk.ValidateNetworkPresent(ctx, az.DomainId, az.CloudStackAvailabilityZone.Zone.Network, zoneId, az.Account, true); err != nil { + return err } } @@ -100,32 +119,64 @@ func (v *Validator) ValidateCloudStackDatacenterConfig(ctx context.Context, data return nil } -func (v *Validator) validateDomainAndAccount(ctx context.Context, datacenterConfig *anywherev1.CloudStackDatacenterConfig) error { - if (datacenterConfig.Spec.Domain != "" && datacenterConfig.Spec.Account == "") || - (datacenterConfig.Spec.Domain == "" && datacenterConfig.Spec.Account != "") { - return fmt.Errorf("both domain and account must be specified or none of them must be specified") +func (v *Validator) generateLocalAvailabilityZones(ctx context.Context, datacenterConfig *anywherev1.CloudStackDatacenterConfig) error { + if datacenterConfig == nil { + return errors.New("CloudStack Datacenter Config is null") } - if datacenterConfig.Spec.Domain != "" && datacenterConfig.Spec.Account != "" { - domain, errDomain := v.cmk.ValidateDomainPresent(ctx, datacenterConfig.Spec.Domain) - if errDomain != nil { - return fmt.Errorf("checking domain: %v", errDomain) + if len(datacenterConfig.Spec.Domain) > 0 { + cmk, ok := v.cmks[decoder.CloudStackGlobalAZ] + if !ok { + return fmt.Errorf("cannot find CloudStack profile for availability zone %s", decoder.CloudStackGlobalAZ) } - - errAccount := v.cmk.ValidateAccountPresent(ctx, datacenterConfig.Spec.Account, domain.Id) - if errAccount != nil { - return fmt.Errorf("checking account: %v", errAccount) + domain, err := cmk.ValidateDomainPresent(ctx, datacenterConfig.Spec.Domain) + if err != nil { + return err + } + if err := cmk.ValidateAccountPresent(ctx, datacenterConfig.Spec.Account, domain.Id); err != nil { + return err + } + for _, zone := range datacenterConfig.Spec.Zones { + availabilityZone := localAvailabilityZone{ + CloudStackAvailabilityZone: &anywherev1.CloudStackAvailabilityZone{ + CredentialsRef: 
decoder.CloudStackGlobalAZ, + Domain: datacenterConfig.Spec.Domain, + Account: datacenterConfig.Spec.Account, + ManagementApiEndpoint: datacenterConfig.Spec.ManagementApiEndpoint, + Zone: zone, + }, + DomainId: domain.Id, + } + v.availabilityZones = append(v.availabilityZones, availabilityZone) + } + } + for _, az := range datacenterConfig.Spec.AvailabilityZones { + cmk, ok := v.cmks[az.CredentialsRef] + if !ok { + return fmt.Errorf("cannot find CloudStack profile for availability zone %s", az.CredentialsRef) + } + domain, err := cmk.ValidateDomainPresent(ctx, az.Domain) + if err != nil { + return err + } + if err := cmk.ValidateAccountPresent(ctx, az.Account, domain.Id); err != nil { + return err } + availabilityZone := localAvailabilityZone{ + CloudStackAvailabilityZone: &az, + DomainId: domain.Id, + } + v.availabilityZones = append(v.availabilityZones, availabilityZone) + } - domainId = domain.Id + if len(v.availabilityZones) <= 0 { + return fmt.Errorf("CloudStackDatacenterConfig domain or availabilityZones is not set or is empty") } return nil } // TODO: dry out machine configs validations func (v *Validator) ValidateClusterMachineConfigs(ctx context.Context, cloudStackClusterSpec *Spec) error { - var etcdMachineConfig *anywherev1.CloudStackMachineConfig - if len(cloudStackClusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host) <= 0 { return fmt.Errorf("cluster controlPlaneConfiguration.Endpoint.Host is not set or is empty") } @@ -142,7 +193,7 @@ func (v *Validator) ValidateClusterMachineConfigs(ctx context.Context, cloudStac if cloudStackClusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef == nil { return fmt.Errorf("must specify machineGroupRef for etcd machines") } - etcdMachineConfig = cloudStackClusterSpec.etcdMachineConfig() + etcdMachineConfig := cloudStackClusterSpec.etcdMachineConfig() if etcdMachineConfig == nil { return fmt.Errorf("cannot find CloudStackMachineConfig %v for etcd machines", cloudStackClusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name) } @@ -234,29 +285,27 @@ func (v *Validator) validateMachineConfig(ctx context.Context, datacenterConfig return fmt.Errorf("restricted key %s found in custom user details", restrictedKey) } } - zones, err := v.cmk.ValidateZonesPresent(ctx, datacenterConfig.Spec.Zones) - if err != nil { - return fmt.Errorf("checking zones %v", err) - } - account := datacenterConfig.Spec.Account - for _, zone := range zones { - if err = v.cmk.ValidateTemplatePresent(ctx, domainId, zone.Id, account, machineConfig.Spec.Template); err != nil { + for _, az := range v.availabilityZones { + cmk, ok := v.cmks[az.CredentialsRef] + if !ok { + return fmt.Errorf("cannot find CloudStack profile for availability zone %s", az.CredentialsRef) + } + if err := cmk.ValidateTemplatePresent(ctx, az.DomainId, az.CloudStackAvailabilityZone.Zone.Id, az.Account, machineConfig.Spec.Template); err != nil { return fmt.Errorf("validating template: %v", err) } - if err = v.cmk.ValidateServiceOfferingPresent(ctx, zone.Id, machineConfig.Spec.ComputeOffering); err != nil { + if err := cmk.ValidateServiceOfferingPresent(ctx, az.CloudStackAvailabilityZone.Zone.Id, machineConfig.Spec.ComputeOffering); err != nil { return fmt.Errorf("validating service offering: %v", err) } if len(machineConfig.Spec.DiskOffering.Id) > 0 || len(machineConfig.Spec.DiskOffering.Name) > 0 { - if err = v.cmk.ValidateDiskOfferingPresent(ctx, zone.Id, machineConfig.Spec.DiskOffering); err != nil { + if err := cmk.ValidateDiskOfferingPresent(ctx, 
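Reviewer aside on the generateLocalAvailabilityZones loop above (not part of the patch): the struct literal takes &az, the address of the range variable. Before Go 1.22 that variable is reused on every iteration, so with more than one availability zone every stored localAvailabilityZone would end up pointing at the last element. A minimal sketch of the conventional fix, assuming the rest of the loop stays as written:

    for _, az := range datacenterConfig.Spec.AvailabilityZones {
        az := az // copy the element so &az refers to a distinct value per iteration
        cmk, ok := v.cmks[az.CredentialsRef]
        if !ok {
            return fmt.Errorf("cannot find CloudStack profile for availability zone %s", az.CredentialsRef)
        }
        domain, err := cmk.ValidateDomainPresent(ctx, az.Domain)
        if err != nil {
            return err
        }
        if err := cmk.ValidateAccountPresent(ctx, az.Account, domain.Id); err != nil {
            return err
        }
        v.availabilityZones = append(v.availabilityZones, localAvailabilityZone{
            CloudStackAvailabilityZone: &az,
            DomainId:                   domain.Id,
        })
    }

The copy keeps each zone's DomainId and credential association intact when the validator later iterates v.availabilityZones in validateMachineConfig.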
az.CloudStackAvailabilityZone.Zone.Id, machineConfig.Spec.DiskOffering); err != nil { return fmt.Errorf("validating disk offering: %v", err) } } - } - - if len(machineConfig.Spec.AffinityGroupIds) > 0 { - if err = v.cmk.ValidateAffinityGroupsPresent(ctx, domainId, account, machineConfig.Spec.AffinityGroupIds); err != nil { - return fmt.Errorf("validating affinity group ids: %v", err) + if len(machineConfig.Spec.AffinityGroupIds) > 0 { + if err := cmk.ValidateAffinityGroupsPresent(ctx, az.DomainId, az.Account, machineConfig.Spec.AffinityGroupIds); err != nil { + return fmt.Errorf("validating affinity group ids: %v", err) + } } } diff --git a/pkg/providers/cloudstack/validator_test.go b/pkg/providers/cloudstack/validator_test.go index 1c63ea892532..cda388da7cfc 100644 --- a/pkg/providers/cloudstack/validator_test.go +++ b/pkg/providers/cloudstack/validator_test.go @@ -12,12 +12,14 @@ import ( "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" + "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder" "github.com/aws/eks-anywhere/pkg/providers/cloudstack/mocks" ) const ( - testClusterConfigMainFilename = "cluster_main.yaml" - testDataDir = "testdata" + testClusterConfigMainFilename = "cluster_main.yaml" + testClusterConfigMainWithAZsFilename = "cluster_main_with_availability_zones.yaml" + testDataDir = "testdata" ) var testTemplate = v1alpha1.CloudStackResourceIdentifier{ @@ -42,18 +44,37 @@ func TestValidateCloudStackDatacenterConfig(t *testing.T) { ctx := context.Background() setupContext() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) - cloudstackDatacenter, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) + datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) + if err != nil { + t.Fatalf("unable to get datacenter config from file") + } + + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + + err = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) + if err != nil { + t.Fatalf("failed to validate CloudStackDataCenterConfig: %v", err) + } +} + +func TestValidateCloudStackDatacenterConfigWithAZ(t *testing.T) { + ctx := context.Background() + setupContext() + cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) + cmk2 := mocks.NewMockProviderCmkClient(gomock.NewController(t)) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk, "zone2": cmk2}) + + datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainWithAZsFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") } - cmk.EXPECT().ValidateZonesPresent(ctx, cloudstackDatacenter.Spec.Zones).Return([]v1alpha1.CloudStackResourceIdentifier{{Name: "zone1", Id: "4e3b338d-87a6-4189-b931-a1747edeea8f"}}, nil) - cmk.EXPECT().ValidateDomainPresent(ctx, cloudstackDatacenter.Spec.Domain).Return(v1alpha1.CloudStackResourceIdentifier{Id: "5300cdac-74d5-11ec-8696-c81f66d3e965", Name: cloudstackDatacenter.Spec.Domain}, nil) - cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), gomock.Any()).Return(nil) - cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), false).Return(nil) - err = validator.ValidateCloudStackDatacenterConfig(ctx, cloudstackDatacenter) + 
setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + setupMockForAvailabilityZonesValidation(cmk2, ctx, datacenterConfig.Spec.AvailabilityZones) + + err = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) if err != nil { t.Fatalf("failed to validate CloudStackDataCenterConfig: %v", err) } @@ -62,11 +83,14 @@ func TestValidateCloudStackDatacenterConfig(t *testing.T) { func TestValidateCloudStackConnection(t *testing.T) { ctx := context.Background() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) + if err != nil { + t.Fatalf("unable to get datacenter config from file") + } cmk.EXPECT().ValidateCloudStackConnection(ctx).Return(nil) - err := validator.validateCloudStackAccess(ctx) - if err != nil { + if err := validator.validateCloudStackAccess(ctx, datacenterConfig); err != nil { t.Fatalf("failed to validate CloudStackDataCenterConfig: %v", err) } } @@ -74,7 +98,7 @@ func TestValidateCloudStackConnection(t *testing.T) { func TestValidateMachineConfigsNoControlPlaneEndpointIP(t *testing.T) { ctx := context.Background() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { @@ -86,6 +110,8 @@ func TestValidateMachineConfigsNoControlPlaneEndpointIP(t *testing.T) { } clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host = "" + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) thenErrorExpected(t, "cluster controlPlaneConfiguration.Endpoint.Host is not set or is empty", err) @@ -105,13 +131,11 @@ func TestValidateDatacenterConfigsNoNetwork(t *testing.T) { datacenterConfig: datacenterConfig, machineConfigsLookup: nil, } - validator := NewValidator(cmk) - cmk.EXPECT().ValidateZonesPresent(ctx, gomock.Any()).Return([]v1alpha1.CloudStackResourceIdentifier{{Name: "zone1", Id: "4e3b338d-87a6-4189-b931-a1747edeea8f"}}, nil) - cmk.EXPECT().ValidateDomainPresent(ctx, gomock.Any()).Return(v1alpha1.CloudStackResourceIdentifier{Id: "5300cdac-74d5-11ec-8696-c81f66d3e965", Name: "ROOT"}, nil) - cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), gomock.Any()).Return(nil) - + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig.Spec.Zones[0].Network.Id = "" datacenterConfig.Spec.Zones[0].Network.Name = "" + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + err = validator.ValidateCloudStackDatacenterConfig(ctx, cloudStackClusterSpec.datacenterConfig) thenErrorExpected(t, "zone network is not set or is empty", err) @@ -130,7 +154,8 @@ func TestValidateDatacenterBadManagementEndpoint(t *testing.T) { datacenterConfig: datacenterConfig, machineConfigsLookup: nil, } - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) datacenterConfig.Spec.ManagementApiEndpoint = ":1234.5234" err = 
validator.ValidateCloudStackDatacenterConfig(ctx, cloudStackClusterSpec.datacenterConfig) @@ -152,7 +177,8 @@ func TestValidateDatacenterInconsistentManagementEndpoints(t *testing.T) { datacenterConfig: datacenterConfig, machineConfigsLookup: nil, } - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) datacenterConfig.Spec.ManagementApiEndpoint = "abcefg.com" err = validator.ValidateCloudStackDatacenterConfig(ctx, cloudStackClusterSpec.datacenterConfig) @@ -168,7 +194,7 @@ func TestSetupAndValidateDiskOfferingEmpty(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -185,12 +211,14 @@ func TestSetupAndValidateDiskOfferingEmpty(t *testing.T) { etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name cloudStackClusterSpec.machineConfigsLookup[etcdMachineConfigName].Spec.DiskOffering = v1alpha1.CloudStackResourceDiskOffering{} - cmk.EXPECT().ValidateZonesPresent(gomock.Any(), gomock.Any()).Times(3).Return([]v1alpha1.CloudStackResourceIdentifier{{Name: "zone1", Id: "4e3b338d-87a6-4189-b931-a1747edeea8f"}}, nil) + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + _ = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) if err != nil { t.Fatalf("validator.ValidateClusterMachineConfigs() err = %v, want err = nil", err) @@ -205,7 +233,7 @@ func TestSetupAndValidateValidDiskOffering(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -230,12 +258,14 @@ func TestSetupAndValidateValidDiskOffering(t *testing.T) { etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name cloudStackClusterSpec.machineConfigsLookup[etcdMachineConfigName].Spec.DiskOffering = v1alpha1.CloudStackResourceDiskOffering{} - cmk.EXPECT().ValidateZonesPresent(gomock.Any(), gomock.Any()).Times(3).Return([]v1alpha1.CloudStackResourceIdentifier{{Name: "zone1", Id: "4e3b338d-87a6-4189-b931-a1747edeea8f"}}, nil) + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), 
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(1) cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + _ = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) if err != nil { t.Fatalf("validator.ValidateClusterMachineConfigs() err = %v, want err = nil", err) @@ -250,7 +280,7 @@ func TestSetupAndValidateInvalidDiskOfferingNotPresent(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -275,12 +305,14 @@ func TestSetupAndValidateInvalidDiskOfferingNotPresent(t *testing.T) { etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name cloudStackClusterSpec.machineConfigsLookup[etcdMachineConfigName].Spec.DiskOffering = v1alpha1.CloudStackResourceDiskOffering{} - cmk.EXPECT().ValidateZonesPresent(gomock.Any(), gomock.Any()).AnyTimes().Return([]v1alpha1.CloudStackResourceIdentifier{{Name: "zone1", Id: "4e3b338d-87a6-4189-b931-a1747edeea8f"}}, nil) + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return(errors.New("match me")) cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + _ = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) wantErrMsg := "validating disk offering: match me" assert.Contains(t, err.Error(), wantErrMsg, "expected error containing %q, got %v", wantErrMsg, err) @@ -294,7 +326,7 @@ func TestSetupAndValidateInValidDiskOfferingBadMountPath(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -319,7 +351,8 @@ func TestSetupAndValidateInValidDiskOfferingBadMountPath(t *testing.T) { etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name cloudStackClusterSpec.machineConfigsLookup[etcdMachineConfigName].Spec.DiskOffering = v1alpha1.CloudStackResourceDiskOffering{} - cmk.EXPECT().ValidateZonesPresent(gomock.Any(), gomock.Any()).AnyTimes().Return([]v1alpha1.CloudStackResourceIdentifier{{Name: "zone1", Id: 
"4e3b338d-87a6-4189-b931-a1747edeea8f"}}, nil) + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() @@ -337,7 +370,7 @@ func TestSetupAndValidateInValidDiskOfferingEmptyDevice(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -362,7 +395,8 @@ func TestSetupAndValidateInValidDiskOfferingEmptyDevice(t *testing.T) { etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name cloudStackClusterSpec.machineConfigsLookup[etcdMachineConfigName].Spec.DiskOffering = v1alpha1.CloudStackResourceDiskOffering{} - cmk.EXPECT().ValidateZonesPresent(gomock.Any(), gomock.Any()).AnyTimes().Return([]v1alpha1.CloudStackResourceIdentifier{{Name: "zone1", Id: "4e3b338d-87a6-4189-b931-a1747edeea8f"}}, nil) + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() @@ -380,7 +414,7 @@ func TestSetupAndValidateInValidDiskOfferingEmptyFilesystem(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -405,7 +439,8 @@ func TestSetupAndValidateInValidDiskOfferingEmptyFilesystem(t *testing.T) { etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name cloudStackClusterSpec.machineConfigsLookup[etcdMachineConfigName].Spec.DiskOffering = v1alpha1.CloudStackResourceDiskOffering{} - cmk.EXPECT().ValidateZonesPresent(gomock.Any(), gomock.Any()).AnyTimes().Return([]v1alpha1.CloudStackResourceIdentifier{{Name: "zone1", Id: "4e3b338d-87a6-4189-b931-a1747edeea8f"}}, nil) + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() @@ -423,7 +458,7 @@ func TestSetupAndValidateInValidDiskOfferingEmptyLabel(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", 
testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -448,7 +483,8 @@ func TestSetupAndValidateInValidDiskOfferingEmptyLabel(t *testing.T) { etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name cloudStackClusterSpec.machineConfigsLookup[etcdMachineConfigName].Spec.DiskOffering = v1alpha1.CloudStackResourceDiskOffering{} - cmk.EXPECT().ValidateZonesPresent(gomock.Any(), gomock.Any()).AnyTimes().Return([]v1alpha1.CloudStackResourceIdentifier{{Name: "zone1", Id: "4e3b338d-87a6-4189-b931-a1747edeea8f"}}, nil) + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() @@ -466,7 +502,7 @@ func TestSetupAndValidateUsersNil(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -483,12 +519,14 @@ func TestSetupAndValidateUsersNil(t *testing.T) { etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name cloudStackClusterSpec.machineConfigsLookup[etcdMachineConfigName].Spec.Users = nil - cmk.EXPECT().ValidateZonesPresent(gomock.Any(), gomock.Any()).Times(3).Return([]v1alpha1.CloudStackResourceIdentifier{{Name: "zone1", Id: "4e3b338d-87a6-4189-b931-a1747edeea8f"}}, nil) + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + _ = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) if err != nil { t.Fatalf("validator.ValidateClusterMachineConfigs() err = %v, want err = nil", err) @@ -503,7 +541,7 @@ func TestSetupAndValidateRestrictedUserDetails(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != 
nil { t.Fatalf("unable to get datacenter config from file") @@ -520,6 +558,8 @@ func TestSetupAndValidateRestrictedUserDetails(t *testing.T) { etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name cloudStackClusterSpec.machineConfigsLookup[etcdMachineConfigName].Spec.UserCustomDetails = map[string]string{"keyboard": "test"} + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) if err == nil { t.Fatalf("expected error like 'validation failed: restricted key keyboard found in custom user details' but no error was thrown") @@ -534,7 +574,7 @@ func TestSetupAndValidateSshAuthorizedKeysNil(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -551,17 +591,38 @@ func TestSetupAndValidateSshAuthorizedKeysNil(t *testing.T) { etcdMachineConfigName := clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name cloudStackClusterSpec.machineConfigsLookup[etcdMachineConfigName].Spec.Users[0].SshAuthorizedKeys = nil - cmk.EXPECT().ValidateZonesPresent(gomock.Any(), gomock.Any()).Times(3).Return([]v1alpha1.CloudStackResourceIdentifier{{Name: "zone1", Id: "4e3b338d-87a6-4189-b931-a1747edeea8f"}}, nil) + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + + _ = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) if err != nil { t.Fatalf("validator.ValidateClusterMachineConfigs() err = %v, want err = nil", err) } } +func setupMockForDatacenterConfigValidation(cmk *mocks.MockProviderCmkClient, ctx context.Context, datacenterConfig *v1alpha1.CloudStackDatacenterConfig) { + cmk.EXPECT().ValidateZonePresent(ctx, datacenterConfig.Spec.Zones[0]).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea8f", nil) + cmk.EXPECT().ValidateDomainPresent(ctx, datacenterConfig.Spec.Domain).AnyTimes().Return(v1alpha1.CloudStackResourceIdentifier{Id: "5300cdac-74d5-11ec-8696-c81f66d3e965", Name: datacenterConfig.Spec.Domain}, nil) + cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), gomock.Any()).AnyTimes().Return(nil) + cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) + cmk.EXPECT().GetManagementApiEndpoint().AnyTimes().Return(datacenterConfig.Spec.ManagementApiEndpoint) +} + +func setupMockForAvailabilityZonesValidation(cmk *mocks.MockProviderCmkClient, ctx context.Context, azs []v1alpha1.CloudStackAvailabilityZone) { + for _, az := range azs { + cmk.EXPECT().ValidateZonePresent(ctx, 
az.Zone).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea82", nil) + cmk.EXPECT().ValidateDomainPresent(ctx, az.Domain).AnyTimes().Return(v1alpha1.CloudStackResourceIdentifier{Id: "5300cdac-74d5-11ec-8696-c81f66d3e962", Name: az.Domain}, nil) + cmk.EXPECT().ValidateAccountPresent(ctx, az.Account, gomock.Any()).AnyTimes().Return(nil) + cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) + cmk.EXPECT().GetManagementApiEndpoint().AnyTimes().Return(az.ManagementApiEndpoint) + } +} + func TestSetupAndValidateCreateClusterCPMachineGroupRefNil(t *testing.T) { ctx := context.Background() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) @@ -570,7 +631,7 @@ func TestSetupAndValidateCreateClusterCPMachineGroupRefNil(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -582,6 +643,8 @@ func TestSetupAndValidateCreateClusterCPMachineGroupRefNil(t *testing.T) { } clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef = nil + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) thenErrorExpected(t, "must specify machineGroupRef for control plane", err) } @@ -594,7 +657,7 @@ func TestSetupAndValidateCreateClusterWorkerMachineGroupRefNil(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -606,6 +669,8 @@ func TestSetupAndValidateCreateClusterWorkerMachineGroupRefNil(t *testing.T) { } clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef = nil + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) thenErrorExpected(t, "must specify machineGroupRef for worker nodes", err) } @@ -618,7 +683,7 @@ func TestSetupAndValidateCreateClusterEtcdMachineGroupRefNil(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -630,6 +695,8 @@ func TestSetupAndValidateCreateClusterEtcdMachineGroupRefNil(t *testing.T) { } clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef = nil + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + err = 
validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) thenErrorExpected(t, "must specify machineGroupRef for etcd machines", err) } @@ -642,7 +709,7 @@ func TestSetupAndValidateCreateClusterCPMachineGroupRefNonexistent(t *testing.T) t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -654,6 +721,8 @@ func TestSetupAndValidateCreateClusterCPMachineGroupRefNonexistent(t *testing.T) } clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name = "nonexistent" + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) thenErrorExpected(t, "cannot find CloudStackMachineConfig nonexistent for control plane", err) } @@ -666,7 +735,7 @@ func TestSetupAndValidateCreateClusterWorkerMachineGroupRefNonexistent(t *testin t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -678,6 +747,8 @@ func TestSetupAndValidateCreateClusterWorkerMachineGroupRefNonexistent(t *testin } clusterSpec.Cluster.Spec.WorkerNodeGroupConfigurations[0].MachineGroupRef.Name = "nonexistent" + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) thenErrorExpected(t, "cannot find CloudStackMachineConfig nonexistent for worker nodes", err) } @@ -690,7 +761,7 @@ func TestSetupAndValidateCreateClusterEtcdMachineGroupRefNonexistent(t *testing. t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -702,6 +773,8 @@ func TestSetupAndValidateCreateClusterEtcdMachineGroupRefNonexistent(t *testing. 
} clusterSpec.Cluster.Spec.ExternalEtcdConfiguration.MachineGroupRef.Name = "nonexistent" + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) thenErrorExpected(t, "cannot find CloudStackMachineConfig nonexistent for etcd machines", err) } @@ -714,7 +787,7 @@ func TestSetupAndValidateCreateClusterTemplateDifferent(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -727,6 +800,8 @@ func TestSetupAndValidateCreateClusterTemplateDifferent(t *testing.T) { controlPlaneMachineConfigName := clusterSpec.Cluster.Spec.ControlPlaneConfiguration.MachineGroupRef.Name cloudStackClusterSpec.machineConfigsLookup[controlPlaneMachineConfigName].Spec.Template = v1alpha1.CloudStackResourceIdentifier{Name: "different"} + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) thenErrorExpected(t, "control plane and etcd machines must have the same template specified", err) } @@ -748,13 +823,16 @@ func TestValidateMachineConfigsHappyCase(t *testing.T) { datacenterConfig: datacenterConfig, machineConfigsLookup: machineConfigs, } - validator := NewValidator(cmk) - cmk.EXPECT().ValidateZonesPresent(gomock.Any(), gomock.Any()).Times(3).Return([]v1alpha1.CloudStackResourceIdentifier{{Name: "zone1", Id: "4e3b338d-87a6-4189-b931-a1747edeea8f"}}, nil) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) + cmk.EXPECT().ValidateTemplatePresent(ctx, gomock.Any(), gomock.Any(), datacenterConfig.Spec.Account, testTemplate).Times(3) cmk.EXPECT().ValidateServiceOfferingPresent(ctx, gomock.Any(), testOffering).Times(3) cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(3) cmk.EXPECT().ValidateAffinityGroupsPresent(ctx, gomock.Any(), datacenterConfig.Spec.Account, gomock.Any()).Times(3) + + _ = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) assert.Nil(t, err) assert.Equal(t, "1.2.3.4:6443", clusterSpec.Cluster.Spec.ControlPlaneConfiguration.Endpoint.Host) @@ -771,16 +849,9 @@ func TestValidateCloudStackMachineConfig(t *testing.T) { if err != nil { t.Fatalf("unable to get datacenter config from file") } - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) for _, machineConfig := range machineConfigs { - cmk.EXPECT().ValidateZonesPresent(gomock.Any(), gomock.Any()).Return([]v1alpha1.CloudStackResourceIdentifier{{Name: "zone1", Id: "4e3b338d-87a6-4189-b931-a1747edeea8f"}}, nil) - cmk.EXPECT().ValidateTemplatePresent(ctx, gomock.Any(), gomock.Any(), "admin", machineConfig.Spec.Template).Return(nil) - cmk.EXPECT().ValidateServiceOfferingPresent(ctx, gomock.Any(), machineConfig.Spec.ComputeOffering).Return(nil) - cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) - if 
len(machineConfig.Spec.AffinityGroupIds) > 0 { - cmk.EXPECT().ValidateAffinityGroupsPresent(ctx, gomock.Any(), "admin", machineConfig.Spec.AffinityGroupIds).Return(nil) - } err := validator.validateMachineConfig(ctx, datacenterConfig, machineConfig) if err != nil { t.Fatalf("failed to validate CloudStackMachineConfig: %v", err) @@ -814,11 +885,14 @@ func TestValidateMachineConfigsWithAffinity(t *testing.T) { machineConfig.Spec.AffinityGroupIds = []string{} } - validator := NewValidator(cmk) + validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + cmk.EXPECT().ValidateZonePresent(gomock.Any(), gomock.Any()).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea8f", nil) cmk.EXPECT().ValidateDomainPresent(gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateZonesPresent(gomock.Any(), gomock.Any()).AnyTimes().Return([]v1alpha1.CloudStackResourceIdentifier{{Name: "zone1", Id: "4e3b338d-87a6-4189-b931-a1747edeea8f"}}, nil) - cmk.EXPECT().ValidateTemplatePresent(ctx, gomock.Any(), - gomock.Any(), datacenterConfig.Spec.Account, testTemplate).AnyTimes() + cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), gomock.Any()).AnyTimes().Return(nil) + cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) + cmk.EXPECT().GetManagementApiEndpoint().AnyTimes().Return("http://127.16.0.1:8080/client/api") + + cmk.EXPECT().ValidateTemplatePresent(ctx, gomock.Any(), gomock.Any(), datacenterConfig.Spec.Account, testTemplate).AnyTimes() cmk.EXPECT().ValidateServiceOfferingPresent(ctx, gomock.Any(), testOffering).AnyTimes() cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateAffinityGroupsPresent(ctx, gomock.Any(), datacenterConfig.Spec.Account, gomock.Any()).AnyTimes() diff --git a/release/pkg/generate_spec.go b/release/pkg/generate_spec.go index 5712a728ea0f..9d9815b1295a 100644 --- a/release/pkg/generate_spec.go +++ b/release/pkg/generate_spec.go @@ -301,7 +301,6 @@ func (r *ReleaseConfig) GenerateBundleArtifactsTable() (map[string][]Artifact, e return nil, errors.Wrapf(err, "Error converting branch minor version to integer") } } - // TODO: change logic when we update major version to 1 if r.BuildRepoBranchName == "main" || branchMinorVersion > 9 { eksAArtifactsFuncs["cluster-api-provider-tinkerbell"] = r.GetCaptAssets diff --git a/release/pkg/test/testdata/release-0.9-bundle-release.yaml b/release/pkg/test/testdata/release-0.9-bundle-release.yaml new file mode 100644 index 000000000000..6bf8fa296ba3 --- /dev/null +++ b/release/pkg/test/testdata/release-0.9-bundle-release.yaml @@ -0,0 +1,1646 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Bundles +metadata: + creationTimestamp: "1970-01-01T00:00:00Z" + name: bundles-1 +spec: + cliMaxVersion: v0.9.0 + cliMinVersion: v0.9.0 + number: 1 + versionsBundles: + - aws: + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/cluster-template.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/infrastructure-components.yaml + controller: + arch: + - amd64 + description: Container image for cluster-api-aws-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-aws-controller + os: linux + uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-aws/cluster-api-aws-controller:v0.6.4-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/metadata.yaml + version: v0.6.4+abcdef1 + bootstrap: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.1.3/bootstrap-components.yaml + controller: + arch: + - amd64 + description: Container image for kubeadm-bootstrap-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kubeadm-bootstrap-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.1.3/metadata.yaml + version: v1.1.3+abcdef1 + bottlerocketAdmin: + admin: + arch: + - amd64 + description: Container image for bottlerocket-admin image + imageDigest: sha256:279ff0b939c8ebfae8fb5086751de831edee4c1ef307b6f0a27b553b1c2c9b52 + name: bottlerocket-admin + os: linux + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.8.0 + bottlerocketBootstrap: + bootstrap: + arch: + - amd64 + description: Container image for bottlerocket-bootstrap image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: bottlerocket-bootstrap + os: linux + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-20-16-eks-a-v0.0.0-dev-release-0.9-build.1 + certManager: + acmesolver: + arch: + - amd64 + description: Container image for cert-manager-acmesolver image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-acmesolver + os: linux + uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-acmesolver:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 + cainjector: + arch: + - amd64 + description: Container image for cert-manager-cainjector image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-cainjector + os: linux + uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-cainjector:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 + controller: + arch: + - amd64 + description: Container image for cert-manager-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-controller + os: linux + uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-controller:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 + manifest: + uri: 
https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cert-manager/manifests/v1.5.3/cert-manager.yaml + version: v1.5.3+abcdef1 + webhook: + arch: + - amd64 + description: Container image for cert-manager-webhook image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-webhook + os: linux + uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-webhook:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 + cilium: + cilium: + arch: + - amd64 + description: Container image for cilium image + imageDigest: sha256:e0c5180610dd7a2bac4ed271309b07eb6102d0bd74ed7dd33fb619879cc006f3 + name: cilium + os: linux + uri: public.ecr.aws/isovalent/cilium:v1.9.13-eksa.2 + helmChart: + description: Helm chart for cilium-chart + imageDigest: sha256:5982a9b5feded74c14a0b410006bba6d748655f8bc01f393b4519c9b10a463d0 + name: cilium-chart + uri: public.ecr.aws/isovalent/cilium:1.9.13-eksa.2 + manifest: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cilium/manifests/cilium/v1.9.13-eksa.2/cilium.yaml + operator: + arch: + - amd64 + description: Container image for operator-generic image + imageDigest: sha256:fd78027e876b00ea850f875e87a9ce81f6e4e6b4d963f115e978e8e7d180f478 + name: operator-generic + os: linux + uri: public.ecr.aws/isovalent/operator-generic:v1.9.13-eksa.2 + version: v1.9.13-eksa.2 + cloudStack: + clusterAPIController: + arch: + - amd64 + description: Container image for cluster-api-cloudstack-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-cloudstack-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-cloudstack/release/manager:v0.4.5-rc3-eks-a-v0.0.0-dev-release-0.9-build.1 + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.5-rc3/infrastructure-components.yaml + kubeVip: + arch: + - amd64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: public.ecr.aws/release-container-registry/plunder-app/kube-vip:v0.4.2-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.5-rc3/metadata.yaml + version: v0.4.5-rc3+abcdef1 + clusterAPI: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/cluster-api/v1.1.3/core-components.yaml + controller: + arch: + - amd64 + description: Container image for cluster-api-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/cluster-api/v1.1.3/metadata.yaml + version: v1.1.3+abcdef1 + 
controlPlane: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/control-plane-kubeadm/v1.1.3/control-plane-components.yaml + controller: + arch: + - amd64 + description: Container image for kubeadm-control-plane-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kubeadm-control-plane-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/control-plane-kubeadm/v1.1.3/metadata.yaml + version: v1.1.3+abcdef1 + docker: + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/cluster-template-development.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/infrastructure-components-development.yaml + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + manager: + arch: + - amd64 + description: Container image for capd-manager image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: capd-manager + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/metadata.yaml + version: v1.1.3+abcdef1 + eksD: + channel: 1-20 + components: https://distro.eks.amazonaws.com/crds/releases.distro.eks.amazonaws.com-v1alpha1.yaml + gitCommit: 0123456789abcdef0123456789abcdef01234567 + kindNode: + arch: + - amd64 + description: Container image for kind-node image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kind-node + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.20.15-eks-d-1-20-16-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeVersion: v1.20.15 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-20/kubernetes-1-20-eks-16.yaml + name: kubernetes-1-20-eks-16 + ova: + bottlerocket: + arch: + - amd64 + crictl: {} + description: Bottlerocket Ova image for EKS-D 1-20-16 release + etcdadm: {} + name: bottlerocket-v1.20.15-eks-d-1-20-16-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova + os: linux + osName: bottlerocket + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: 
https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/ova/1-20/1-20-16/bottlerocket-v1.20.15-eks-d-1-20-16-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova + ubuntu: + arch: + - amd64 + crictl: + arch: + - amd64 + description: cri-tools tarball for linux/amd64 + name: cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + description: Ubuntu Ova image for EKS-D 1-20-16 release + etcdadm: + arch: + - amd64 + description: etcdadm tarball for linux/amd64 + name: etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + name: ubuntu-v1.20.15-eks-d-1-20-16-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova + os: linux + osName: ubuntu + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/ova/1-20/1-20-16/ubuntu-v1.20.15-eks-d-1-20-16-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova + raw: + bottlerocket: + crictl: {} + etcdadm: {} + ubuntu: + arch: + - amd64 + crictl: + arch: + - amd64 + description: cri-tools tarball for linux/amd64 + name: cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + description: Ubuntu Raw image for EKS-D 1-20-16 release + etcdadm: + arch: + - amd64 + description: etcdadm tarball for linux/amd64 + name: etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + name: ubuntu-v1.20.15-eks-d-1-20-16-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.gz + os: linux + osName: ubuntu + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/raw/1-20/1-20-16/ubuntu-v1.20.15-eks-d-1-20-16-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.gz + eksa: + cliTools: 
+ arch: + - amd64 + description: Container image for eks-anywhere-cli-tools image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-cli-tools + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.7.2-eks-a-v0.0.0-dev-release-0.9-build.1 + clusterController: + arch: + - amd64 + description: Container image for eks-anywhere-cluster-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-cluster-controller + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.9.1-eks-a-v0.0.0-dev-release-0.9-build.1 + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-anywhere/manifests/cluster-controller/eksa-components.yaml + diagnosticCollector: + arch: + - amd64 + description: Container image for eks-anywhere-diagnostic-collector image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-diagnostic-collector + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.7.2-eks-a-v0.0.0-dev-release-0.9-build.1 + version: v0.0.0-dev-release-0.9+build.0+abcdef1 + etcdadmBootstrap: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.2/bootstrap-components.yaml + controller: + arch: + - amd64 + description: Container image for etcdadm-bootstrap-provider image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: etcdadm-bootstrap-provider + os: linux + uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.2-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.2/metadata.yaml + version: v1.0.2+abcdef1 + etcdadmController: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.0/bootstrap-components.yaml + controller: + arch: + - amd64 + description: Container image for etcdadm-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: etcdadm-controller + os: linux + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.0-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.0/metadata.yaml + version: v1.0.0+abcdef1 + flux: + helmController: + arch: + - amd64 + description: Container image for helm-controller image + imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: helm-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.20.1-eks-a-v0.0.0-dev-release-0.9-build.1 + kustomizeController: + arch: + - amd64 + description: Container image for kustomize-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kustomize-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v0.24.3-eks-a-v0.0.0-dev-release-0.9-build.1 + notificationController: + arch: + - amd64 + description: Container image for notification-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: notification-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v0.23.4-eks-a-v0.0.0-dev-release-0.9-build.1 + sourceController: + arch: + - amd64 + description: Container image for source-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: source-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v0.24.2-eks-a-v0.0.0-dev-release-0.9-build.1 + version: v0.29.4+abcdef1 + haproxy: + image: + arch: + - amd64 + description: Container image for haproxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: haproxy + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.12.0-eks-a-v0.0.0-dev-release-0.9-build.1 + kindnetd: + manifest: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/kind/manifests/kindnetd/v0.12.0/kindnetd.yaml + version: v0.12.0+abcdef1 + kubeVersion: "1.20" + packageController: + helmChart: + description: 'Helm chart: eks-anywhere-packages-helm' + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-packages + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.1.10-eks-a-v0.0.0-dev-release-0.9-build.1 + packageController: + arch: + - amd64 + description: Container image for eks-anywhere-packages image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-packages + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.1.10-eks-a-v0.0.0-dev-release-0.9-build.1 + version: v0.1.10+abcdef1 + snow: + components: {} + kubeVip: {} + manager: {} + metadata: {} + version: "" + tinkerbell: + clusterAPIController: {} + clusterTemplate: {} + components: {} + kubeVip: {} + metadata: {} + tinkerbellStack: + actions: + cexec: {} + imageToDisk: {} + kexec: {} + ociToDisk: {} + reboot: {} + writeFile: {} + boots: + image: {} + manifest: {} + cfssl: {} + hegel: + image: {} + manifest: {} + hook: + bootkit: {} + docker: {} + initramfs: + amd: {} + arm: {} + kernel: {} + vmlinuz: + amd: {} + arm: {} + pbnj: + image: {} + manifest: {} + rufio: + image: {} + manifest: {} + tink: + tinkCli: {} + tinkController: {} + tinkManifest: {} + tinkServer: {} + tinkWorker: {} + tinkerbellChart: {} + version: "" + vSphere: + clusterAPIController: + arch: + - amd64 + description: Container image for cluster-api-vsphere-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-vsphere-controller + os: linux + uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.1.1-eks-a-v0.0.0-dev-release-0.9-build.1 + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/cluster-template.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/infrastructure-components.yaml + driver: + arch: + - amd64 + description: Container image for vsphere-csi-driver image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: vsphere-csi-driver + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/vsphere-csi-driver/csi/driver:v2.2.0-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeVip: + arch: + - amd64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: public.ecr.aws/release-container-registry/plunder-app/kube-vip:v0.4.2-eks-a-v0.0.0-dev-release-0.9-build.1 + manager: + arch: + - amd64 + description: Container image for cloud-provider-vsphere image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cloud-provider-vsphere + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.20.0-eks-d-1-20-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/metadata.yaml + syncer: + arch: + - amd64 + description: Container image for vsphere-csi-syncer image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: vsphere-csi-syncer + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/vsphere-csi-driver/csi/syncer:v2.2.0-eks-a-v0.0.0-dev-release-0.9-build.1 + version: v1.1.1+abcdef1 + - aws: + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/cluster-template.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/infrastructure-components.yaml + controller: + arch: + - amd64 + description: Container image for cluster-api-aws-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-aws-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-aws/cluster-api-aws-controller:v0.6.4-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: 
https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/metadata.yaml + version: v0.6.4+abcdef1 + bootstrap: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.1.3/bootstrap-components.yaml + controller: + arch: + - amd64 + description: Container image for kubeadm-bootstrap-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kubeadm-bootstrap-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.1.3/metadata.yaml + version: v1.1.3+abcdef1 + bottlerocketAdmin: + admin: + arch: + - amd64 + description: Container image for bottlerocket-admin image + imageDigest: sha256:279ff0b939c8ebfae8fb5086751de831edee4c1ef307b6f0a27b553b1c2c9b52 + name: bottlerocket-admin + os: linux + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.8.0 + bottlerocketBootstrap: + bootstrap: + arch: + - amd64 + description: Container image for bottlerocket-bootstrap image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: bottlerocket-bootstrap + os: linux + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-21-14-eks-a-v0.0.0-dev-release-0.9-build.1 + certManager: + acmesolver: + arch: + - amd64 + description: Container image for cert-manager-acmesolver image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-acmesolver + os: linux + uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-acmesolver:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 + cainjector: + arch: + - amd64 + description: Container image for cert-manager-cainjector image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-cainjector + os: linux + uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-cainjector:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 + controller: + arch: + - amd64 + description: Container image for cert-manager-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-controller + os: linux + uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-controller:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 + manifest: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cert-manager/manifests/v1.5.3/cert-manager.yaml + version: v1.5.3+abcdef1 + webhook: + arch: + - amd64 + description: Container image for cert-manager-webhook image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-webhook + os: linux + uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-webhook:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 + cilium: + cilium: + arch: + - amd64 + description: Container image for cilium image + 
imageDigest: sha256:e0c5180610dd7a2bac4ed271309b07eb6102d0bd74ed7dd33fb619879cc006f3 + name: cilium + os: linux + uri: public.ecr.aws/isovalent/cilium:v1.9.13-eksa.2 + helmChart: + description: Helm chart for cilium-chart + imageDigest: sha256:5982a9b5feded74c14a0b410006bba6d748655f8bc01f393b4519c9b10a463d0 + name: cilium-chart + uri: public.ecr.aws/isovalent/cilium:1.9.13-eksa.2 + manifest: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cilium/manifests/cilium/v1.9.13-eksa.2/cilium.yaml + operator: + arch: + - amd64 + description: Container image for operator-generic image + imageDigest: sha256:fd78027e876b00ea850f875e87a9ce81f6e4e6b4d963f115e978e8e7d180f478 + name: operator-generic + os: linux + uri: public.ecr.aws/isovalent/operator-generic:v1.9.13-eksa.2 + version: v1.9.13-eksa.2 + cloudStack: + clusterAPIController: + arch: + - amd64 + description: Container image for cluster-api-cloudstack-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-cloudstack-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-cloudstack/release/manager:v0.4.5-rc3-eks-a-v0.0.0-dev-release-0.9-build.1 + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.5-rc3/infrastructure-components.yaml + kubeVip: + arch: + - amd64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: public.ecr.aws/release-container-registry/plunder-app/kube-vip:v0.4.2-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.5-rc3/metadata.yaml + version: v0.4.5-rc3+abcdef1 + clusterAPI: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/cluster-api/v1.1.3/core-components.yaml + controller: + arch: + - amd64 + description: Container image for cluster-api-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/cluster-api/v1.1.3/metadata.yaml + version: v1.1.3+abcdef1 + controlPlane: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/control-plane-kubeadm/v1.1.3/control-plane-components.yaml + controller: + arch: + - amd64 + description: Container image for kubeadm-control-plane-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kubeadm-control-plane-controller + os: linux + uri: 
public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/control-plane-kubeadm/v1.1.3/metadata.yaml + version: v1.1.3+abcdef1 + docker: + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/cluster-template-development.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/infrastructure-components-development.yaml + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + manager: + arch: + - amd64 + description: Container image for capd-manager image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: capd-manager + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/metadata.yaml + version: v1.1.3+abcdef1 + eksD: + channel: 1-21 + components: https://distro.eks.amazonaws.com/crds/releases.distro.eks.amazonaws.com-v1alpha1.yaml + gitCommit: 0123456789abcdef0123456789abcdef01234567 + kindNode: + arch: + - amd64 + description: Container image for kind-node image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kind-node + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.21.12-eks-d-1-21-14-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeVersion: v1.21.12 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-21/kubernetes-1-21-eks-14.yaml + name: kubernetes-1-21-eks-14 + ova: + bottlerocket: + arch: + - amd64 + crictl: {} + description: Bottlerocket Ova image for EKS-D 1-21-14 release + etcdadm: {} + name: bottlerocket-v1.21.12-eks-d-1-21-14-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova + os: linux + osName: bottlerocket + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/ova/1-21/1-21-14/bottlerocket-v1.21.12-eks-d-1-21-14-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova + ubuntu: + arch: + - amd64 + crictl: + arch: + - amd64 + description: cri-tools tarball for linux/amd64 + name: cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 
0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + description: Ubuntu Ova image for EKS-D 1-21-14 release + etcdadm: + arch: + - amd64 + description: etcdadm tarball for linux/amd64 + name: etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + name: ubuntu-v1.21.12-eks-d-1-21-14-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova + os: linux + osName: ubuntu + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/ova/1-21/1-21-14/ubuntu-v1.21.12-eks-d-1-21-14-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova + raw: + bottlerocket: + crictl: {} + etcdadm: {} + ubuntu: + arch: + - amd64 + crictl: + arch: + - amd64 + description: cri-tools tarball for linux/amd64 + name: cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + description: Ubuntu Raw image for EKS-D 1-21-14 release + etcdadm: + arch: + - amd64 + description: etcdadm tarball for linux/amd64 + name: etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + name: ubuntu-v1.21.12-eks-d-1-21-14-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.gz + os: linux + osName: ubuntu + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/raw/1-21/1-21-14/ubuntu-v1.21.12-eks-d-1-21-14-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.gz + eksa: + cliTools: + arch: + - amd64 + description: Container image for eks-anywhere-cli-tools image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-cli-tools + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.7.2-eks-a-v0.0.0-dev-release-0.9-build.1 + clusterController: + arch: + - amd64 + description: Container image for eks-anywhere-cluster-controller image + 
imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-cluster-controller + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.9.1-eks-a-v0.0.0-dev-release-0.9-build.1 + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-anywhere/manifests/cluster-controller/eksa-components.yaml + diagnosticCollector: + arch: + - amd64 + description: Container image for eks-anywhere-diagnostic-collector image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-diagnostic-collector + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.7.2-eks-a-v0.0.0-dev-release-0.9-build.1 + version: v0.0.0-dev-release-0.9+build.0+abcdef1 + etcdadmBootstrap: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.2/bootstrap-components.yaml + controller: + arch: + - amd64 + description: Container image for etcdadm-bootstrap-provider image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: etcdadm-bootstrap-provider + os: linux + uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.2-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.2/metadata.yaml + version: v1.0.2+abcdef1 + etcdadmController: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.0/bootstrap-components.yaml + controller: + arch: + - amd64 + description: Container image for etcdadm-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: etcdadm-controller + os: linux + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.0-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.0/metadata.yaml + version: v1.0.0+abcdef1 + flux: + helmController: + arch: + - amd64 + description: Container image for helm-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: helm-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.20.1-eks-a-v0.0.0-dev-release-0.9-build.1 + kustomizeController: + arch: + - amd64 + description: Container image for kustomize-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kustomize-controller + os: linux + uri: 
public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v0.24.3-eks-a-v0.0.0-dev-release-0.9-build.1 + notificationController: + arch: + - amd64 + description: Container image for notification-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: notification-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v0.23.4-eks-a-v0.0.0-dev-release-0.9-build.1 + sourceController: + arch: + - amd64 + description: Container image for source-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: source-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v0.24.2-eks-a-v0.0.0-dev-release-0.9-build.1 + version: v0.29.4+abcdef1 + haproxy: + image: + arch: + - amd64 + description: Container image for haproxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: haproxy + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.12.0-eks-a-v0.0.0-dev-release-0.9-build.1 + kindnetd: + manifest: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/kind/manifests/kindnetd/v0.12.0/kindnetd.yaml + version: v0.12.0+abcdef1 + kubeVersion: "1.21" + packageController: + helmChart: + description: 'Helm chart: eks-anywhere-packages-helm' + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-packages + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.1.10-eks-a-v0.0.0-dev-release-0.9-build.1 + packageController: + arch: + - amd64 + description: Container image for eks-anywhere-packages image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-packages + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.1.10-eks-a-v0.0.0-dev-release-0.9-build.1 + version: v0.1.10+abcdef1 + snow: + components: {} + kubeVip: {} + manager: {} + metadata: {} + version: "" + tinkerbell: + clusterAPIController: {} + clusterTemplate: {} + components: {} + kubeVip: {} + metadata: {} + tinkerbellStack: + actions: + cexec: {} + imageToDisk: {} + kexec: {} + ociToDisk: {} + reboot: {} + writeFile: {} + boots: + image: {} + manifest: {} + cfssl: {} + hegel: + image: {} + manifest: {} + hook: + bootkit: {} + docker: {} + initramfs: + amd: {} + arm: {} + kernel: {} + vmlinuz: + amd: {} + arm: {} + pbnj: + image: {} + manifest: {} + rufio: + image: {} + manifest: {} + tink: + tinkCli: {} + tinkController: {} + tinkManifest: {} + tinkServer: {} + tinkWorker: {} + tinkerbellChart: {} + version: "" + vSphere: + clusterAPIController: + arch: + - amd64 + description: Container image for cluster-api-vsphere-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-vsphere-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.1.1-eks-a-v0.0.0-dev-release-0.9-build.1 + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/cluster-template.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/infrastructure-components.yaml + 
driver: + arch: + - amd64 + description: Container image for vsphere-csi-driver image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: vsphere-csi-driver + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/vsphere-csi-driver/csi/driver:v2.2.0-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeVip: + arch: + - amd64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: public.ecr.aws/release-container-registry/plunder-app/kube-vip:v0.4.2-eks-a-v0.0.0-dev-release-0.9-build.1 + manager: + arch: + - amd64 + description: Container image for cloud-provider-vsphere image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cloud-provider-vsphere + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.21.0-eks-d-1-21-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/metadata.yaml + syncer: + arch: + - amd64 + description: Container image for vsphere-csi-syncer image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: vsphere-csi-syncer + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/vsphere-csi-driver/csi/syncer:v2.2.0-eks-a-v0.0.0-dev-release-0.9-build.1 + version: v1.1.1+abcdef1 + - aws: + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/cluster-template.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/infrastructure-components.yaml + controller: + arch: + - amd64 + description: Container image for cluster-api-aws-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-aws-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-aws/cluster-api-aws-controller:v0.6.4-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/metadata.yaml + version: v0.6.4+abcdef1 + bootstrap: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.1.3/bootstrap-components.yaml + controller: + arch: + - amd64 + description: Container image for kubeadm-bootstrap-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: 
kubeadm-bootstrap-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.1.3/metadata.yaml + version: v1.1.3+abcdef1 + bottlerocketAdmin: + admin: + arch: + - amd64 + description: Container image for bottlerocket-admin image + imageDigest: sha256:279ff0b939c8ebfae8fb5086751de831edee4c1ef307b6f0a27b553b1c2c9b52 + name: bottlerocket-admin + os: linux + uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.8.0 + bottlerocketBootstrap: + bootstrap: + arch: + - amd64 + description: Container image for bottlerocket-bootstrap image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: bottlerocket-bootstrap + os: linux + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-22-7-eks-a-v0.0.0-dev-release-0.9-build.1 + certManager: + acmesolver: + arch: + - amd64 + description: Container image for cert-manager-acmesolver image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-acmesolver + os: linux + uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-acmesolver:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 + cainjector: + arch: + - amd64 + description: Container image for cert-manager-cainjector image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-cainjector + os: linux + uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-cainjector:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 + controller: + arch: + - amd64 + description: Container image for cert-manager-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-controller + os: linux + uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-controller:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 + manifest: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cert-manager/manifests/v1.5.3/cert-manager.yaml + version: v1.5.3+abcdef1 + webhook: + arch: + - amd64 + description: Container image for cert-manager-webhook image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cert-manager-webhook + os: linux + uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-webhook:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 + cilium: + cilium: + arch: + - amd64 + description: Container image for cilium image + imageDigest: sha256:e0c5180610dd7a2bac4ed271309b07eb6102d0bd74ed7dd33fb619879cc006f3 + name: cilium + os: linux + uri: public.ecr.aws/isovalent/cilium:v1.9.13-eksa.2 + helmChart: + description: Helm chart for cilium-chart + imageDigest: sha256:5982a9b5feded74c14a0b410006bba6d748655f8bc01f393b4519c9b10a463d0 + name: cilium-chart + uri: public.ecr.aws/isovalent/cilium:1.9.13-eksa.2 + manifest: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cilium/manifests/cilium/v1.9.13-eksa.2/cilium.yaml + operator: + arch: + - 
amd64 + description: Container image for operator-generic image + imageDigest: sha256:fd78027e876b00ea850f875e87a9ce81f6e4e6b4d963f115e978e8e7d180f478 + name: operator-generic + os: linux + uri: public.ecr.aws/isovalent/operator-generic:v1.9.13-eksa.2 + version: v1.9.13-eksa.2 + cloudStack: + clusterAPIController: + arch: + - amd64 + description: Container image for cluster-api-cloudstack-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-cloudstack-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-cloudstack/release/manager:v0.4.5-rc3-eks-a-v0.0.0-dev-release-0.9-build.1 + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.5-rc3/infrastructure-components.yaml + kubeVip: + arch: + - amd64 + description: Container image for kube-vip image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-vip + os: linux + uri: public.ecr.aws/release-container-registry/plunder-app/kube-vip:v0.4.2-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.5-rc3/metadata.yaml + version: v0.4.5-rc3+abcdef1 + clusterAPI: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/cluster-api/v1.1.3/core-components.yaml + controller: + arch: + - amd64 + description: Container image for cluster-api-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/cluster-api/v1.1.3/metadata.yaml + version: v1.1.3+abcdef1 + controlPlane: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/control-plane-kubeadm/v1.1.3/control-plane-components.yaml + controller: + arch: + - amd64 + description: Container image for kubeadm-control-plane-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kubeadm-control-plane-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/control-plane-kubeadm/v1.1.3/metadata.yaml + version: v1.1.3+abcdef1 + docker: + clusterTemplate: 
+ uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/cluster-template-development.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/infrastructure-components-development.yaml + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + manager: + arch: + - amd64 + description: Container image for capd-manager image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: capd-manager + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/metadata.yaml + version: v1.1.3+abcdef1 + eksD: + channel: 1-22 + components: https://distro.eks.amazonaws.com/crds/releases.distro.eks.amazonaws.com-v1alpha1.yaml + gitCommit: 0123456789abcdef0123456789abcdef01234567 + kindNode: + arch: + - amd64 + description: Container image for kind-node image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kind-node + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.22.9-eks-d-1-22-7-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeVersion: v1.22.9 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-22/kubernetes-1-22-eks-7.yaml + name: kubernetes-1-22-eks-7 + ova: + bottlerocket: + arch: + - amd64 + crictl: {} + description: Bottlerocket Ova image for EKS-D 1-22-7 release + etcdadm: {} + name: bottlerocket-v1.22.9-eks-d-1-22-7-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova + os: linux + osName: bottlerocket + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/ova/1-22/1-22-7/bottlerocket-v1.22.9-eks-d-1-22-7-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova + ubuntu: + arch: + - amd64 + crictl: + arch: + - amd64 + description: cri-tools tarball for linux/amd64 + name: cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + description: Ubuntu Ova image for EKS-D 1-22-7 release + etcdadm: + arch: + - amd64 + description: etcdadm tarball for linux/amd64 + name: etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: 
https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + name: ubuntu-v1.22.9-eks-d-1-22-7-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova + os: linux + osName: ubuntu + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/ova/1-22/1-22-7/ubuntu-v1.22.9-eks-d-1-22-7-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova + raw: + bottlerocket: + crictl: {} + etcdadm: {} + ubuntu: + arch: + - amd64 + crictl: + arch: + - amd64 + description: cri-tools tarball for linux/amd64 + name: cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + description: Ubuntu Raw image for EKS-D 1-22-7 release + etcdadm: + arch: + - amd64 + description: etcdadm tarball for linux/amd64 + name: etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + os: linux + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz + name: ubuntu-v1.22.9-eks-d-1-22-7-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.gz + os: linux + osName: ubuntu + sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/raw/1-22/1-22-7/ubuntu-v1.22.9-eks-d-1-22-7-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.gz + eksa: + cliTools: + arch: + - amd64 + description: Container image for eks-anywhere-cli-tools image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-cli-tools + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.7.2-eks-a-v0.0.0-dev-release-0.9-build.1 + clusterController: + arch: + - amd64 + description: Container image for eks-anywhere-cluster-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-cluster-controller + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.9.1-eks-a-v0.0.0-dev-release-0.9-build.1 + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-anywhere/manifests/cluster-controller/eksa-components.yaml + diagnosticCollector: + arch: + - amd64 + description: Container image for eks-anywhere-diagnostic-collector image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-diagnostic-collector + os: linux + uri: 
public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.7.2-eks-a-v0.0.0-dev-release-0.9-build.1 + version: v0.0.0-dev-release-0.9+build.0+abcdef1 + etcdadmBootstrap: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.2/bootstrap-components.yaml + controller: + arch: + - amd64 + description: Container image for etcdadm-bootstrap-provider image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: etcdadm-bootstrap-provider + os: linux + uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.2-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.2/metadata.yaml + version: v1.0.2+abcdef1 + etcdadmController: + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.0/bootstrap-components.yaml + controller: + arch: + - amd64 + description: Container image for etcdadm-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: etcdadm-controller + os: linux + uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.0-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 + metadata: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.0/metadata.yaml + version: v1.0.0+abcdef1 + flux: + helmController: + arch: + - amd64 + description: Container image for helm-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: helm-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.20.1-eks-a-v0.0.0-dev-release-0.9-build.1 + kustomizeController: + arch: + - amd64 + description: Container image for kustomize-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kustomize-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v0.24.3-eks-a-v0.0.0-dev-release-0.9-build.1 + notificationController: + arch: + - amd64 + description: Container image for notification-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: notification-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v0.23.4-eks-a-v0.0.0-dev-release-0.9-build.1 + sourceController: + arch: + - amd64 + description: Container image for source-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: 
source-controller + os: linux + uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v0.24.2-eks-a-v0.0.0-dev-release-0.9-build.1 + version: v0.29.4+abcdef1 + haproxy: + image: + arch: + - amd64 + description: Container image for haproxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: haproxy + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.12.0-eks-a-v0.0.0-dev-release-0.9-build.1 + kindnetd: + manifest: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/kind/manifests/kindnetd/v0.12.0/kindnetd.yaml + version: v0.12.0+abcdef1 + kubeVersion: "1.22" + packageController: + helmChart: + description: 'Helm chart: eks-anywhere-packages-helm' + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-packages + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.1.10-eks-a-v0.0.0-dev-release-0.9-build.1 + packageController: + arch: + - amd64 + description: Container image for eks-anywhere-packages image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: eks-anywhere-packages + os: linux + uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.1.10-eks-a-v0.0.0-dev-release-0.9-build.1 + version: v0.1.10+abcdef1 + snow: + components: {} + kubeVip: {} + manager: {} + metadata: {} + version: "" + tinkerbell: + clusterAPIController: {} + clusterTemplate: {} + components: {} + kubeVip: {} + metadata: {} + tinkerbellStack: + actions: + cexec: {} + imageToDisk: {} + kexec: {} + ociToDisk: {} + reboot: {} + writeFile: {} + boots: + image: {} + manifest: {} + cfssl: {} + hegel: + image: {} + manifest: {} + hook: + bootkit: {} + docker: {} + initramfs: + amd: {} + arm: {} + kernel: {} + vmlinuz: + amd: {} + arm: {} + pbnj: + image: {} + manifest: {} + rufio: + image: {} + manifest: {} + tink: + tinkCli: {} + tinkController: {} + tinkManifest: {} + tinkServer: {} + tinkWorker: {} + tinkerbellChart: {} + version: "" + vSphere: + clusterAPIController: + arch: + - amd64 + description: Container image for cluster-api-vsphere-controller image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: cluster-api-vsphere-controller + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.1.1-eks-a-v0.0.0-dev-release-0.9-build.1 + clusterTemplate: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/cluster-template.yaml + components: + uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/infrastructure-components.yaml + driver: + arch: + - amd64 + description: Container image for vsphere-csi-driver image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: vsphere-csi-driver + os: linux + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/vsphere-csi-driver/csi/driver:v2.2.0-eks-a-v0.0.0-dev-release-0.9-build.1 + kubeProxy: + arch: + - amd64 + description: Container image for kube-rbac-proxy image + imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef + name: kube-rbac-proxy + os: linux + uri: 
public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1
+      kubeVip:
+        arch:
+        - amd64
+        description: Container image for kube-vip image
+        imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
+        name: kube-vip
+        os: linux
+        uri: public.ecr.aws/release-container-registry/plunder-app/kube-vip:v0.4.2-eks-a-v0.0.0-dev-release-0.9-build.1
+      manager:
+        arch:
+        - amd64
+        description: Container image for cloud-provider-vsphere image
+        imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
+        name: cloud-provider-vsphere
+        os: linux
+        uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.22.5-eks-d-1-22-eks-a-v0.0.0-dev-release-0.9-build.1
+      metadata:
+        uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/metadata.yaml
+      syncer:
+        arch:
+        - amd64
+        description: Container image for vsphere-csi-syncer image
+        imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
+        name: vsphere-csi-syncer
+        os: linux
+        uri: public.ecr.aws/release-container-registry/kubernetes-sigs/vsphere-csi-driver/csi/syncer:v2.2.0-eks-a-v0.0.0-dev-release-0.9-build.1
+      version: v1.1.1+abcdef1
+status: {}

From c464ce93ce145e1b4d8a9deb244e9f89bd9b5154 Mon Sep 17 00:00:00 2001
From: Wonkun Kim
Date: Tue, 28 Jun 2022 13:52:35 -0500
Subject: [PATCH 02/22] Update for review comments

---
 pkg/dependencies/factory.go            |  3 +-
 pkg/executables/cmk.go                 |  8 +--
 pkg/executables/cmk_test.go            | 15 -----
 pkg/providers/cloudstack/cloudstack.go | 14 ++---
 pkg/providers/cloudstack/validator.go  | 78 ++++++++++++--------------
 5 files changed, 48 insertions(+), 70 deletions(-)

diff --git a/pkg/dependencies/factory.go b/pkg/dependencies/factory.go
index be5dcdda5937..957155948277 100644
--- a/pkg/dependencies/factory.go
+++ b/pkg/dependencies/factory.go
@@ -266,6 +266,7 @@ func (f *Factory) WithProvider(clusterConfigFile string, clusterConfig *v1alpha1
 		return fmt.Errorf("unable to get machine config from file %s: %v", clusterConfigFile, err)
 	}
+	// map[string]*executables.Cmk and map[string]ProviderCmkClient are not compatible so we convert the map manually
 	cmkClientMap := cloudstack.CmkClientMap{}
 	for name, cmk := range f.dependencies.Cmks {
 		cmkClientMap[name] = cmk
 	}
@@ -419,7 +420,7 @@ func (f *Factory) WithCmk() *Factory {
 	f.WithExecutableBuilder().WithWriter()

 	f.buildSteps = append(f.buildSteps, func(ctx context.Context) error {
-		if f.dependencies.Cmks != nil {
+		if f.dependencies.Cmks != nil && len(f.dependencies.Cmks) > 0 {
 			return nil
 		}
 		f.dependencies.Cmks = map[string]*executables.Cmk{}
diff --git a/pkg/executables/cmk.go b/pkg/executables/cmk.go
index 5940a32bd351..b28d6b7c1baf 100644
--- a/pkg/executables/cmk.go
+++ b/pkg/executables/cmk.go
@@ -262,9 +262,6 @@ func (c *Cmk) ValidateDomainPresent(ctx context.Context, domain string) (v1alpha

 func (c *Cmk) ValidateNetworkPresent(ctx context.Context, domainId string, network v1alpha1.CloudStackResourceIdentifier, zoneId string, account string, multipleZone bool) error {
 	command := newCmkCommand("list networks")
-	if len(network.Id) > 0 {
-		applyCmkArgs(&command, withCloudStackId(network.Id))
-	}
 	if multipleZone {
 		applyCmkArgs(&command, withCloudStackNetworkType(Shared))
 	}
@@ -432,14 +429,13 @@ func (c *Cmk) exec(ctx context.Context, args ...string) (stdout bytes.Buffer, er

 func (c *Cmk) buildCmkConfigFile() (configFile string, err error) {
 	t := templater.New(c.writer)
-	cloudstackPreflightTimeout := defaultCloudStackPreflightTimeout
+	c.config.Timeout = defaultCloudStackPreflightTimeout
 	if timeout, isSet := os.LookupEnv("CLOUDSTACK_PREFLIGHT_TIMEOUT"); isSet {
 		if _, err := strconv.ParseUint(timeout, 10, 16); err != nil {
 			return "", fmt.Errorf("CLOUDSTACK_PREFLIGHT_TIMEOUT must be a number: %v", err)
 		}
-		cloudstackPreflightTimeout = timeout
+		c.config.Timeout = timeout
 	}
-	c.config.Timeout = cloudstackPreflightTimeout
 	writtenFileName, err := t.WriteToFile(cmkConfigTemplate, c.config, fmt.Sprintf(cmkConfigFileNameTemplate, c.config.Name))
 	if err != nil {
 		return "", fmt.Errorf("creating file for cmk config: %v", err)
diff --git a/pkg/executables/cmk_test.go b/pkg/executables/cmk_test.go
index 21feeb79350a..93255c969e85 100644
--- a/pkg/executables/cmk_test.go
+++ b/pkg/executables/cmk_test.go
@@ -518,21 +518,6 @@ func TestCmkListOperations(t *testing.T) {
 			shouldSecondCallOccur: false,
 			wantResultCount:       1,
 		},
-		{
-			testName:         "listnetworks success on id filter",
-			jsonResponseFile: "testdata/cmk_list_network_singular.json",
-			argumentsExecCall: []string{
-				"-c", configFilePath,
-				"list", "networks", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"),
-			},
-			cmkFunc: func(cmk executables.Cmk, ctx context.Context) error {
-				return cmk.ValidateNetworkPresent(ctx, domainId, zones[3].Network, zones[3].Id, accountName, false)
-			},
-			cmkResponseError:      nil,
-			wantErr:               false,
-			shouldSecondCallOccur: true,
-			wantResultCount:       1,
-		},
 		{
 			testName:         "listnetworks no results",
 			jsonResponseFile: "testdata/cmk_list_empty_response.json",
diff --git a/pkg/providers/cloudstack/cloudstack.go b/pkg/providers/cloudstack/cloudstack.go
index 1288483856cc..61ba51e950ea 100644
--- a/pkg/providers/cloudstack/cloudstack.go
+++ b/pkg/providers/cloudstack/cloudstack.go
@@ -395,15 +395,15 @@ func (p *cloudstackProvider) validateClusterSpec(ctx context.Context, clusterSpe

 func (p *cloudstackProvider) SetupAndValidateCreateCluster(ctx context.Context, clusterSpec *cluster.Spec) error {
 	if err := p.validateEnv(ctx); err != nil {
-		return fmt.Errorf("failed setup and validations: %v", err)
+		return fmt.Errorf("validating environment variables: %v", err)
 	}

 	if err := p.validateClusterSpec(ctx, clusterSpec); err != nil {
-		return fmt.Errorf("failed cluster spec validation: %v", err)
+		return fmt.Errorf("validating cluster spec: %v", err)
 	}

 	if err := p.setupSSHAuthKeysForCreate(); err != nil {
-		return fmt.Errorf("failed setup and validations: %v", err)
+		return fmt.Errorf("setting up SSH keys: %v", err)
 	}

 	if clusterSpec.Cluster.IsManaged() {
@@ -434,15 +434,15 @@ func (p *cloudstackProvider) SetupAndValidateCreateCluster(ctx context.Context,

 func (p *cloudstackProvider) SetupAndValidateUpgradeCluster(ctx context.Context, cluster *types.Cluster, clusterSpec *cluster.Spec) error {
 	if err := p.validateEnv(ctx); err != nil {
-		return fmt.Errorf("failed setup and validations: %v", err)
+		return fmt.Errorf("validating environment variables: %v", err)
 	}

 	if err := p.validateClusterSpec(ctx, clusterSpec); err != nil {
-		return fmt.Errorf("failed cluster spec validation: %v", err)
+		return fmt.Errorf("validating cluster spec: %v", err)
 	}

 	if err := p.setupSSHAuthKeysForUpgrade(); err != nil {
-		return fmt.Errorf("failed setup and validations: %v", err)
+		return fmt.Errorf("setting up SSH keys: %v", err)
 	}

 	if err := p.validateMachineConfigsNameUniqueness(ctx, cluster, clusterSpec); err != nil {
@@ -454,7 +454,7 @@ func (p *cloudstackProvider) SetupAndValidateUpgradeCluster(ctx context.Context,
 func (p *cloudstackProvider) SetupAndValidateDeleteCluster(ctx context.Context, _ *types.Cluster) error {
 	err := p.validateEnv(ctx)
 	if err != nil {
-		return fmt.Errorf("failed setup and validations: %v", err)
+		return fmt.Errorf("validating environment variables: %v", err)
 	}
 	return nil
 }
diff --git a/pkg/providers/cloudstack/validator.go b/pkg/providers/cloudstack/validator.go
index 18b309710bc3..1cc9723b32de 100644
--- a/pkg/providers/cloudstack/validator.go
+++ b/pkg/providers/cloudstack/validator.go
@@ -14,8 +14,8 @@ import (
 )

 type Validator struct {
-	cmks              CmkClientMap
-	availabilityZones []localAvailabilityZone
+	cmks                   CmkClientMap
+	localAvailabilityZones []localAvailabilityZone
 }

 // Taken from https://github.com/shapeblue/cloudstack/blob/08bb4ad9fea7e422c3d3ac6d52f4670b1e89eed7/api/src/main/java/com/cloud/vm/VmDetailConstants.java
@@ -32,8 +32,8 @@ var restrictedUserCustomDetails = [...]string{

 func NewValidator(cmks CmkClientMap) *Validator {
 	return &Validator{
-		cmks:              cmks,
-		availabilityZones: []localAvailabilityZone{},
+		cmks:                   cmks,
+		localAvailabilityZones: []localAvailabilityZone{},
 	}
 }

@@ -59,25 +59,25 @@ type ProviderCmkClient interface {

 type CmkClientMap map[string]ProviderCmkClient

 func (v *Validator) validateCloudStackAccess(ctx context.Context, datacenterConfig *anywherev1.CloudStackDatacenterConfig) error {
-	azNamesToCheck := []string{}
+	refNamesToCheck := []string{}
 	if len(datacenterConfig.Spec.Domain) > 0 {
-		azNamesToCheck = append(azNamesToCheck, decoder.CloudStackGlobalAZ)
+		refNamesToCheck = append(refNamesToCheck, decoder.CloudStackGlobalAZ)
 	}
 	for _, az := range datacenterConfig.Spec.AvailabilityZones {
-		azNamesToCheck = append(azNamesToCheck, az.CredentialsRef)
+		refNamesToCheck = append(refNamesToCheck, az.CredentialsRef)
 	}

-	for _, azName := range azNamesToCheck {
-		cmk, ok := v.cmks[azName]
+	for _, refName := range refNamesToCheck {
+		cmk, ok := v.cmks[refName]
 		if !ok {
-			return fmt.Errorf("cannot find CloudStack profile for availability zone %s", azName)
+			return fmt.Errorf("cannot find CloudStack profile for credentialsRef %s", refName)
 		}
 		if err := cmk.ValidateCloudStackConnection(ctx); err != nil {
-			return fmt.Errorf("failed validating connection to cloudstack %s: %v", azName, err)
+			return fmt.Errorf("failed validating connection to cloudstack %s: %v", refName, err)
 		}
 	}

-	logger.MarkPass("Connected to", "servers", azNamesToCheck)
+	logger.MarkPass("Connected to", "servers", refNamesToCheck)
 	return nil
 }
@@ -86,7 +86,8 @@ func (v *Validator) ValidateCloudStackDatacenterConfig(ctx context.Context, data
 		return err
 	}

-	for _, az := range v.availabilityZones {
+	for _, az := range v.localAvailabilityZones {
+		fmt.Printf("az: %+v\n", az.CloudStackAvailabilityZone)
 		_, err := getHostnameFromUrl(az.ManagementApiEndpoint)
 		if err != nil {
 			return fmt.Errorf("checking management api endpoint: %v", err)
 		}

 		cmk, ok := v.cmks[az.CredentialsRef]
 		if !ok {
-			return fmt.Errorf("cannot find CloudStack profile for availability zone %s", az.CredentialsRef)
+			return fmt.Errorf("cannot find CloudStack profile named %s for availability zone %s", az.CredentialsRef, az.Name)
 		}
 		endpoint := cmk.GetManagementApiEndpoint()
 		if endpoint != az.ManagementApiEndpoint {
@@ -102,6 +103,16 @@ func (v *Validator) ValidateCloudStackDatacenterConfig(ctx context.Context, data
 				endpoint, az.ManagementApiEndpoint)
 		}

+		domain, err := cmk.ValidateDomainPresent(ctx, az.Domain)
+		if err != nil {
+			return err
+		}
+		az.DomainId = domain.Id
+
+		if err := cmk.ValidateAccountPresent(ctx, az.Account, az.DomainId); err != nil {
+			return err
+		}
+
 		zoneId, err := cmk.ValidateZonePresent(ctx, az.CloudStackAvailabilityZone.Zone)
 		if err != nil {
 			return err
@@ -125,52 +136,37 @@ func (v *Validator) generateLocalAvailabilityZones(ctx context.Context, datacent
 	}

 	if len(datacenterConfig.Spec.Domain) > 0 {
-		cmk, ok := v.cmks[decoder.CloudStackGlobalAZ]
+		_, ok := v.cmks[decoder.CloudStackGlobalAZ]
 		if !ok {
-			return fmt.Errorf("cannot find CloudStack profile for availability zone %s", decoder.CloudStackGlobalAZ)
-		}
-		domain, err := cmk.ValidateDomainPresent(ctx, datacenterConfig.Spec.Domain)
-		if err != nil {
-			return err
-		}
-		if err := cmk.ValidateAccountPresent(ctx, datacenterConfig.Spec.Account, domain.Id); err != nil {
-			return err
+			return fmt.Errorf("cannot find CloudStack profile named %s for default availability zone", decoder.CloudStackGlobalAZ)
 		}
-		for _, zone := range datacenterConfig.Spec.Zones {
+		for index, zone := range datacenterConfig.Spec.Zones {
 			availabilityZone := localAvailabilityZone{
 				CloudStackAvailabilityZone: &anywherev1.CloudStackAvailabilityZone{
+					Name:                  fmt.Sprintf("availability-zone-%d", index),
 					CredentialsRef:        decoder.CloudStackGlobalAZ,
 					Domain:                datacenterConfig.Spec.Domain,
 					Account:               datacenterConfig.Spec.Account,
 					ManagementApiEndpoint: datacenterConfig.Spec.ManagementApiEndpoint,
 					Zone:                  zone,
 				},
-				DomainId: domain.Id,
 			}
-			v.availabilityZones = append(v.availabilityZones, availabilityZone)
+			v.localAvailabilityZones = append(v.localAvailabilityZones, availabilityZone)
 		}
 	}

 	for _, az := range datacenterConfig.Spec.AvailabilityZones {
-		cmk, ok := v.cmks[az.CredentialsRef]
+		_, ok := v.cmks[az.CredentialsRef]
 		if !ok {
-			return fmt.Errorf("cannot find CloudStack profile for availability zone %s", az.CredentialsRef)
-		}
-		domain, err := cmk.ValidateDomainPresent(ctx, az.Domain)
-		if err != nil {
-			return err
-		}
-		if err := cmk.ValidateAccountPresent(ctx, az.Account, domain.Id); err != nil {
-			return err
+			return fmt.Errorf("cannot find CloudStack profile named %s for availability zone %s", az.CredentialsRef, az.Name)
 		}
 		availabilityZone := localAvailabilityZone{
 			CloudStackAvailabilityZone: &az,
-			DomainId:                   domain.Id,
 		}
-		v.availabilityZones = append(v.availabilityZones, availabilityZone)
+		v.localAvailabilityZones = append(v.localAvailabilityZones, availabilityZone)
 	}
-	if len(v.availabilityZones) <= 0 {
-		return fmt.Errorf("CloudStackDatacenterConfig domain or availabilityZones is not set or is empty")
+	if len(v.localAvailabilityZones) <= 0 {
+		return fmt.Errorf("CloudStackDatacenterConfig domain or localAvailabilityZones is not set or is empty")
 	}
 	return nil
 }
@@ -286,10 +282,10 @@ func (v *Validator) validateMachineConfig(ctx context.Context, datacenterConfig
 		}
 	}

-	for _, az := range v.availabilityZones {
+	for _, az := range v.localAvailabilityZones {
 		cmk, ok := v.cmks[az.CredentialsRef]
 		if !ok {
-			return fmt.Errorf("cannot find CloudStack profile for availability zone %s", az.CredentialsRef)
+			return fmt.Errorf("cannot find CloudStack profile named %s for availability zone %s", az.CredentialsRef, az.Name)
 		}
 		if err := cmk.ValidateTemplatePresent(ctx, az.DomainId, az.CloudStackAvailabilityZone.Zone.Id, az.Account, machineConfig.Spec.Template); err != nil {
 			return fmt.Errorf("validating template: %v", err)

From 3efce418387525720f1ac616c4f977f30f526935 Mon Sep 17 00:00:00 2001
From: Wonkun Kim
Date: Tue, 28 Jun 2022 14:04:08 -0500
Subject: [PATCH 03/22] Fix unit test failures

---
 pkg/providers/cloudstack/cloudstack_test.go | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/pkg/providers/cloudstack/cloudstack_test.go b/pkg/providers/cloudstack/cloudstack_test.go
index 2956d054a288..d76f8f156687 100644
--- a/pkg/providers/cloudstack/cloudstack_test.go
+++ b/pkg/providers/cloudstack/cloudstack_test.go
@@ -657,7 +657,7 @@ func TestSetupAndValidateForCreateSSHAuthorizedKeyInvalidCP(t *testing.T) {
 	tctx.SaveContext()

 	err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
-	thenErrorExpected(t, "failed setup and validations: ssh: no key found", err)
+	thenErrorExpected(t, "setting up SSH keys: ssh: no key found", err)
 }

 func TestSetupAndValidateForCreateSSHAuthorizedKeyInvalidWorker(t *testing.T) {
@@ -672,7 +672,7 @@ func TestSetupAndValidateForCreateSSHAuthorizedKeyInvalidWorker(t *testing.T) {
 	tctx.SaveContext()

 	err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
-	thenErrorExpected(t, "failed setup and validations: ssh: no key found", err)
+	thenErrorExpected(t, "setting up SSH keys: ssh: no key found", err)
 }

 func TestSetupAndValidateForCreateSSHAuthorizedKeyInvalidEtcd(t *testing.T) {
@@ -687,7 +687,7 @@ func TestSetupAndValidateForCreateSSHAuthorizedKeyInvalidEtcd(t *testing.T) {
 	tctx.SaveContext()

 	err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec)
-	thenErrorExpected(t, "failed setup and validations: ssh: no key found", err)
+	thenErrorExpected(t, "setting up SSH keys: ssh: no key found", err)
 }

 func TestSetupAndValidateSSHAuthorizedKeyEmptyCP(t *testing.T) {
@@ -1333,7 +1333,7 @@ func TestSetupAndValidateForUpgradeSSHAuthorizedKeyInvalidCP(t *testing.T) {

 	cluster := &types.Cluster{}
 	err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec)
-	thenErrorExpected(t, "failed setup and validations: ssh: no key found", err)
+	thenErrorExpected(t, "setting up SSH keys: ssh: no key found", err)
 }

 func TestSetupAndValidateForUpgradeSSHAuthorizedKeyInvalidWorker(t *testing.T) {
@@ -1349,7 +1349,7 @@ func TestSetupAndValidateForUpgradeSSHAuthorizedKeyInvalidWorker(t *testing.T) {

 	cluster := &types.Cluster{}
 	err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec)
-	thenErrorExpected(t, "failed setup and validations: ssh: no key found", err)
+	thenErrorExpected(t, "setting up SSH keys: ssh: no key found", err)
 }

 func TestSetupAndValidateForUpgradeSSHAuthorizedKeyInvalidEtcd(t *testing.T) {
@@ -1365,7 +1365,7 @@ func TestSetupAndValidateForUpgradeSSHAuthorizedKeyInvalidEtcd(t *testing.T) {

 	cluster := &types.Cluster{}
 	err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec)
-	thenErrorExpected(t, "failed setup and validations: ssh: no key found", err)
+	thenErrorExpected(t, "setting up SSH keys: ssh: no key found", err)
 }

 func TestClusterUpgradeNeededNoChanges(t *testing.T) {

From 31679e26e06d072948fc50aaa92a0b823cd5fcec Mon Sep 17 00:00:00 2001
From: Wonkun Kim
Date: Tue, 28 Jun 2022 14:11:52 -0500
Subject: [PATCH 04/22] Remove debug print

---
 pkg/providers/cloudstack/validator.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/pkg/providers/cloudstack/validator.go b/pkg/providers/cloudstack/validator.go
index 1cc9723b32de..d972f3dc3f46 100644
--- a/pkg/providers/cloudstack/validator.go
+++ b/pkg/providers/cloudstack/validator.go
@@ -87,7 +87,6 @@ func (v *Validator)
ValidateCloudStackDatacenterConfig(ctx context.Context, data } for _, az := range v.localAvailabilityZones { - fmt.Printf("az: %+v\n", az.CloudStackAvailabilityZone) _, err := getHostnameFromUrl(az.ManagementApiEndpoint) if err != nil { return fmt.Errorf("checking management api endpoint: %v", err) From 461a8049e4b4a2fbff842e6221e3e21b09b713ba Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Tue, 28 Jun 2022 14:19:13 -0500 Subject: [PATCH 05/22] Remove unnecessary file --- .../testdata/release-0.9-bundle-release.yaml | 1646 ----------------- 1 file changed, 1646 deletions(-) delete mode 100644 release/pkg/test/testdata/release-0.9-bundle-release.yaml diff --git a/release/pkg/test/testdata/release-0.9-bundle-release.yaml b/release/pkg/test/testdata/release-0.9-bundle-release.yaml deleted file mode 100644 index 6bf8fa296ba3..000000000000 --- a/release/pkg/test/testdata/release-0.9-bundle-release.yaml +++ /dev/null @@ -1,1646 +0,0 @@ -apiVersion: anywhere.eks.amazonaws.com/v1alpha1 -kind: Bundles -metadata: - creationTimestamp: "1970-01-01T00:00:00Z" - name: bundles-1 -spec: - cliMaxVersion: v0.9.0 - cliMinVersion: v0.9.0 - number: 1 - versionsBundles: - - aws: - clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/cluster-template.yaml - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/infrastructure-components.yaml - controller: - arch: - - amd64 - description: Container image for cluster-api-aws-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cluster-api-aws-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-aws/cluster-api-aws-controller:v0.6.4-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/metadata.yaml - version: v0.6.4+abcdef1 - bootstrap: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.1.3/bootstrap-components.yaml - controller: - arch: - - amd64 - description: Container image for kubeadm-bootstrap-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kubeadm-bootstrap-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.1.3/metadata.yaml - version: v1.1.3+abcdef1 - bottlerocketAdmin: - admin: - arch: - - amd64 - description: 
Container image for bottlerocket-admin image - imageDigest: sha256:279ff0b939c8ebfae8fb5086751de831edee4c1ef307b6f0a27b553b1c2c9b52 - name: bottlerocket-admin - os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.8.0 - bottlerocketBootstrap: - bootstrap: - arch: - - amd64 - description: Container image for bottlerocket-bootstrap image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: bottlerocket-bootstrap - os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-20-16-eks-a-v0.0.0-dev-release-0.9-build.1 - certManager: - acmesolver: - arch: - - amd64 - description: Container image for cert-manager-acmesolver image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cert-manager-acmesolver - os: linux - uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-acmesolver:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 - cainjector: - arch: - - amd64 - description: Container image for cert-manager-cainjector image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cert-manager-cainjector - os: linux - uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-cainjector:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 - controller: - arch: - - amd64 - description: Container image for cert-manager-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cert-manager-controller - os: linux - uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-controller:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 - manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cert-manager/manifests/v1.5.3/cert-manager.yaml - version: v1.5.3+abcdef1 - webhook: - arch: - - amd64 - description: Container image for cert-manager-webhook image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cert-manager-webhook - os: linux - uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-webhook:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 - cilium: - cilium: - arch: - - amd64 - description: Container image for cilium image - imageDigest: sha256:e0c5180610dd7a2bac4ed271309b07eb6102d0bd74ed7dd33fb619879cc006f3 - name: cilium - os: linux - uri: public.ecr.aws/isovalent/cilium:v1.9.13-eksa.2 - helmChart: - description: Helm chart for cilium-chart - imageDigest: sha256:5982a9b5feded74c14a0b410006bba6d748655f8bc01f393b4519c9b10a463d0 - name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.9.13-eksa.2 - manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cilium/manifests/cilium/v1.9.13-eksa.2/cilium.yaml - operator: - arch: - - amd64 - description: Container image for operator-generic image - imageDigest: sha256:fd78027e876b00ea850f875e87a9ce81f6e4e6b4d963f115e978e8e7d180f478 - name: operator-generic - os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.9.13-eksa.2 - version: v1.9.13-eksa.2 - cloudStack: - clusterAPIController: - arch: - - amd64 - description: Container image for cluster-api-cloudstack-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cluster-api-cloudstack-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-cloudstack/release/manager:v0.4.5-rc3-eks-a-v0.0.0-dev-release-0.9-build.1 - components: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.5-rc3/infrastructure-components.yaml - kubeVip: - arch: - - amd64 - description: Container image for kube-vip image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-vip - os: linux - uri: public.ecr.aws/release-container-registry/plunder-app/kube-vip:v0.4.2-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.5-rc3/metadata.yaml - version: v0.4.5-rc3+abcdef1 - clusterAPI: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/cluster-api/v1.1.3/core-components.yaml - controller: - arch: - - amd64 - description: Container image for cluster-api-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cluster-api-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/cluster-api/v1.1.3/metadata.yaml - version: v1.1.3+abcdef1 - controlPlane: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/control-plane-kubeadm/v1.1.3/control-plane-components.yaml - controller: - arch: - - amd64 - description: Container image for kubeadm-control-plane-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kubeadm-control-plane-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/control-plane-kubeadm/v1.1.3/metadata.yaml - version: v1.1.3+abcdef1 - docker: - clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/cluster-template-development.yaml - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/infrastructure-components-development.yaml - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - manager: - arch: - - amd64 - description: Container image for 
capd-manager image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: capd-manager - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/metadata.yaml - version: v1.1.3+abcdef1 - eksD: - channel: 1-20 - components: https://distro.eks.amazonaws.com/crds/releases.distro.eks.amazonaws.com-v1alpha1.yaml - gitCommit: 0123456789abcdef0123456789abcdef01234567 - kindNode: - arch: - - amd64 - description: Container image for kind-node image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kind-node - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.20.15-eks-d-1-20-16-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeVersion: v1.20.15 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-20/kubernetes-1-20-eks-16.yaml - name: kubernetes-1-20-eks-16 - ova: - bottlerocket: - arch: - - amd64 - crictl: {} - description: Bottlerocket Ova image for EKS-D 1-20-16 release - etcdadm: {} - name: bottlerocket-v1.20.15-eks-d-1-20-16-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova - os: linux - osName: bottlerocket - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/ova/1-20/1-20-16/bottlerocket-v1.20.15-eks-d-1-20-16-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova - ubuntu: - arch: - - amd64 - crictl: - arch: - - amd64 - description: cri-tools tarball for linux/amd64 - name: cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - os: linux - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - description: Ubuntu Ova image for EKS-D 1-20-16 release - etcdadm: - arch: - - amd64 - description: etcdadm tarball for linux/amd64 - name: etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - os: linux - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - name: ubuntu-v1.20.15-eks-d-1-20-16-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova - os: linux - osName: ubuntu - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/ova/1-20/1-20-16/ubuntu-v1.20.15-eks-d-1-20-16-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova - raw: - bottlerocket: - crictl: {} - etcdadm: {} - ubuntu: - arch: - - amd64 - crictl: - arch: - - amd64 - description: cri-tools tarball 
for linux/amd64 - name: cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - os: linux - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - description: Ubuntu Raw image for EKS-D 1-20-16 release - etcdadm: - arch: - - amd64 - description: etcdadm tarball for linux/amd64 - name: etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - os: linux - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - name: ubuntu-v1.20.15-eks-d-1-20-16-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.gz - os: linux - osName: ubuntu - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/raw/1-20/1-20-16/ubuntu-v1.20.15-eks-d-1-20-16-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.gz - eksa: - cliTools: - arch: - - amd64 - description: Container image for eks-anywhere-cli-tools image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-cli-tools - os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.7.2-eks-a-v0.0.0-dev-release-0.9-build.1 - clusterController: - arch: - - amd64 - description: Container image for eks-anywhere-cluster-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-cluster-controller - os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.9.1-eks-a-v0.0.0-dev-release-0.9-build.1 - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-anywhere/manifests/cluster-controller/eksa-components.yaml - diagnosticCollector: - arch: - - amd64 - description: Container image for eks-anywhere-diagnostic-collector image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-diagnostic-collector - os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.7.2-eks-a-v0.0.0-dev-release-0.9-build.1 - version: v0.0.0-dev-release-0.9+build.0+abcdef1 - etcdadmBootstrap: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.2/bootstrap-components.yaml - controller: - arch: - - amd64 - description: Container image for etcdadm-bootstrap-provider image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: etcdadm-bootstrap-provider - os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.2-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.2/metadata.yaml - version: v1.0.2+abcdef1 - etcdadmController: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.0/bootstrap-components.yaml - controller: - arch: - - amd64 - description: Container image for etcdadm-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: etcdadm-controller - os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.0-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.0/metadata.yaml - version: v1.0.0+abcdef1 - flux: - helmController: - arch: - - amd64 - description: Container image for helm-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: helm-controller - os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.20.1-eks-a-v0.0.0-dev-release-0.9-build.1 - kustomizeController: - arch: - - amd64 - description: Container image for kustomize-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kustomize-controller - os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v0.24.3-eks-a-v0.0.0-dev-release-0.9-build.1 - notificationController: - arch: - - amd64 - description: Container image for notification-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: notification-controller - os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v0.23.4-eks-a-v0.0.0-dev-release-0.9-build.1 - sourceController: - arch: - - amd64 - description: Container image for source-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: source-controller - os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v0.24.2-eks-a-v0.0.0-dev-release-0.9-build.1 - version: v0.29.4+abcdef1 - haproxy: - image: - arch: - - amd64 - description: Container image for haproxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: haproxy - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.12.0-eks-a-v0.0.0-dev-release-0.9-build.1 - kindnetd: - manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/kind/manifests/kindnetd/v0.12.0/kindnetd.yaml - version: v0.12.0+abcdef1 - kubeVersion: "1.20" - packageController: - helmChart: - description: 'Helm chart: eks-anywhere-packages-helm' - imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.1.10-eks-a-v0.0.0-dev-release-0.9-build.1 - packageController: - arch: - - amd64 - description: Container image for eks-anywhere-packages image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-packages - os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.1.10-eks-a-v0.0.0-dev-release-0.9-build.1 - version: v0.1.10+abcdef1 - snow: - components: {} - kubeVip: {} - manager: {} - metadata: {} - version: "" - tinkerbell: - clusterAPIController: {} - clusterTemplate: {} - components: {} - kubeVip: {} - metadata: {} - tinkerbellStack: - actions: - cexec: {} - imageToDisk: {} - kexec: {} - ociToDisk: {} - reboot: {} - writeFile: {} - boots: - image: {} - manifest: {} - cfssl: {} - hegel: - image: {} - manifest: {} - hook: - bootkit: {} - docker: {} - initramfs: - amd: {} - arm: {} - kernel: {} - vmlinuz: - amd: {} - arm: {} - pbnj: - image: {} - manifest: {} - rufio: - image: {} - manifest: {} - tink: - tinkCli: {} - tinkController: {} - tinkManifest: {} - tinkServer: {} - tinkWorker: {} - tinkerbellChart: {} - version: "" - vSphere: - clusterAPIController: - arch: - - amd64 - description: Container image for cluster-api-vsphere-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cluster-api-vsphere-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.1.1-eks-a-v0.0.0-dev-release-0.9-build.1 - clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/cluster-template.yaml - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/infrastructure-components.yaml - driver: - arch: - - amd64 - description: Container image for vsphere-csi-driver image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: vsphere-csi-driver - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/vsphere-csi-driver/csi/driver:v2.2.0-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeVip: - arch: - - amd64 - description: Container image for kube-vip image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-vip - os: linux - uri: public.ecr.aws/release-container-registry/plunder-app/kube-vip:v0.4.2-eks-a-v0.0.0-dev-release-0.9-build.1 - manager: - arch: - - amd64 - description: Container image for cloud-provider-vsphere image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cloud-provider-vsphere - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.20.0-eks-d-1-20-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/metadata.yaml - syncer: - arch: - - amd64 - description: Container image for vsphere-csi-syncer image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: vsphere-csi-syncer - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/vsphere-csi-driver/csi/syncer:v2.2.0-eks-a-v0.0.0-dev-release-0.9-build.1 - version: v1.1.1+abcdef1 - - aws: - clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/cluster-template.yaml - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/infrastructure-components.yaml - controller: - arch: - - amd64 - description: Container image for cluster-api-aws-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cluster-api-aws-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-aws/cluster-api-aws-controller:v0.6.4-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/metadata.yaml - version: v0.6.4+abcdef1 - bootstrap: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.1.3/bootstrap-components.yaml - controller: - arch: - - amd64 - description: Container image for kubeadm-bootstrap-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kubeadm-bootstrap-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.1.3/metadata.yaml - version: v1.1.3+abcdef1 - bottlerocketAdmin: - admin: - arch: - - amd64 - description: Container image for bottlerocket-admin image - imageDigest: sha256:279ff0b939c8ebfae8fb5086751de831edee4c1ef307b6f0a27b553b1c2c9b52 - name: bottlerocket-admin - os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.8.0 - bottlerocketBootstrap: - bootstrap: - arch: - - amd64 - description: Container image for bottlerocket-bootstrap image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: bottlerocket-bootstrap - os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-21-14-eks-a-v0.0.0-dev-release-0.9-build.1 - certManager: - acmesolver: - arch: - - 
amd64 - description: Container image for cert-manager-acmesolver image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cert-manager-acmesolver - os: linux - uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-acmesolver:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 - cainjector: - arch: - - amd64 - description: Container image for cert-manager-cainjector image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cert-manager-cainjector - os: linux - uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-cainjector:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 - controller: - arch: - - amd64 - description: Container image for cert-manager-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cert-manager-controller - os: linux - uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-controller:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 - manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cert-manager/manifests/v1.5.3/cert-manager.yaml - version: v1.5.3+abcdef1 - webhook: - arch: - - amd64 - description: Container image for cert-manager-webhook image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cert-manager-webhook - os: linux - uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-webhook:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 - cilium: - cilium: - arch: - - amd64 - description: Container image for cilium image - imageDigest: sha256:e0c5180610dd7a2bac4ed271309b07eb6102d0bd74ed7dd33fb619879cc006f3 - name: cilium - os: linux - uri: public.ecr.aws/isovalent/cilium:v1.9.13-eksa.2 - helmChart: - description: Helm chart for cilium-chart - imageDigest: sha256:5982a9b5feded74c14a0b410006bba6d748655f8bc01f393b4519c9b10a463d0 - name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.9.13-eksa.2 - manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cilium/manifests/cilium/v1.9.13-eksa.2/cilium.yaml - operator: - arch: - - amd64 - description: Container image for operator-generic image - imageDigest: sha256:fd78027e876b00ea850f875e87a9ce81f6e4e6b4d963f115e978e8e7d180f478 - name: operator-generic - os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.9.13-eksa.2 - version: v1.9.13-eksa.2 - cloudStack: - clusterAPIController: - arch: - - amd64 - description: Container image for cluster-api-cloudstack-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cluster-api-cloudstack-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-cloudstack/release/manager:v0.4.5-rc3-eks-a-v0.0.0-dev-release-0.9-build.1 - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.5-rc3/infrastructure-components.yaml - kubeVip: - arch: - - amd64 - description: Container image for kube-vip image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-vip - os: linux - uri: public.ecr.aws/release-container-registry/plunder-app/kube-vip:v0.4.2-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.5-rc3/metadata.yaml 
- version: v0.4.5-rc3+abcdef1 - clusterAPI: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/cluster-api/v1.1.3/core-components.yaml - controller: - arch: - - amd64 - description: Container image for cluster-api-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cluster-api-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/cluster-api/v1.1.3/metadata.yaml - version: v1.1.3+abcdef1 - controlPlane: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/control-plane-kubeadm/v1.1.3/control-plane-components.yaml - controller: - arch: - - amd64 - description: Container image for kubeadm-control-plane-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kubeadm-control-plane-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/control-plane-kubeadm/v1.1.3/metadata.yaml - version: v1.1.3+abcdef1 - docker: - clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/cluster-template-development.yaml - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/infrastructure-components-development.yaml - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - manager: - arch: - - amd64 - description: Container image for capd-manager image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: capd-manager - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/metadata.yaml - version: v1.1.3+abcdef1 - eksD: - channel: 1-21 - components: https://distro.eks.amazonaws.com/crds/releases.distro.eks.amazonaws.com-v1alpha1.yaml - gitCommit: 0123456789abcdef0123456789abcdef01234567 - kindNode: - arch: - - amd64 - 
description: Container image for kind-node image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kind-node - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.21.12-eks-d-1-21-14-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeVersion: v1.21.12 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-21/kubernetes-1-21-eks-14.yaml - name: kubernetes-1-21-eks-14 - ova: - bottlerocket: - arch: - - amd64 - crictl: {} - description: Bottlerocket Ova image for EKS-D 1-21-14 release - etcdadm: {} - name: bottlerocket-v1.21.12-eks-d-1-21-14-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova - os: linux - osName: bottlerocket - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/ova/1-21/1-21-14/bottlerocket-v1.21.12-eks-d-1-21-14-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova - ubuntu: - arch: - - amd64 - crictl: - arch: - - amd64 - description: cri-tools tarball for linux/amd64 - name: cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - os: linux - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - description: Ubuntu Ova image for EKS-D 1-21-14 release - etcdadm: - arch: - - amd64 - description: etcdadm tarball for linux/amd64 - name: etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - os: linux - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - name: ubuntu-v1.21.12-eks-d-1-21-14-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova - os: linux - osName: ubuntu - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/ova/1-21/1-21-14/ubuntu-v1.21.12-eks-d-1-21-14-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova - raw: - bottlerocket: - crictl: {} - etcdadm: {} - ubuntu: - arch: - - amd64 - crictl: - arch: - - amd64 - description: cri-tools tarball for linux/amd64 - name: cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - os: linux - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - description: Ubuntu Raw image for EKS-D 1-21-14 release - etcdadm: - arch: - - amd64 - description: etcdadm tarball for linux/amd64 - name: 
etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - os: linux - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - name: ubuntu-v1.21.12-eks-d-1-21-14-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.gz - os: linux - osName: ubuntu - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/raw/1-21/1-21-14/ubuntu-v1.21.12-eks-d-1-21-14-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.gz - eksa: - cliTools: - arch: - - amd64 - description: Container image for eks-anywhere-cli-tools image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-cli-tools - os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.7.2-eks-a-v0.0.0-dev-release-0.9-build.1 - clusterController: - arch: - - amd64 - description: Container image for eks-anywhere-cluster-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-cluster-controller - os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.9.1-eks-a-v0.0.0-dev-release-0.9-build.1 - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-anywhere/manifests/cluster-controller/eksa-components.yaml - diagnosticCollector: - arch: - - amd64 - description: Container image for eks-anywhere-diagnostic-collector image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-diagnostic-collector - os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.7.2-eks-a-v0.0.0-dev-release-0.9-build.1 - version: v0.0.0-dev-release-0.9+build.0+abcdef1 - etcdadmBootstrap: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.2/bootstrap-components.yaml - controller: - arch: - - amd64 - description: Container image for etcdadm-bootstrap-provider image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: etcdadm-bootstrap-provider - os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.2-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.2/metadata.yaml - version: v1.0.2+abcdef1 - etcdadmController: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.0/bootstrap-components.yaml - 
controller: - arch: - - amd64 - description: Container image for etcdadm-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: etcdadm-controller - os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.0-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.0/metadata.yaml - version: v1.0.0+abcdef1 - flux: - helmController: - arch: - - amd64 - description: Container image for helm-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: helm-controller - os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.20.1-eks-a-v0.0.0-dev-release-0.9-build.1 - kustomizeController: - arch: - - amd64 - description: Container image for kustomize-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kustomize-controller - os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v0.24.3-eks-a-v0.0.0-dev-release-0.9-build.1 - notificationController: - arch: - - amd64 - description: Container image for notification-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: notification-controller - os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v0.23.4-eks-a-v0.0.0-dev-release-0.9-build.1 - sourceController: - arch: - - amd64 - description: Container image for source-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: source-controller - os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v0.24.2-eks-a-v0.0.0-dev-release-0.9-build.1 - version: v0.29.4+abcdef1 - haproxy: - image: - arch: - - amd64 - description: Container image for haproxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: haproxy - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.12.0-eks-a-v0.0.0-dev-release-0.9-build.1 - kindnetd: - manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/kind/manifests/kindnetd/v0.12.0/kindnetd.yaml - version: v0.12.0+abcdef1 - kubeVersion: "1.21" - packageController: - helmChart: - description: 'Helm chart: eks-anywhere-packages-helm' - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.1.10-eks-a-v0.0.0-dev-release-0.9-build.1 - packageController: - arch: - - amd64 - description: Container image for eks-anywhere-packages image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-packages - os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.1.10-eks-a-v0.0.0-dev-release-0.9-build.1 - version: v0.1.10+abcdef1 - snow: - components: {} - kubeVip: {} - manager: {} - metadata: {} - 
version: "" - tinkerbell: - clusterAPIController: {} - clusterTemplate: {} - components: {} - kubeVip: {} - metadata: {} - tinkerbellStack: - actions: - cexec: {} - imageToDisk: {} - kexec: {} - ociToDisk: {} - reboot: {} - writeFile: {} - boots: - image: {} - manifest: {} - cfssl: {} - hegel: - image: {} - manifest: {} - hook: - bootkit: {} - docker: {} - initramfs: - amd: {} - arm: {} - kernel: {} - vmlinuz: - amd: {} - arm: {} - pbnj: - image: {} - manifest: {} - rufio: - image: {} - manifest: {} - tink: - tinkCli: {} - tinkController: {} - tinkManifest: {} - tinkServer: {} - tinkWorker: {} - tinkerbellChart: {} - version: "" - vSphere: - clusterAPIController: - arch: - - amd64 - description: Container image for cluster-api-vsphere-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cluster-api-vsphere-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.1.1-eks-a-v0.0.0-dev-release-0.9-build.1 - clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/cluster-template.yaml - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/infrastructure-components.yaml - driver: - arch: - - amd64 - description: Container image for vsphere-csi-driver image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: vsphere-csi-driver - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/vsphere-csi-driver/csi/driver:v2.2.0-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeVip: - arch: - - amd64 - description: Container image for kube-vip image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-vip - os: linux - uri: public.ecr.aws/release-container-registry/plunder-app/kube-vip:v0.4.2-eks-a-v0.0.0-dev-release-0.9-build.1 - manager: - arch: - - amd64 - description: Container image for cloud-provider-vsphere image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cloud-provider-vsphere - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.21.0-eks-d-1-21-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/metadata.yaml - syncer: - arch: - - amd64 - description: Container image for vsphere-csi-syncer image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: vsphere-csi-syncer - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/vsphere-csi-driver/csi/syncer:v2.2.0-eks-a-v0.0.0-dev-release-0.9-build.1 - version: v1.1.1+abcdef1 - - aws: - clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/cluster-template.yaml - components: - uri: 
https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/infrastructure-components.yaml - controller: - arch: - - amd64 - description: Container image for cluster-api-aws-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cluster-api-aws-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-aws/cluster-api-aws-controller:v0.6.4-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-aws/manifests/infrastructure-aws/v0.6.4/metadata.yaml - version: v0.6.4+abcdef1 - bootstrap: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.1.3/bootstrap-components.yaml - controller: - arch: - - amd64 - description: Container image for kubeadm-bootstrap-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kubeadm-bootstrap-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-bootstrap-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/bootstrap-kubeadm/v1.1.3/metadata.yaml - version: v1.1.3+abcdef1 - bottlerocketAdmin: - admin: - arch: - - amd64 - description: Container image for bottlerocket-admin image - imageDigest: sha256:279ff0b939c8ebfae8fb5086751de831edee4c1ef307b6f0a27b553b1c2c9b52 - name: bottlerocket-admin - os: linux - uri: public.ecr.aws/bottlerocket/bottlerocket-admin:v0.8.0 - bottlerocketBootstrap: - bootstrap: - arch: - - amd64 - description: Container image for bottlerocket-bootstrap image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: bottlerocket-bootstrap - os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-22-7-eks-a-v0.0.0-dev-release-0.9-build.1 - certManager: - acmesolver: - arch: - - amd64 - description: Container image for cert-manager-acmesolver image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cert-manager-acmesolver - os: linux - uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-acmesolver:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 - cainjector: - arch: - - amd64 - description: Container image for cert-manager-cainjector image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cert-manager-cainjector - os: linux - uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-cainjector:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 - controller: - arch: - - amd64 - description: 
Container image for cert-manager-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cert-manager-controller - os: linux - uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-controller:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 - manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cert-manager/manifests/v1.5.3/cert-manager.yaml - version: v1.5.3+abcdef1 - webhook: - arch: - - amd64 - description: Container image for cert-manager-webhook image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cert-manager-webhook - os: linux - uri: public.ecr.aws/release-container-registry/jetstack/cert-manager-webhook:v1.5.3-eks-a-v0.0.0-dev-release-0.9-build.1 - cilium: - cilium: - arch: - - amd64 - description: Container image for cilium image - imageDigest: sha256:e0c5180610dd7a2bac4ed271309b07eb6102d0bd74ed7dd33fb619879cc006f3 - name: cilium - os: linux - uri: public.ecr.aws/isovalent/cilium:v1.9.13-eksa.2 - helmChart: - description: Helm chart for cilium-chart - imageDigest: sha256:5982a9b5feded74c14a0b410006bba6d748655f8bc01f393b4519c9b10a463d0 - name: cilium-chart - uri: public.ecr.aws/isovalent/cilium:1.9.13-eksa.2 - manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cilium/manifests/cilium/v1.9.13-eksa.2/cilium.yaml - operator: - arch: - - amd64 - description: Container image for operator-generic image - imageDigest: sha256:fd78027e876b00ea850f875e87a9ce81f6e4e6b4d963f115e978e8e7d180f478 - name: operator-generic - os: linux - uri: public.ecr.aws/isovalent/operator-generic:v1.9.13-eksa.2 - version: v1.9.13-eksa.2 - cloudStack: - clusterAPIController: - arch: - - amd64 - description: Container image for cluster-api-cloudstack-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cluster-api-cloudstack-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-cloudstack/release/manager:v0.4.5-rc3-eks-a-v0.0.0-dev-release-0.9-build.1 - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.5-rc3/infrastructure-components.yaml - kubeVip: - arch: - - amd64 - description: Container image for kube-vip image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-vip - os: linux - uri: public.ecr.aws/release-container-registry/plunder-app/kube-vip:v0.4.2-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-cloudstack/manifests/infrastructure-cloudstack/v0.4.5-rc3/metadata.yaml - version: v0.4.5-rc3+abcdef1 - clusterAPI: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/cluster-api/v1.1.3/core-components.yaml - controller: - arch: - - amd64 - description: Container image for cluster-api-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cluster-api-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/cluster-api-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: 
sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/cluster-api/v1.1.3/metadata.yaml - version: v1.1.3+abcdef1 - controlPlane: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/control-plane-kubeadm/v1.1.3/control-plane-components.yaml - controller: - arch: - - amd64 - description: Container image for kubeadm-control-plane-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kubeadm-control-plane-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/kubeadm-control-plane-controller:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/control-plane-kubeadm/v1.1.3/metadata.yaml - version: v1.1.3+abcdef1 - docker: - clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/cluster-template-development.yaml - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/infrastructure-components-development.yaml - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - manager: - arch: - - amd64 - description: Container image for capd-manager image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: capd-manager - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api/capd-manager:v1.1.3-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api/manifests/infrastructure-docker/v1.1.3/metadata.yaml - version: v1.1.3+abcdef1 - eksD: - channel: 1-22 - components: https://distro.eks.amazonaws.com/crds/releases.distro.eks.amazonaws.com-v1alpha1.yaml - gitCommit: 0123456789abcdef0123456789abcdef01234567 - kindNode: - arch: - - amd64 - description: Container image for kind-node image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kind-node - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.22.9-eks-d-1-22-7-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeVersion: v1.22.9 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-22/kubernetes-1-22-eks-7.yaml - name: kubernetes-1-22-eks-7 - ova: - bottlerocket: - arch: - - amd64 - crictl: {} - description: Bottlerocket Ova image for EKS-D 1-22-7 release - etcdadm: {} - name: bottlerocket-v1.22.9-eks-d-1-22-7-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova - os: 
linux - osName: bottlerocket - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/ova/1-22/1-22-7/bottlerocket-v1.22.9-eks-d-1-22-7-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova - ubuntu: - arch: - - amd64 - crictl: - arch: - - amd64 - description: cri-tools tarball for linux/amd64 - name: cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - os: linux - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - description: Ubuntu Ova image for EKS-D 1-22-7 release - etcdadm: - arch: - - amd64 - description: etcdadm tarball for linux/amd64 - name: etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - os: linux - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - name: ubuntu-v1.22.9-eks-d-1-22-7-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova - os: linux - osName: ubuntu - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/ova/1-22/1-22-7/ubuntu-v1.22.9-eks-d-1-22-7-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.ova - raw: - bottlerocket: - crictl: {} - etcdadm: {} - ubuntu: - arch: - - amd64 - crictl: - arch: - - amd64 - description: cri-tools tarball for linux/amd64 - name: cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - os: linux - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - description: Ubuntu Raw image for EKS-D 1-22-7 release - etcdadm: - arch: - - amd64 - description: etcdadm tarball for linux/amd64 - name: etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - os: linux - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev-release-0.9+build.0-linux-amd64.tar.gz - name: ubuntu-v1.22.9-eks-d-1-22-7-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.gz - os: linux - osName: ubuntu - sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - sha512: 
0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-distro/raw/1-22/1-22-7/ubuntu-v1.22.9-eks-d-1-22-7-eks-a-v0.0.0-dev-release-0.9-build.0-amd64.gz - eksa: - cliTools: - arch: - - amd64 - description: Container image for eks-anywhere-cli-tools image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-cli-tools - os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cli-tools:v0.7.2-eks-a-v0.0.0-dev-release-0.9-build.1 - clusterController: - arch: - - amd64 - description: Container image for eks-anywhere-cluster-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-cluster-controller - os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-cluster-controller:v0.9.1-eks-a-v0.0.0-dev-release-0.9-build.1 - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/eks-anywhere/manifests/cluster-controller/eksa-components.yaml - diagnosticCollector: - arch: - - amd64 - description: Container image for eks-anywhere-diagnostic-collector image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-diagnostic-collector - os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-diagnostic-collector:v0.7.2-eks-a-v0.0.0-dev-release-0.9-build.1 - version: v0.0.0-dev-release-0.9+build.0+abcdef1 - etcdadmBootstrap: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.2/bootstrap-components.yaml - controller: - arch: - - amd64 - description: Container image for etcdadm-bootstrap-provider image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: etcdadm-bootstrap-provider - os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-bootstrap-provider:v1.0.2-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-bootstrap-provider/manifests/bootstrap-etcdadm-bootstrap/v1.0.2/metadata.yaml - version: v1.0.2+abcdef1 - etcdadmController: - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.0/bootstrap-components.yaml - controller: - arch: - - amd64 - description: Container image for etcdadm-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: etcdadm-controller - os: linux - uri: public.ecr.aws/release-container-registry/aws/etcdadm-controller:v1.0.0-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - 
metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/etcdadm-controller/manifests/bootstrap-etcdadm-controller/v1.0.0/metadata.yaml - version: v1.0.0+abcdef1 - flux: - helmController: - arch: - - amd64 - description: Container image for helm-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: helm-controller - os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/helm-controller:v0.20.1-eks-a-v0.0.0-dev-release-0.9-build.1 - kustomizeController: - arch: - - amd64 - description: Container image for kustomize-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kustomize-controller - os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/kustomize-controller:v0.24.3-eks-a-v0.0.0-dev-release-0.9-build.1 - notificationController: - arch: - - amd64 - description: Container image for notification-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: notification-controller - os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/notification-controller:v0.23.4-eks-a-v0.0.0-dev-release-0.9-build.1 - sourceController: - arch: - - amd64 - description: Container image for source-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: source-controller - os: linux - uri: public.ecr.aws/release-container-registry/fluxcd/source-controller:v0.24.2-eks-a-v0.0.0-dev-release-0.9-build.1 - version: v0.29.4+abcdef1 - haproxy: - image: - arch: - - amd64 - description: Container image for haproxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: haproxy - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/haproxy:v0.12.0-eks-a-v0.0.0-dev-release-0.9-build.1 - kindnetd: - manifest: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/kind/manifests/kindnetd/v0.12.0/kindnetd.yaml - version: v0.12.0+abcdef1 - kubeVersion: "1.22" - packageController: - helmChart: - description: 'Helm chart: eks-anywhere-packages-helm' - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-packages - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:0.1.10-eks-a-v0.0.0-dev-release-0.9-build.1 - packageController: - arch: - - amd64 - description: Container image for eks-anywhere-packages image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: eks-anywhere-packages - os: linux - uri: public.ecr.aws/release-container-registry/eks-anywhere-packages:v0.1.10-eks-a-v0.0.0-dev-release-0.9-build.1 - version: v0.1.10+abcdef1 - snow: - components: {} - kubeVip: {} - manager: {} - metadata: {} - version: "" - tinkerbell: - clusterAPIController: {} - clusterTemplate: {} - components: {} - kubeVip: {} - metadata: {} - tinkerbellStack: - actions: - cexec: {} - imageToDisk: {} - kexec: {} - ociToDisk: {} - reboot: {} - writeFile: {} - boots: - image: {} - manifest: {} - cfssl: {} - hegel: - image: {} - manifest: {} - hook: - bootkit: {} - docker: {} - initramfs: - amd: {} - arm: {} - kernel: {} - vmlinuz: - amd: {} - arm: {} - pbnj: - image: {} - manifest: {} - rufio: - image: {} - manifest: {} - tink: - tinkCli: {} - tinkController: {} - tinkManifest: {} - tinkServer: {} - tinkWorker: {} - tinkerbellChart: {} - version: "" - vSphere: - 
clusterAPIController: - arch: - - amd64 - description: Container image for cluster-api-vsphere-controller image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cluster-api-vsphere-controller - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/cluster-api-provider-vsphere/release/manager:v1.1.1-eks-a-v0.0.0-dev-release-0.9-build.1 - clusterTemplate: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/cluster-template.yaml - components: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/infrastructure-components.yaml - driver: - arch: - - amd64 - description: Container image for vsphere-csi-driver image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: vsphere-csi-driver - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/vsphere-csi-driver/csi/driver:v2.2.0-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeProxy: - arch: - - amd64 - description: Container image for kube-rbac-proxy image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-rbac-proxy - os: linux - uri: public.ecr.aws/release-container-registry/brancz/kube-rbac-proxy:v0.8.0-eks-a-v0.0.0-dev-release-0.9-build.1 - kubeVip: - arch: - - amd64 - description: Container image for kube-vip image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: kube-vip - os: linux - uri: public.ecr.aws/release-container-registry/plunder-app/kube-vip:v0.4.2-eks-a-v0.0.0-dev-release-0.9-build.1 - manager: - arch: - - amd64 - description: Container image for cloud-provider-vsphere image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: cloud-provider-vsphere - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes/cloud-provider-vsphere/cpi/manager:v1.22.5-eks-d-1-22-eks-a-v0.0.0-dev-release-0.9-build.1 - metadata: - uri: https://release-bucket/artifacts/v0.0.0-dev-release-0.9-build.0/cluster-api-provider-vsphere/manifests/infrastructure-vsphere/v1.1.1/metadata.yaml - syncer: - arch: - - amd64 - description: Container image for vsphere-csi-syncer image - imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - name: vsphere-csi-syncer - os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/vsphere-csi-driver/csi/syncer:v2.2.0-eks-a-v0.0.0-dev-release-0.9-build.1 - version: v1.1.1+abcdef1 -status: {} From dbdac2b3df1ba7ceeecbf30f6b6d2d06247ad1a8 Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Thu, 30 Jun 2022 11:27:43 -0500 Subject: [PATCH 06/22] Remove multierror module --- go.mod | 1 - internal/test/cleanup/cleanup.go | 8 ++++---- release/go.sum | 1 - test/e2e/tools/eks-anywhere-test-tool/go.sum | 4 ---- 4 files changed, 4 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index a4f504d65e01..ed2a5edb1a6d 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,6 @@ require ( github.com/golang/mock v1.6.0 github.com/google/go-github/v35 v35.3.0 github.com/google/uuid v1.3.0 - github.com/hashicorp/go-multierror v1.1.1 github.com/mrajashree/etcdadm-controller v1.0.0-rc3 github.com/onsi/gomega v1.19.0 github.com/pkg/errors v0.9.1 diff --git a/internal/test/cleanup/cleanup.go b/internal/test/cleanup/cleanup.go index aa999b367640..6e57862569e4 100644 
--- a/internal/test/cleanup/cleanup.go +++ b/internal/test/cleanup/cleanup.go @@ -6,7 +6,6 @@ import ( "strconv" "github.com/aws/aws-sdk-go/aws/session" - "github.com/hashicorp/go-multierror" "github.com/aws/eks-anywhere/internal/pkg/ec2" "github.com/aws/eks-anywhere/internal/pkg/s3" @@ -86,7 +85,7 @@ func VsphereRmVms(ctx context.Context, clusterName string, opts ...executables.G return govc.CleanupVms(ctx, clusterName, false) } -func CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dryRun bool) (retErr error) { +func CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dryRun bool) error { executableBuilder, close, err := executables.NewExecutableBuilder(ctx, executables.DefaultEksaImage()) if err != nil { return fmt.Errorf("unable to initialize executables: %v", err) @@ -103,11 +102,12 @@ func CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dry for _, config := range execConfig.Profiles { cmk := executableBuilder.BuildCmkExecutable(tmpWriter, config) if err := cleanupCloudStackVms(ctx, cmk, clusterName, dryRun); err != nil { - retErr = multierror.Append(retErr, err) + cmk.Close(ctx) + return err } cmk.Close(ctx) } - return retErr + return nil } func cleanupCloudStackVms(ctx context.Context, cmk *executables.Cmk, clusterName string, dryRun bool) error { diff --git a/release/go.sum b/release/go.sum index c56a4340b555..6a7863ca1ea0 100644 --- a/release/go.sum +++ b/release/go.sum @@ -462,7 +462,6 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= diff --git a/test/e2e/tools/eks-anywhere-test-tool/go.sum b/test/e2e/tools/eks-anywhere-test-tool/go.sum index 4fcace3fab94..6bfcb1e860a5 100644 --- a/test/e2e/tools/eks-anywhere-test-tool/go.sum +++ b/test/e2e/tools/eks-anywhere-test-tool/go.sum @@ -553,10 +553,6 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod 
h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= From 0a87c51faacc0d06f15fe868e00fdb1fb40fbcd3 Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Thu, 30 Jun 2022 14:16:16 -0500 Subject: [PATCH 07/22] Update to use single cmk object instead of list or map of multiple cmks --- internal/test/cleanup/cleanup.go | 15 +- pkg/dependencies/factory.go | 19 +-- pkg/executables/builder.go | 4 +- pkg/executables/cmk.go | 80 +++++---- pkg/executables/cmk_test.go | 103 ++++++------ pkg/providers/cloudstack/cloudstack.go | 8 +- pkg/providers/cloudstack/cloudstack_test.go | 24 ++- pkg/providers/cloudstack/mocks/client.go | 83 +++++----- pkg/providers/cloudstack/validator.go | 70 +++----- pkg/providers/cloudstack/validator_test.go | 173 ++++++++++---------- 10 files changed, 282 insertions(+), 297 deletions(-) diff --git a/internal/test/cleanup/cleanup.go b/internal/test/cleanup/cleanup.go index 6e57862569e4..789ec6049df1 100644 --- a/internal/test/cleanup/cleanup.go +++ b/internal/test/cleanup/cleanup.go @@ -99,23 +99,22 @@ func CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dry if err != nil { return fmt.Errorf("building cmk executable: %v", err) } - for _, config := range execConfig.Profiles { - cmk := executableBuilder.BuildCmkExecutable(tmpWriter, config) - if err := cleanupCloudStackVms(ctx, cmk, clusterName, dryRun); err != nil { + cmk := executableBuilder.BuildCmkExecutable(tmpWriter, execConfig.Profiles) + defer cmk.Close(ctx) + for _, profile := range execConfig.Profiles { + if err := cleanupCloudStackVms(ctx, profile.Name, cmk, clusterName, dryRun); err != nil { cmk.Close(ctx) - return err } - cmk.Close(ctx) } return nil } -func cleanupCloudStackVms(ctx context.Context, cmk *executables.Cmk, clusterName string, dryRun bool) error { - if err := cmk.ValidateCloudStackConnection(ctx); err != nil { +func cleanupCloudStackVms(ctx context.Context, profile string, cmk *executables.Cmk, clusterName string, dryRun bool) error { + if err := cmk.ValidateCloudStackConnection(ctx, profile); err != nil { return fmt.Errorf("validating cloudstack connection with cloudmonkey: %v", err) } - if err := cmk.CleanupVms(ctx, clusterName, dryRun); err != nil { + if err := cmk.CleanupVms(ctx, profile, clusterName, dryRun); err != nil { return fmt.Errorf("cleaning up VMs with cloudmonkey: %v", err) } return nil diff --git a/pkg/dependencies/factory.go b/pkg/dependencies/factory.go index 42030b981ed6..6a782404b784 100644 --- a/pkg/dependencies/factory.go +++ b/pkg/dependencies/factory.go @@ -50,7 +50,7 @@ type Dependencies struct { DockerClient *executables.Docker Kubectl *executables.Kubectl Govc *executables.Govc - Cmks map[string]*executables.Cmk + Cmk *executables.Cmk SnowAwsClient aws.Clients SnowConfigManager *snow.ConfigManager Writer filewriter.FileWriter @@ -268,17 +268,12 @@ func (f *Factory) WithProvider(clusterConfigFile string, clusterConfig *v1alpha1 } // map[string]*executables.Cmk and map[string]ProviderCmkClient are not compatible so we convert the map manually - cmkClientMap := cloudstack.CmkClientMap{} - for name, cmk := range f.dependencies.Cmks { - cmkClientMap[name] = cmk - } - f.dependencies.Provider = cloudstack.NewProvider( datacenterConfig, machineConfigs, clusterConfig, f.dependencies.Kubectl, - cmkClientMap, + f.dependencies.Cmk, f.dependencies.Writer, time.Now, skipIpCheck, @@ -421,21 +416,17 @@ func (f *Factory) WithCmk() *Factory { f.WithExecutableBuilder().WithWriter() f.buildSteps = append(f.buildSteps, func(ctx context.Context) error { - if f.dependencies.Cmks != nil 
&& len(f.dependencies.Cmks) > 0 { + if f.dependencies.Cmk != nil { return nil } - f.dependencies.Cmks = map[string]*executables.Cmk{} execConfig, err := decoder.ParseCloudStackSecret() if err != nil { return fmt.Errorf("building cmk executable: %v", err) } - for _, profileConfig := range execConfig.Profiles { - cmk := f.executableBuilder.BuildCmkExecutable(f.dependencies.Writer, profileConfig) - f.dependencies.Cmks[profileConfig.Name] = cmk - f.dependencies.closers = append(f.dependencies.closers, cmk) - } + f.dependencies.Cmk = f.executableBuilder.BuildCmkExecutable(f.dependencies.Writer, execConfig.Profiles) + f.dependencies.closers = append(f.dependencies.closers, f.dependencies.Cmk) return nil }) diff --git a/pkg/executables/builder.go b/pkg/executables/builder.go index 37bdcee06539..bd03ab63b20a 100644 --- a/pkg/executables/builder.go +++ b/pkg/executables/builder.go @@ -41,8 +41,8 @@ func (b *ExecutableBuilder) BuildGovcExecutable(writer filewriter.FileWriter, op return NewGovc(b.buildExecutable(govcPath), writer, opts...) } -func (b *ExecutableBuilder) BuildCmkExecutable(writer filewriter.FileWriter, config decoder.CloudStackProfileConfig) *Cmk { - return NewCmk(b.buildExecutable(cmkPath), writer, config) +func (b *ExecutableBuilder) BuildCmkExecutable(writer filewriter.FileWriter, configs []decoder.CloudStackProfileConfig) *Cmk { + return NewCmk(b.buildExecutable(cmkPath), writer, configs) } func (b *ExecutableBuilder) BuildAwsCli() *AwsCli { diff --git a/pkg/executables/cmk.go b/pkg/executables/cmk.go index b28d6b7c1baf..e212bd23059d 100644 --- a/pkg/executables/cmk.go +++ b/pkg/executables/cmk.go @@ -34,14 +34,14 @@ const ( type Cmk struct { writer filewriter.FileWriter executable Executable - config decoder.CloudStackProfileConfig + configMap map[string]decoder.CloudStackProfileConfig } func (c *Cmk) Close(ctx context.Context) error { return nil } -func (c *Cmk) ValidateTemplatePresent(ctx context.Context, domainId string, zoneId string, account string, template v1alpha1.CloudStackResourceIdentifier) error { +func (c *Cmk) ValidateTemplatePresent(ctx context.Context, profile string, domainId string, zoneId string, account string, template v1alpha1.CloudStackResourceIdentifier) error { command := newCmkCommand("list templates") applyCmkArgs(&command, appendArgs("templatefilter=all"), appendArgs("listall=true")) if len(template.Id) > 0 { @@ -57,7 +57,7 @@ func (c *Cmk) ValidateTemplatePresent(ctx context.Context, domainId string, zone applyCmkArgs(&command, withCloudStackAccount(account)) } } - result, err := c.exec(ctx, command...) + result, err := c.exec(ctx, profile, command...) if err != nil { return fmt.Errorf("getting templates info - %s: %v", result.String(), err) } @@ -80,7 +80,7 @@ func (c *Cmk) ValidateTemplatePresent(ctx context.Context, domainId string, zone return nil } -func (c *Cmk) ValidateServiceOfferingPresent(ctx context.Context, zoneId string, serviceOffering v1alpha1.CloudStackResourceIdentifier) error { +func (c *Cmk) ValidateServiceOfferingPresent(ctx context.Context, profile string, zoneId string, serviceOffering v1alpha1.CloudStackResourceIdentifier) error { command := newCmkCommand("list serviceofferings") if len(serviceOffering.Id) > 0 { applyCmkArgs(&command, withCloudStackId(serviceOffering.Id)) @@ -88,7 +88,7 @@ func (c *Cmk) ValidateServiceOfferingPresent(ctx context.Context, zoneId string, applyCmkArgs(&command, withCloudStackName(serviceOffering.Name)) } applyCmkArgs(&command, withCloudStackZoneId(zoneId)) - result, err := c.exec(ctx, command...) 
+ result, err := c.exec(ctx, profile, command...) if err != nil { return fmt.Errorf("getting service offerings info - %s: %v", result.String(), err) } @@ -112,7 +112,7 @@ func (c *Cmk) ValidateServiceOfferingPresent(ctx context.Context, zoneId string, return nil } -func (c *Cmk) ValidateDiskOfferingPresent(ctx context.Context, zoneId string, diskOffering v1alpha1.CloudStackResourceDiskOffering) error { +func (c *Cmk) ValidateDiskOfferingPresent(ctx context.Context, profile string, zoneId string, diskOffering v1alpha1.CloudStackResourceDiskOffering) error { command := newCmkCommand("list diskofferings") if len(diskOffering.Id) > 0 { applyCmkArgs(&command, withCloudStackId(diskOffering.Id)) @@ -120,7 +120,7 @@ func (c *Cmk) ValidateDiskOfferingPresent(ctx context.Context, zoneId string, di applyCmkArgs(&command, withCloudStackName(diskOffering.Name)) } applyCmkArgs(&command, withCloudStackZoneId(zoneId)) - result, err := c.exec(ctx, command...) + result, err := c.exec(ctx, profile, command...) if err != nil { return fmt.Errorf("getting disk offerings info - %s: %v", result.String(), err) } @@ -150,7 +150,7 @@ func (c *Cmk) ValidateDiskOfferingPresent(ctx context.Context, zoneId string, di return nil } -func (c *Cmk) ValidateAffinityGroupsPresent(ctx context.Context, domainId string, account string, affinityGroupIds []string) error { +func (c *Cmk) ValidateAffinityGroupsPresent(ctx context.Context, profile string, domainId string, account string, affinityGroupIds []string) error { for _, affinityGroupId := range affinityGroupIds { command := newCmkCommand("list affinitygroups") applyCmkArgs(&command, withCloudStackId(affinityGroupId)) @@ -163,7 +163,7 @@ func (c *Cmk) ValidateAffinityGroupsPresent(ctx context.Context, domainId string } } - result, err := c.exec(ctx, command...) + result, err := c.exec(ctx, profile, command...) if err != nil { return fmt.Errorf("getting affinity group info - %s: %v", result.String(), err) } @@ -187,14 +187,14 @@ func (c *Cmk) ValidateAffinityGroupsPresent(ctx context.Context, domainId string return nil } -func (c *Cmk) ValidateZonePresent(ctx context.Context, zone v1alpha1.CloudStackZone) (string, error) { +func (c *Cmk) ValidateZonePresent(ctx context.Context, profile string, zone v1alpha1.CloudStackZone) (string, error) { command := newCmkCommand("list zones") if len(zone.Id) > 0 { applyCmkArgs(&command, withCloudStackId(zone.Id)) } else { applyCmkArgs(&command, withCloudStackName(zone.Name)) } - result, err := c.exec(ctx, command...) + result, err := c.exec(ctx, profile, command...) 
if err != nil { return "", fmt.Errorf("getting zones info - %s: %v", result.String(), err) } @@ -217,7 +217,7 @@ func (c *Cmk) ValidateZonePresent(ctx context.Context, zone v1alpha1.CloudStackZ return cmkZones[0].Id, nil } -func (c *Cmk) ValidateDomainPresent(ctx context.Context, domain string) (v1alpha1.CloudStackResourceIdentifier, error) { +func (c *Cmk) ValidateDomainPresent(ctx context.Context, profile string, domain string) (v1alpha1.CloudStackResourceIdentifier, error) { domainIdentifier := v1alpha1.CloudStackResourceIdentifier{Name: domain, Id: ""} command := newCmkCommand("list domains") // "list domains" API does not support querying by domain path, so here we extract the domain name which is the last part of the input domain @@ -225,7 +225,7 @@ func (c *Cmk) ValidateDomainPresent(ctx context.Context, domain string) (v1alpha domainName := tokens[len(tokens)-1] applyCmkArgs(&command, withCloudStackName(domainName), appendArgs("listall=true")) - result, err := c.exec(ctx, command...) + result, err := c.exec(ctx, profile, command...) if err != nil { return domainIdentifier, fmt.Errorf("getting domain info - %s: %v", result.String(), err) } @@ -260,7 +260,7 @@ func (c *Cmk) ValidateDomainPresent(ctx context.Context, domain string) (v1alpha return domainIdentifier, nil } -func (c *Cmk) ValidateNetworkPresent(ctx context.Context, domainId string, network v1alpha1.CloudStackResourceIdentifier, zoneId string, account string, multipleZone bool) error { +func (c *Cmk) ValidateNetworkPresent(ctx context.Context, profile string, domainId string, network v1alpha1.CloudStackResourceIdentifier, zoneId string, account string, multipleZone bool) error { command := newCmkCommand("list networks") if multipleZone { applyCmkArgs(&command, withCloudStackNetworkType(Shared)) @@ -274,7 +274,7 @@ func (c *Cmk) ValidateNetworkPresent(ctx context.Context, domainId string, netwo } } applyCmkArgs(&command, withCloudStackZoneId(zoneId)) - result, err := c.exec(ctx, command...) + result, err := c.exec(ctx, profile, command...) if err != nil { return fmt.Errorf("getting network info - %s: %v", result.String(), err) } @@ -319,7 +319,7 @@ func (c *Cmk) ValidateNetworkPresent(ctx context.Context, domainId string, netwo return nil } -func (c *Cmk) ValidateAccountPresent(ctx context.Context, account string, domainId string) error { +func (c *Cmk) ValidateAccountPresent(ctx context.Context, profile string, account string, domainId string) error { // If account is not specified then no need to check its presence if len(account) == 0 { return nil @@ -327,7 +327,7 @@ func (c *Cmk) ValidateAccountPresent(ctx context.Context, account string, domain command := newCmkCommand("list accounts") applyCmkArgs(&command, withCloudStackName(account), withCloudStackDomainId(domainId)) - result, err := c.exec(ctx, command...) + result, err := c.exec(ctx, profile, command...) 
if err != nil { return fmt.Errorf("getting accounts info - %s: %v", result.String(), err) } @@ -350,22 +350,31 @@ func (c *Cmk) ValidateAccountPresent(ctx context.Context, account string, domain return nil } -func NewCmk(executable Executable, writer filewriter.FileWriter, config decoder.CloudStackProfileConfig) *Cmk { +func NewCmk(executable Executable, writer filewriter.FileWriter, configs []decoder.CloudStackProfileConfig) *Cmk { + configMap := map[string]decoder.CloudStackProfileConfig{} + for _, config := range configs { + configMap[config.Name] = config + } + return &Cmk{ writer: writer, executable: executable, - config: config, + configMap: configMap, } } -func (c *Cmk) GetManagementApiEndpoint() string { - return c.config.ManagementUrl +func (c *Cmk) GetManagementApiEndpoint(profile string) (string, error) { + config, exist := c.configMap[profile] + if exist { + return config.ManagementUrl, nil + } + return "", fmt.Errorf("profile %s does not exist", profile) } // ValidateCloudStackConnection Calls `cmk sync` to ensure that the endpoint and credentials + domain are valid -func (c *Cmk) ValidateCloudStackConnection(ctx context.Context) error { +func (c *Cmk) ValidateCloudStackConnection(ctx context.Context, profile string) error { command := newCmkCommand("sync") - buffer, err := c.exec(ctx, command...) + buffer, err := c.exec(ctx, profile, command...) if err != nil { return fmt.Errorf("validating cloudstack connection for cmk: %s: %v", buffer.String(), err) } @@ -373,10 +382,10 @@ func (c *Cmk) ValidateCloudStackConnection(ctx context.Context) error { return nil } -func (c *Cmk) CleanupVms(ctx context.Context, clusterName string, dryRun bool) error { +func (c *Cmk) CleanupVms(ctx context.Context, profile string, clusterName string, dryRun bool) error { command := newCmkCommand("list virtualmachines") applyCmkArgs(&command, withCloudStackKeyword(clusterName), appendArgs("listall=true")) - result, err := c.exec(ctx, command...) + result, err := c.exec(ctx, profile, command...) if err != nil { return fmt.Errorf("listing virtual machines in cluster %s: %s: %v", clusterName, result.String(), err) } @@ -396,13 +405,13 @@ func (c *Cmk) CleanupVms(ctx context.Context, clusterName string, dryRun bool) e } stopCommand := newCmkCommand("stop virtualmachine") applyCmkArgs(&stopCommand, withCloudStackId(vm.Id), appendArgs("forced=true")) - stopResult, err := c.exec(ctx, stopCommand...) + stopResult, err := c.exec(ctx, profile, stopCommand...) if err != nil { return fmt.Errorf("stopping virtual machine with name %s and id %s: %s: %v", vm.Name, vm.Id, stopResult.String(), err) } destroyCommand := newCmkCommand("destroy virtualmachine") applyCmkArgs(&destroyCommand, withCloudStackId(vm.Id), appendArgs("expunge=true")) - destroyResult, err := c.exec(ctx, destroyCommand...) + destroyResult, err := c.exec(ctx, profile, destroyCommand...) 
if err != nil { return fmt.Errorf("destroying virtual machine with name %s and id %s: %s: %v", vm.Name, vm.Id, destroyResult.String(), err) } @@ -412,12 +421,12 @@ func (c *Cmk) CleanupVms(ctx context.Context, clusterName string, dryRun bool) e return nil } -func (c *Cmk) exec(ctx context.Context, args ...string) (stdout bytes.Buffer, err error) { +func (c *Cmk) exec(ctx context.Context, profile string, args ...string) (stdout bytes.Buffer, err error) { if err != nil { return stdout, fmt.Errorf("failed get environment map: %v", err) } - configFile, err := c.buildCmkConfigFile() + configFile, err := c.buildCmkConfigFile(profile) if err != nil { return stdout, fmt.Errorf("failed cmk validations: %v", err) } @@ -426,17 +435,22 @@ func (c *Cmk) exec(ctx context.Context, args ...string) (stdout bytes.Buffer, er return c.executable.Execute(ctx, argsWithConfigFile...) } -func (c *Cmk) buildCmkConfigFile() (configFile string, err error) { +func (c *Cmk) buildCmkConfigFile(profile string) (configFile string, err error) { + config, exist := c.configMap[profile] + if !exist { + return "", fmt.Errorf("profile %s does not exist", profile) + } + t := templater.New(c.writer) - c.config.Timeout = defaultCloudStackPreflightTimeout + config.Timeout = defaultCloudStackPreflightTimeout if timeout, isSet := os.LookupEnv("CLOUDSTACK_PREFLIGHT_TIMEOUT"); isSet { if _, err := strconv.ParseUint(timeout, 10, 16); err != nil { return "", fmt.Errorf("CLOUDSTACK_PREFLIGHT_TIMEOUT must be a number: %v", err) } - c.config.Timeout = timeout + config.Timeout = timeout } - writtenFileName, err := t.WriteToFile(cmkConfigTemplate, c.config, fmt.Sprintf(cmkConfigFileNameTemplate, c.config.Name)) + writtenFileName, err := t.WriteToFile(cmkConfigTemplate, config, fmt.Sprintf(cmkConfigFileNameTemplate, profile)) if err != nil { return "", fmt.Errorf("creating file for cmk config: %v", err) } diff --git a/pkg/executables/cmk_test.go b/pkg/executables/cmk_test.go index 93255c969e85..4d1f4d377a78 100644 --- a/pkg/executables/cmk_test.go +++ b/pkg/executables/cmk_test.go @@ -111,8 +111,8 @@ func TestValidateCloudStackConnectionSuccess(t *testing.T) { configFilePath, _ := filepath.Abs(filepath.Join(writer.Dir(), "generated", cmkConfigFileName)) expectedArgs := []string{"-c", configFilePath, "sync"} executable.EXPECT().Execute(ctx, expectedArgs).Return(bytes.Buffer{}, nil) - c := executables.NewCmk(executable, writer, execConfig.Profiles[0]) - err := c.ValidateCloudStackConnection(ctx) + c := executables.NewCmk(executable, writer, execConfig.Profiles) + err := c.ValidateCloudStackConnection(ctx, execConfig.Profiles[0].Name) if err != nil { t.Fatalf("Cmk.ValidateCloudStackConnection() error = %v, want nil", err) } @@ -131,13 +131,12 @@ func TestValidateMultipleCloudStackProfiles(t *testing.T) { expectedArgs2 := []string{"-c", configFilePath2, "sync"} executable.EXPECT().Execute(ctx, expectedArgs2).Return(bytes.Buffer{}, nil) - c := executables.NewCmk(executable, writer, execConfigWithMultipleProfiles.Profiles[0]) - err := c.ValidateCloudStackConnection(ctx) + c := executables.NewCmk(executable, writer, execConfigWithMultipleProfiles.Profiles) + err := c.ValidateCloudStackConnection(ctx, execConfigWithMultipleProfiles.Profiles[0].Name) if err != nil { t.Fatalf("Cmk.ValidateCloudStackConnection() error = %v, want nil", err) } - c = executables.NewCmk(executable, writer, execConfigWithMultipleProfiles.Profiles[1]) - err = c.ValidateCloudStackConnection(ctx) + err = c.ValidateCloudStackConnection(ctx, 
execConfigWithMultipleProfiles.Profiles[1].Name) if err != nil { t.Fatalf("Cmk.ValidateCloudStackConnection() error = %v, want nil", err) } @@ -152,8 +151,8 @@ func TestValidateCloudStackConnectionError(t *testing.T) { configFilePath, _ := filepath.Abs(filepath.Join(writer.Dir(), "generated", cmkConfigFileName)) expectedArgs := []string{"-c", configFilePath, "sync"} executable.EXPECT().Execute(ctx, expectedArgs).Return(bytes.Buffer{}, errors.New("cmk test error")) - c := executables.NewCmk(executable, writer, execConfig.Profiles[0]) - err := c.ValidateCloudStackConnection(ctx) + c := executables.NewCmk(executable, writer, execConfig.Profiles) + err := c.ValidateCloudStackConnection(ctx, execConfig.Profiles[0].Name) if err == nil { t.Fatalf("Cmk.ValidateCloudStackConnection() didn't throw expected error") } @@ -179,7 +178,7 @@ func TestCmkCleanupVms(t *testing.T) { "list", "virtualmachines", fmt.Sprintf("keyword=\"%s\"", clusterName), "listall=true", }}, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.CleanupVms(ctx, clusterName, false) + return cmk.CleanupVms(ctx, execConfig.Profiles[0].Name, clusterName, false) }, cmkResponseError: nil, wantErr: true, @@ -194,7 +193,7 @@ func TestCmkCleanupVms(t *testing.T) { }, }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.CleanupVms(ctx, clusterName, true) + return cmk.CleanupVms(ctx, execConfig.Profiles[0].Name, clusterName, true) }, cmkResponseError: nil, wantErr: false, @@ -207,7 +206,7 @@ func TestCmkCleanupVms(t *testing.T) { "list", "virtualmachines", fmt.Sprintf("keyword=\"%s\"", clusterName), "listall=true", }}, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.CleanupVms(ctx, clusterName, false) + return cmk.CleanupVms(ctx, execConfig.Profiles[0].Name, clusterName, false) }, cmkResponseError: nil, wantErr: true, @@ -220,7 +219,7 @@ func TestCmkCleanupVms(t *testing.T) { "list", "virtualmachines", fmt.Sprintf("keyword=\"%s\"", clusterName), "listall=true", }}, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.CleanupVms(ctx, clusterName, false) + return cmk.CleanupVms(ctx, execConfig.Profiles[0].Name, clusterName, false) }, cmkResponseError: nil, wantErr: true, @@ -243,7 +242,7 @@ func TestCmkCleanupVms(t *testing.T) { }, }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.CleanupVms(ctx, clusterName, false) + return cmk.CleanupVms(ctx, execConfig.Profiles[0].Name, clusterName, false) }, cmkResponseError: nil, wantErr: false, @@ -266,7 +265,7 @@ func TestCmkCleanupVms(t *testing.T) { executable.EXPECT().Execute(ctx, argsList). 
Return(*bytes.NewBufferString(fileContent), tt.cmkResponseError) } - cmk := executables.NewCmk(executable, writer, execConfig.Profiles[0]) + cmk := executables.NewCmk(executable, writer, execConfig.Profiles) err := tt.cmkFunc(*cmk, ctx) if tt.wantErr && err != nil || !tt.wantErr && err == nil { return @@ -297,7 +296,7 @@ func TestCmkListOperations(t *testing.T) { "list", "domains", fmt.Sprintf("name=\"%s\"", rootDomain), "listall=true", }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - domain, err := cmk.ValidateDomainPresent(ctx, rootDomain) + domain, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, rootDomain) if domain.Id != rootDomainId { t.Fatalf("Expected domain id: %s, actual domain id: %s", rootDomainId, domain.Id) } @@ -316,7 +315,7 @@ func TestCmkListOperations(t *testing.T) { "list", "domains", fmt.Sprintf("name=\"%s\"", domainName), "listall=true", }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - domain, err := cmk.ValidateDomainPresent(ctx, domain) + domain, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, domain) if domain.Id != domainId { t.Fatalf("Expected domain id: %s, actual domain id: %s", domainId, domain.Id) } @@ -335,7 +334,7 @@ func TestCmkListOperations(t *testing.T) { "list", "domains", fmt.Sprintf("name=\"%s\"", domainName), "listall=true", }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateDomainPresent(ctx, domainName) + _, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, domainName) return err }, cmkResponseError: nil, @@ -351,7 +350,7 @@ func TestCmkListOperations(t *testing.T) { "list", "domains", fmt.Sprintf("name=\"%s\"", domain2Name), "listall=true", }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - domain, err := cmk.ValidateDomainPresent(ctx, domain2) + domain, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, domain2) if domain.Id != domain2Id { t.Fatalf("Expected domain id: %s, actual domain id: %s", domain2Id, domain.Id) } @@ -370,7 +369,7 @@ func TestCmkListOperations(t *testing.T) { "list", "domains", fmt.Sprintf("name=\"%s\"", domainName), "listall=true", }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateDomainPresent(ctx, domain) + _, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, domain) return err }, cmkResponseError: nil, @@ -386,7 +385,7 @@ func TestCmkListOperations(t *testing.T) { "list", "domains", fmt.Sprintf("name=\"%s\"", domainName), "listall=true", }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateDomainPresent(ctx, domain) + _, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, domain) return err }, cmkResponseError: nil, @@ -402,7 +401,7 @@ func TestCmkListOperations(t *testing.T) { "list", "accounts", fmt.Sprintf("name=\"%s\"", accountName), fmt.Sprintf("domainid=\"%s\"", domainId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateAccountPresent(ctx, accountName, domainId) + return cmk.ValidateAccountPresent(ctx, execConfig.Profiles[0].Name, accountName, domainId) }, cmkResponseError: nil, wantErr: false, @@ -417,7 +416,7 @@ func TestCmkListOperations(t *testing.T) { "list", "accounts", fmt.Sprintf("name=\"%s\"", accountName), fmt.Sprintf("domainid=\"%s\"", domainId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateAccountPresent(ctx, accountName, domainId) + return 
cmk.ValidateAccountPresent(ctx, execConfig.Profiles[0].Name, accountName, domainId) }, cmkResponseError: nil, wantErr: true, @@ -432,7 +431,7 @@ func TestCmkListOperations(t *testing.T) { "list", "accounts", fmt.Sprintf("name=\"%s\"", accountName), fmt.Sprintf("domainid=\"%s\"", domainId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateAccountPresent(ctx, accountName, domainId) + return cmk.ValidateAccountPresent(ctx, execConfig.Profiles[0].Name, accountName, domainId) }, cmkResponseError: nil, wantErr: true, @@ -447,7 +446,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonePresent(ctx, zones[0]) + _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[0]) return err }, cmkResponseError: nil, @@ -463,7 +462,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("id=\"%s\"", resourceId.Id), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonePresent(ctx, zones[2]) + _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[2]) return err }, cmkResponseError: nil, @@ -479,7 +478,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonePresent(ctx, zones[0]) + _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[0]) return err }, cmkResponseError: nil, @@ -495,7 +494,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonePresent(ctx, zones[0]) + _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[0]) return err }, cmkResponseError: nil, @@ -511,7 +510,7 @@ func TestCmkListOperations(t *testing.T) { "list", "networks", fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateNetworkPresent(ctx, domainId, zones[2].Network, zones[2].Id, accountName, false) + return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName, false) }, cmkResponseError: nil, wantErr: false, @@ -526,7 +525,7 @@ func TestCmkListOperations(t *testing.T) { "list", "networks", fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateNetworkPresent(ctx, domainId, zones[2].Network, zones[2].Id, accountName, false) + return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName, false) }, cmkResponseError: nil, wantErr: true, @@ -541,7 +540,7 @@ func TestCmkListOperations(t *testing.T) { "list", "networks", fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateNetworkPresent(ctx, domainId, zones[2].Network, zones[2].Id, accountName, false) + return cmk.ValidateNetworkPresent(ctx, 
execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName, false) }, cmkResponseError: nil, wantErr: true, @@ -556,7 +555,7 @@ func TestCmkListOperations(t *testing.T) { "list", "serviceofferings", fmt.Sprintf("name=\"%s\"", resourceName.Name), fmt.Sprintf("zoneid=\"%s\"", zoneId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateServiceOfferingPresent(ctx, zoneId, resourceName) + return cmk.ValidateServiceOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, resourceName) }, cmkResponseError: nil, wantErr: false, @@ -571,7 +570,7 @@ func TestCmkListOperations(t *testing.T) { "list", "serviceofferings", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("zoneid=\"%s\"", zoneId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateServiceOfferingPresent(ctx, zoneId, resourceId) + return cmk.ValidateServiceOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, resourceId) }, cmkResponseError: nil, wantErr: false, @@ -586,7 +585,7 @@ func TestCmkListOperations(t *testing.T) { "list", "serviceofferings", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("zoneid=\"%s\"", zoneId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateServiceOfferingPresent(ctx, zoneId, resourceId) + return cmk.ValidateServiceOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, resourceId) }, cmkResponseError: nil, wantErr: true, @@ -601,7 +600,7 @@ func TestCmkListOperations(t *testing.T) { "list", "serviceofferings", fmt.Sprintf("name=\"%s\"", resourceName.Name), fmt.Sprintf("zoneid=\"%s\"", zoneId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateServiceOfferingPresent(ctx, zoneId, resourceName) + return cmk.ValidateServiceOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, resourceName) }, cmkResponseError: nil, wantErr: true, @@ -616,7 +615,7 @@ func TestCmkListOperations(t *testing.T) { "list", "diskofferings", fmt.Sprintf("name=\"%s\"", resourceName.Name), fmt.Sprintf("zoneid=\"%s\"", zoneId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateDiskOfferingPresent(ctx, zoneId, diskOfferingResourceName) + return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceName) }, cmkResponseError: nil, wantErr: false, @@ -631,7 +630,7 @@ func TestCmkListOperations(t *testing.T) { "list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("zoneid=\"%s\"", zoneId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateDiskOfferingPresent(ctx, zoneId, diskOfferingResourceID) + return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceID) }, cmkResponseError: nil, wantErr: false, @@ -646,7 +645,7 @@ func TestCmkListOperations(t *testing.T) { "list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("zoneid=\"%s\"", zoneId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateDiskOfferingPresent(ctx, zoneId, diskOfferingResourceID) + return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceID) }, cmkResponseError: nil, wantErr: true, @@ -661,7 +660,7 @@ func TestCmkListOperations(t *testing.T) { "list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("zoneid=\"%s\"", zoneId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) 
error { - return cmk.ValidateDiskOfferingPresent(ctx, zoneId, diskOfferingResourceID) + return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceID) }, cmkResponseError: nil, wantErr: true, @@ -676,7 +675,7 @@ func TestCmkListOperations(t *testing.T) { "list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("zoneid=\"%s\"", zoneId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateDiskOfferingPresent(ctx, zoneId, diskOfferingResourceID) + return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceID) }, cmkResponseError: nil, wantErr: true, @@ -691,7 +690,7 @@ func TestCmkListOperations(t *testing.T) { "list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("zoneid=\"%s\"", zoneId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateDiskOfferingPresent(ctx, zoneId, diskOfferingCustomSizeInGB) + return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingCustomSizeInGB) }, cmkResponseError: nil, wantErr: false, @@ -706,7 +705,7 @@ func TestCmkListOperations(t *testing.T) { "list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("zoneid=\"%s\"", zoneId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateDiskOfferingPresent(ctx, zoneId, diskOfferingCustomSizeInGB) + return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingCustomSizeInGB) }, cmkResponseError: nil, wantErr: true, @@ -721,7 +720,7 @@ func TestCmkListOperations(t *testing.T) { "list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("zoneid=\"%s\"", zoneId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateDiskOfferingPresent(ctx, zoneId, diskOfferingResourceID) + return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceID) }, cmkResponseError: nil, wantErr: true, @@ -736,7 +735,7 @@ func TestCmkListOperations(t *testing.T) { "list", "diskofferings", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("zoneid=\"%s\"", zoneId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateDiskOfferingPresent(ctx, zoneId, diskOfferingResourceID) + return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceID) }, cmkResponseError: errors.New("cmk calling return exception"), wantErr: true, @@ -751,7 +750,7 @@ func TestCmkListOperations(t *testing.T) { "list", "diskofferings", fmt.Sprintf("name=\"%s\"", resourceName.Name), fmt.Sprintf("zoneid=\"%s\"", zoneId), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateDiskOfferingPresent(ctx, zoneId, diskOfferingResourceName) + return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceName) }, cmkResponseError: nil, wantErr: true, @@ -766,7 +765,7 @@ func TestCmkListOperations(t *testing.T) { "list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("name=\"%s\"", resourceName.Name), fmt.Sprintf("zoneid=\"%s\"", zoneId), fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateTemplatePresent(ctx, domainId, zoneId, accountName, resourceName) + return cmk.ValidateTemplatePresent(ctx, 
execConfig.Profiles[0].Name, domainId, zoneId, accountName, resourceName) }, cmkResponseError: nil, wantErr: false, @@ -781,7 +780,7 @@ func TestCmkListOperations(t *testing.T) { "list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("zoneid=\"%s\"", zoneId), fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateTemplatePresent(ctx, domainId, zoneId, accountName, resourceId) + return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainId, zoneId, accountName, resourceId) }, cmkResponseError: nil, wantErr: false, @@ -796,7 +795,7 @@ func TestCmkListOperations(t *testing.T) { "list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("name=\"%s\"", resourceName.Name), fmt.Sprintf("zoneid=\"%s\"", zoneId), fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateTemplatePresent(ctx, domainId, zoneId, accountName, resourceName) + return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainId, zoneId, accountName, resourceName) }, cmkResponseError: nil, wantErr: true, @@ -811,7 +810,7 @@ func TestCmkListOperations(t *testing.T) { "list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("name=\"%s\"", resourceName.Name), fmt.Sprintf("zoneid=\"%s\"", zoneId), fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateTemplatePresent(ctx, domainId, zoneId, accountName, resourceName) + return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainId, zoneId, accountName, resourceName) }, cmkResponseError: nil, wantErr: true, @@ -826,7 +825,7 @@ func TestCmkListOperations(t *testing.T) { "list", "affinitygroups", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateAffinityGroupsPresent(ctx, domainId, accountName, []string{resourceId.Id}) + return cmk.ValidateAffinityGroupsPresent(ctx, execConfig.Profiles[0].Name, domainId, accountName, []string{resourceId.Id}) }, cmkResponseError: nil, wantErr: false, @@ -841,7 +840,7 @@ func TestCmkListOperations(t *testing.T) { "list", "affinitygroups", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateAffinityGroupsPresent(ctx, domainId, accountName, []string{resourceId.Id}) + return cmk.ValidateAffinityGroupsPresent(ctx, execConfig.Profiles[0].Name, domainId, accountName, []string{resourceId.Id}) }, cmkResponseError: nil, wantErr: true, @@ -856,7 +855,7 @@ func TestCmkListOperations(t *testing.T) { "list", "affinitygroups", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateAffinityGroupsPresent(ctx, domainId, accountName, []string{resourceId.Id}) + return cmk.ValidateAffinityGroupsPresent(ctx, execConfig.Profiles[0].Name, domainId, accountName, []string{resourceId.Id}) }, cmkResponseError: nil, wantErr: true, 
@@ -879,7 +878,7 @@ func TestCmkListOperations(t *testing.T) { executable := mockexecutables.NewMockExecutable(mockCtrl) executable.EXPECT().Execute(ctx, tt.argumentsExecCall). Return(*bytes.NewBufferString(fileContent), tt.cmkResponseError) - cmk := executables.NewCmk(executable, writer, execConfig.Profiles[0]) + cmk := executables.NewCmk(executable, writer, execConfig.Profiles) err := tt.cmkFunc(*cmk, ctx) if tt.wantErr && err != nil || !tt.wantErr && err == nil { return diff --git a/pkg/providers/cloudstack/cloudstack.go b/pkg/providers/cloudstack/cloudstack.go index 61ba51e950ea..1aa321b368da 100644 --- a/pkg/providers/cloudstack/cloudstack.go +++ b/pkg/providers/cloudstack/cloudstack.go @@ -201,20 +201,20 @@ type ProviderKubectlClient interface { SetEksaControllerEnvVar(ctx context.Context, envVar, envVarVal, kubeconfig string) error } -func NewProvider(datacenterConfig *v1alpha1.CloudStackDatacenterConfig, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig, clusterConfig *v1alpha1.Cluster, providerKubectlClient ProviderKubectlClient, providerCmkClients CmkClientMap, writer filewriter.FileWriter, now types.NowFunc, skipIpCheck bool) *cloudstackProvider { +func NewProvider(datacenterConfig *v1alpha1.CloudStackDatacenterConfig, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig, clusterConfig *v1alpha1.Cluster, providerKubectlClient ProviderKubectlClient, providerCmkClient ProviderCmkClient, writer filewriter.FileWriter, now types.NowFunc, skipIpCheck bool) *cloudstackProvider { return NewProviderCustomNet( datacenterConfig, machineConfigs, clusterConfig, providerKubectlClient, - providerCmkClients, + providerCmkClient, writer, now, skipIpCheck, ) } -func NewProviderCustomNet(datacenterConfig *v1alpha1.CloudStackDatacenterConfig, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig, clusterConfig *v1alpha1.Cluster, providerKubectlClient ProviderKubectlClient, providerCmkClients CmkClientMap, writer filewriter.FileWriter, now types.NowFunc, skipIpCheck bool) *cloudstackProvider { +func NewProviderCustomNet(datacenterConfig *v1alpha1.CloudStackDatacenterConfig, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig, clusterConfig *v1alpha1.Cluster, providerKubectlClient ProviderKubectlClient, providerCmkClient ProviderCmkClient, writer filewriter.FileWriter, now types.NowFunc, skipIpCheck bool) *cloudstackProvider { var controlPlaneMachineSpec, etcdMachineSpec *v1alpha1.CloudStackMachineConfigSpec workerNodeGroupMachineSpecs := make(map[string]v1alpha1.CloudStackMachineConfigSpec, len(machineConfigs)) if clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef != nil && machineConfigs[clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef.Name] != nil { @@ -240,7 +240,7 @@ func NewProviderCustomNet(datacenterConfig *v1alpha1.CloudStackDatacenterConfig, now: now, }, skipIpCheck: skipIpCheck, - validator: NewValidator(providerCmkClients), + validator: NewValidator(providerCmkClient), } } diff --git a/pkg/providers/cloudstack/cloudstack_test.go b/pkg/providers/cloudstack/cloudstack_test.go index d76f8f156687..1fab79718fcf 100644 --- a/pkg/providers/cloudstack/cloudstack_test.go +++ b/pkg/providers/cloudstack/cloudstack_test.go @@ -59,16 +59,16 @@ func givenEmptyClusterSpec() *cluster.Spec { func givenWildcardCmk(mockCtrl *gomock.Controller) ProviderCmkClient { cmk := mocks.NewMockProviderCmkClient(mockCtrl) - cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - 
cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateZonePresent(gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateCloudStackConnection(gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateDomainPresent(gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateAccountPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateNetworkPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().GetManagementApiEndpoint().AnyTimes().Return("http://127.16.0.1:8080/client/api") + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateZonePresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateCloudStackConnection(gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateDomainPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateAccountPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateNetworkPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().GetManagementApiEndpoint(gomock.Any()).AnyTimes().Return("http://127.16.0.1:8080/client/api", nil) return cmk } @@ -194,15 +194,13 @@ func newProviderWithKubectl(t *testing.T, datacenterConfig *v1alpha1.CloudStackD func newProvider(t *testing.T, datacenterConfig *v1alpha1.CloudStackDatacenterConfig, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig, clusterConfig *v1alpha1.Cluster, kubectl ProviderKubectlClient, cmk ProviderCmkClient) *cloudstackProvider { _, writer := test.NewWriter(t) - cmks := CmkClientMap{} - cmks["Global"] = cmk return NewProviderCustomNet( datacenterConfig, machineConfigs, clusterConfig, kubectl, - cmks, + cmk, writer, test.FakeNow, false, diff --git a/pkg/providers/cloudstack/mocks/client.go b/pkg/providers/cloudstack/mocks/client.go index e95dc89d4f53..cd94d84ff43e 100644 --- a/pkg/providers/cloudstack/mocks/client.go +++ b/pkg/providers/cloudstack/mocks/client.go @@ -42,145 +42,146 @@ func (m *MockProviderCmkClient) EXPECT() *MockProviderCmkClientMockRecorder { } // GetManagementApiEndpoint mocks base method. -func (m *MockProviderCmkClient) GetManagementApiEndpoint() string { +func (m *MockProviderCmkClient) GetManagementApiEndpoint(arg0 string) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetManagementApiEndpoint") + ret := m.ctrl.Call(m, "GetManagementApiEndpoint", arg0) ret0, _ := ret[0].(string) - return ret0 + ret1, _ := ret[1].(error) + return ret0, ret1 } // GetManagementApiEndpoint indicates an expected call of GetManagementApiEndpoint. 
-func (mr *MockProviderCmkClientMockRecorder) GetManagementApiEndpoint() *gomock.Call { +func (mr *MockProviderCmkClientMockRecorder) GetManagementApiEndpoint(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetManagementApiEndpoint", reflect.TypeOf((*MockProviderCmkClient)(nil).GetManagementApiEndpoint)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetManagementApiEndpoint", reflect.TypeOf((*MockProviderCmkClient)(nil).GetManagementApiEndpoint), arg0) } // ValidateAccountPresent mocks base method. -func (m *MockProviderCmkClient) ValidateAccountPresent(arg0 context.Context, arg1, arg2 string) error { +func (m *MockProviderCmkClient) ValidateAccountPresent(arg0 context.Context, arg1, arg2, arg3 string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateAccountPresent", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "ValidateAccountPresent", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // ValidateAccountPresent indicates an expected call of ValidateAccountPresent. -func (mr *MockProviderCmkClientMockRecorder) ValidateAccountPresent(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockProviderCmkClientMockRecorder) ValidateAccountPresent(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateAccountPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateAccountPresent), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateAccountPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateAccountPresent), arg0, arg1, arg2, arg3) } // ValidateAffinityGroupsPresent mocks base method. -func (m *MockProviderCmkClient) ValidateAffinityGroupsPresent(arg0 context.Context, arg1, arg2 string, arg3 []string) error { +func (m *MockProviderCmkClient) ValidateAffinityGroupsPresent(arg0 context.Context, arg1, arg2, arg3 string, arg4 []string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateAffinityGroupsPresent", arg0, arg1, arg2, arg3) + ret := m.ctrl.Call(m, "ValidateAffinityGroupsPresent", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(error) return ret0 } // ValidateAffinityGroupsPresent indicates an expected call of ValidateAffinityGroupsPresent. -func (mr *MockProviderCmkClientMockRecorder) ValidateAffinityGroupsPresent(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { +func (mr *MockProviderCmkClientMockRecorder) ValidateAffinityGroupsPresent(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateAffinityGroupsPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateAffinityGroupsPresent), arg0, arg1, arg2, arg3) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateAffinityGroupsPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateAffinityGroupsPresent), arg0, arg1, arg2, arg3, arg4) } // ValidateCloudStackConnection mocks base method. -func (m *MockProviderCmkClient) ValidateCloudStackConnection(arg0 context.Context) error { +func (m *MockProviderCmkClient) ValidateCloudStackConnection(arg0 context.Context, arg1 string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateCloudStackConnection", arg0) + ret := m.ctrl.Call(m, "ValidateCloudStackConnection", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // ValidateCloudStackConnection indicates an expected call of ValidateCloudStackConnection. 
-func (mr *MockProviderCmkClientMockRecorder) ValidateCloudStackConnection(arg0 interface{}) *gomock.Call { +func (mr *MockProviderCmkClientMockRecorder) ValidateCloudStackConnection(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateCloudStackConnection", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateCloudStackConnection), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateCloudStackConnection", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateCloudStackConnection), arg0, arg1) } // ValidateDiskOfferingPresent mocks base method. -func (m *MockProviderCmkClient) ValidateDiskOfferingPresent(arg0 context.Context, arg1 string, arg2 v1alpha1.CloudStackResourceDiskOffering) error { +func (m *MockProviderCmkClient) ValidateDiskOfferingPresent(arg0 context.Context, arg1, arg2 string, arg3 v1alpha1.CloudStackResourceDiskOffering) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateDiskOfferingPresent", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "ValidateDiskOfferingPresent", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // ValidateDiskOfferingPresent indicates an expected call of ValidateDiskOfferingPresent. -func (mr *MockProviderCmkClientMockRecorder) ValidateDiskOfferingPresent(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockProviderCmkClientMockRecorder) ValidateDiskOfferingPresent(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDiskOfferingPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateDiskOfferingPresent), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDiskOfferingPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateDiskOfferingPresent), arg0, arg1, arg2, arg3) } // ValidateDomainPresent mocks base method. -func (m *MockProviderCmkClient) ValidateDomainPresent(arg0 context.Context, arg1 string) (v1alpha1.CloudStackResourceIdentifier, error) { +func (m *MockProviderCmkClient) ValidateDomainPresent(arg0 context.Context, arg1, arg2 string) (v1alpha1.CloudStackResourceIdentifier, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateDomainPresent", arg0, arg1) + ret := m.ctrl.Call(m, "ValidateDomainPresent", arg0, arg1, arg2) ret0, _ := ret[0].(v1alpha1.CloudStackResourceIdentifier) ret1, _ := ret[1].(error) return ret0, ret1 } // ValidateDomainPresent indicates an expected call of ValidateDomainPresent. -func (mr *MockProviderCmkClientMockRecorder) ValidateDomainPresent(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockProviderCmkClientMockRecorder) ValidateDomainPresent(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDomainPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateDomainPresent), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDomainPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateDomainPresent), arg0, arg1, arg2) } // ValidateNetworkPresent mocks base method. 
-func (m *MockProviderCmkClient) ValidateNetworkPresent(arg0 context.Context, arg1 string, arg2 v1alpha1.CloudStackResourceIdentifier, arg3, arg4 string, arg5 bool) error { +func (m *MockProviderCmkClient) ValidateNetworkPresent(arg0 context.Context, arg1, arg2 string, arg3 v1alpha1.CloudStackResourceIdentifier, arg4, arg5 string, arg6 bool) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateNetworkPresent", arg0, arg1, arg2, arg3, arg4, arg5) + ret := m.ctrl.Call(m, "ValidateNetworkPresent", arg0, arg1, arg2, arg3, arg4, arg5, arg6) ret0, _ := ret[0].(error) return ret0 } // ValidateNetworkPresent indicates an expected call of ValidateNetworkPresent. -func (mr *MockProviderCmkClientMockRecorder) ValidateNetworkPresent(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { +func (mr *MockProviderCmkClientMockRecorder) ValidateNetworkPresent(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateNetworkPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateNetworkPresent), arg0, arg1, arg2, arg3, arg4, arg5) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateNetworkPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateNetworkPresent), arg0, arg1, arg2, arg3, arg4, arg5, arg6) } // ValidateServiceOfferingPresent mocks base method. -func (m *MockProviderCmkClient) ValidateServiceOfferingPresent(arg0 context.Context, arg1 string, arg2 v1alpha1.CloudStackResourceIdentifier) error { +func (m *MockProviderCmkClient) ValidateServiceOfferingPresent(arg0 context.Context, arg1, arg2 string, arg3 v1alpha1.CloudStackResourceIdentifier) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateServiceOfferingPresent", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "ValidateServiceOfferingPresent", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // ValidateServiceOfferingPresent indicates an expected call of ValidateServiceOfferingPresent. -func (mr *MockProviderCmkClientMockRecorder) ValidateServiceOfferingPresent(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockProviderCmkClientMockRecorder) ValidateServiceOfferingPresent(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateServiceOfferingPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateServiceOfferingPresent), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateServiceOfferingPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateServiceOfferingPresent), arg0, arg1, arg2, arg3) } // ValidateTemplatePresent mocks base method. -func (m *MockProviderCmkClient) ValidateTemplatePresent(arg0 context.Context, arg1, arg2, arg3 string, arg4 v1alpha1.CloudStackResourceIdentifier) error { +func (m *MockProviderCmkClient) ValidateTemplatePresent(arg0 context.Context, arg1, arg2, arg3, arg4 string, arg5 v1alpha1.CloudStackResourceIdentifier) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateTemplatePresent", arg0, arg1, arg2, arg3, arg4) + ret := m.ctrl.Call(m, "ValidateTemplatePresent", arg0, arg1, arg2, arg3, arg4, arg5) ret0, _ := ret[0].(error) return ret0 } // ValidateTemplatePresent indicates an expected call of ValidateTemplatePresent. 
-func (mr *MockProviderCmkClientMockRecorder) ValidateTemplatePresent(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { +func (mr *MockProviderCmkClientMockRecorder) ValidateTemplatePresent(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateTemplatePresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateTemplatePresent), arg0, arg1, arg2, arg3, arg4) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateTemplatePresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateTemplatePresent), arg0, arg1, arg2, arg3, arg4, arg5) } // ValidateZonePresent mocks base method. -func (m *MockProviderCmkClient) ValidateZonePresent(arg0 context.Context, arg1 v1alpha1.CloudStackZone) (string, error) { +func (m *MockProviderCmkClient) ValidateZonePresent(arg0 context.Context, arg1 string, arg2 v1alpha1.CloudStackZone) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateZonePresent", arg0, arg1) + ret := m.ctrl.Call(m, "ValidateZonePresent", arg0, arg1, arg2) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // ValidateZonePresent indicates an expected call of ValidateZonePresent. -func (mr *MockProviderCmkClientMockRecorder) ValidateZonePresent(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockProviderCmkClientMockRecorder) ValidateZonePresent(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateZonePresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateZonePresent), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateZonePresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateZonePresent), arg0, arg1, arg2) } // MockProviderKubectlClient is a mock of ProviderKubectlClient interface. 
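A minimal usage sketch of the profile-aware Cmk API introduced above (illustrative only, not part of this change): a single Cmk is built from all parsed profiles, and each call names the profile whose credentials it should use. The function name validateProfiles, the package name, and the assumption that an Executable and FileWriter are already available are hypothetical; the decoder, executables, and filewriter identifiers come from the patch itself.

package example // illustrative sketch only; not part of this patch

import (
	"context"
	"fmt"

	"github.com/aws/eks-anywhere/pkg/executables"
	"github.com/aws/eks-anywhere/pkg/filewriter"
	"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
)

// validateProfiles parses the CloudStack secret, builds one Cmk over all
// profiles, and validates each profile's connection and management endpoint
// by profile name, mirroring the per-profile calls used in the validator.
func validateProfiles(ctx context.Context, executable executables.Executable, writer filewriter.FileWriter) error {
	execConfig, err := decoder.ParseCloudStackSecret()
	if err != nil {
		return fmt.Errorf("parsing cloudstack secret: %v", err)
	}
	cmk := executables.NewCmk(executable, writer, execConfig.Profiles)
	for _, profile := range execConfig.Profiles {
		if err := cmk.ValidateCloudStackConnection(ctx, profile.Name); err != nil {
			return fmt.Errorf("validating connection for profile %s: %v", profile.Name, err)
		}
		if _, err := cmk.GetManagementApiEndpoint(profile.Name); err != nil {
			return err
		}
	}
	return nil
}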
diff --git a/pkg/providers/cloudstack/validator.go b/pkg/providers/cloudstack/validator.go index d972f3dc3f46..bc3f1d807470 100644 --- a/pkg/providers/cloudstack/validator.go +++ b/pkg/providers/cloudstack/validator.go @@ -14,7 +14,7 @@ import ( ) type Validator struct { - cmks CmkClientMap + cmk ProviderCmkClient localAvailabilityZones []localAvailabilityZone } @@ -30,9 +30,9 @@ var restrictedUserCustomDetails = [...]string{ "keypairnames", "controlNodeLoginUser", } -func NewValidator(cmks CmkClientMap) *Validator { +func NewValidator(cmk ProviderCmkClient) *Validator { return &Validator{ - cmks: cmks, + cmk: cmk, localAvailabilityZones: []localAvailabilityZone{}, } } @@ -44,20 +44,18 @@ type localAvailabilityZone struct { } type ProviderCmkClient interface { - GetManagementApiEndpoint() string - ValidateCloudStackConnection(ctx context.Context) error - ValidateServiceOfferingPresent(ctx context.Context, zoneId string, serviceOffering anywherev1.CloudStackResourceIdentifier) error - ValidateDiskOfferingPresent(ctx context.Context, zoneId string, diskOffering anywherev1.CloudStackResourceDiskOffering) error - ValidateTemplatePresent(ctx context.Context, domainId string, zoneId string, account string, template anywherev1.CloudStackResourceIdentifier) error - ValidateAffinityGroupsPresent(ctx context.Context, domainId string, account string, affinityGroupIds []string) error - ValidateZonePresent(ctx context.Context, zone anywherev1.CloudStackZone) (string, error) - ValidateNetworkPresent(ctx context.Context, domainId string, network anywherev1.CloudStackResourceIdentifier, zoneId string, account string, multipleZone bool) error - ValidateDomainPresent(ctx context.Context, domain string) (anywherev1.CloudStackResourceIdentifier, error) - ValidateAccountPresent(ctx context.Context, account string, domainId string) error + GetManagementApiEndpoint(profile string) (string, error) + ValidateCloudStackConnection(ctx context.Context, profile string) error + ValidateServiceOfferingPresent(ctx context.Context, profile string, zoneId string, serviceOffering anywherev1.CloudStackResourceIdentifier) error + ValidateDiskOfferingPresent(ctx context.Context, profile string, zoneId string, diskOffering anywherev1.CloudStackResourceDiskOffering) error + ValidateTemplatePresent(ctx context.Context, profile string, domainId string, zoneId string, account string, template anywherev1.CloudStackResourceIdentifier) error + ValidateAffinityGroupsPresent(ctx context.Context, profile string, domainId string, account string, affinityGroupIds []string) error + ValidateZonePresent(ctx context.Context, profile string, zone anywherev1.CloudStackZone) (string, error) + ValidateNetworkPresent(ctx context.Context, profile string, domainId string, network anywherev1.CloudStackResourceIdentifier, zoneId string, account string, multipleZone bool) error + ValidateDomainPresent(ctx context.Context, profile string, domain string) (anywherev1.CloudStackResourceIdentifier, error) + ValidateAccountPresent(ctx context.Context, profile string, account string, domainId string) error } -type CmkClientMap map[string]ProviderCmkClient - func (v *Validator) validateCloudStackAccess(ctx context.Context, datacenterConfig *anywherev1.CloudStackDatacenterConfig) error { refNamesToCheck := []string{} if len(datacenterConfig.Spec.Domain) > 0 { @@ -68,11 +66,7 @@ func (v *Validator) validateCloudStackAccess(ctx context.Context, datacenterConf } for _, refName := range refNamesToCheck { - cmk, ok := v.cmks[refName] - if !ok { - return 
fmt.Errorf("cannot find CloudStack profile for credentialsRef %s", refName) - } - if err := cmk.ValidateCloudStackConnection(ctx); err != nil { + if err := v.cmk.ValidateCloudStackConnection(ctx, refName); err != nil { return fmt.Errorf("failed validating connection to cloudstack %s: %v", refName, err) } } @@ -92,27 +86,27 @@ func (v *Validator) ValidateCloudStackDatacenterConfig(ctx context.Context, data return fmt.Errorf("checking management api endpoint: %v", err) } - cmk, ok := v.cmks[az.CredentialsRef] - if !ok { - return fmt.Errorf("cannot find CloudStack profile named %s for availability zone %s", az.CredentialsRef, az.Name) + cmk := v.cmk + endpoint, err := cmk.GetManagementApiEndpoint(az.CredentialsRef) + if err != nil { + return err } - endpoint := cmk.GetManagementApiEndpoint() if endpoint != az.ManagementApiEndpoint { return fmt.Errorf("cloudstack secret management url (%s) differs from cluster spec management url (%s)", endpoint, az.ManagementApiEndpoint) } - domain, err := cmk.ValidateDomainPresent(ctx, az.Domain) + domain, err := v.cmk.ValidateDomainPresent(ctx, az.CredentialsRef, az.Domain) if err != nil { return err } az.DomainId = domain.Id - if err := cmk.ValidateAccountPresent(ctx, az.Account, az.DomainId); err != nil { + if err := v.cmk.ValidateAccountPresent(ctx, az.CredentialsRef, az.Account, az.DomainId); err != nil { return err } - zoneId, err := cmk.ValidateZonePresent(ctx, az.CloudStackAvailabilityZone.Zone) + zoneId, err := v.cmk.ValidateZonePresent(ctx, az.CredentialsRef, az.CloudStackAvailabilityZone.Zone) if err != nil { return err } @@ -120,7 +114,7 @@ func (v *Validator) ValidateCloudStackDatacenterConfig(ctx context.Context, data if len(az.CloudStackAvailabilityZone.Zone.Network.Id) == 0 && len(az.CloudStackAvailabilityZone.Zone.Network.Name) == 0 { return fmt.Errorf("zone network is not set or is empty") } - if err := cmk.ValidateNetworkPresent(ctx, az.DomainId, az.CloudStackAvailabilityZone.Zone.Network, zoneId, az.Account, true); err != nil { + if err := v.cmk.ValidateNetworkPresent(ctx, az.CredentialsRef, az.DomainId, az.CloudStackAvailabilityZone.Zone.Network, zoneId, az.Account, true); err != nil { return err } } @@ -135,10 +129,6 @@ func (v *Validator) generateLocalAvailabilityZones(ctx context.Context, datacent } if len(datacenterConfig.Spec.Domain) > 0 { - _, ok := v.cmks[decoder.CloudStackGlobalAZ] - if !ok { - return fmt.Errorf("cannot find CloudStack profile named %s for default availability zone", decoder.CloudStackGlobalAZ) - } for index, zone := range datacenterConfig.Spec.Zones { availabilityZone := localAvailabilityZone{ CloudStackAvailabilityZone: &anywherev1.CloudStackAvailabilityZone{ @@ -154,10 +144,6 @@ func (v *Validator) generateLocalAvailabilityZones(ctx context.Context, datacent } } for _, az := range datacenterConfig.Spec.AvailabilityZones { - _, ok := v.cmks[az.CredentialsRef] - if !ok { - return fmt.Errorf("cannot find CloudStack profile named %s for availability zone %s", az.CredentialsRef, az.Name) - } availabilityZone := localAvailabilityZone{ CloudStackAvailabilityZone: &az, } @@ -282,23 +268,19 @@ func (v *Validator) validateMachineConfig(ctx context.Context, datacenterConfig } for _, az := range v.localAvailabilityZones { - cmk, ok := v.cmks[az.CredentialsRef] - if !ok { - return fmt.Errorf("cannot find CloudStack profile named %s for availability zone %s", az.CredentialsRef, az.Name) - } - if err := cmk.ValidateTemplatePresent(ctx, az.DomainId, az.CloudStackAvailabilityZone.Zone.Id, az.Account, 
machineConfig.Spec.Template); err != nil { + if err := v.cmk.ValidateTemplatePresent(ctx, az.CredentialsRef, az.DomainId, az.CloudStackAvailabilityZone.Zone.Id, az.Account, machineConfig.Spec.Template); err != nil { return fmt.Errorf("validating template: %v", err) } - if err := cmk.ValidateServiceOfferingPresent(ctx, az.CloudStackAvailabilityZone.Zone.Id, machineConfig.Spec.ComputeOffering); err != nil { + if err := v.cmk.ValidateServiceOfferingPresent(ctx, az.CredentialsRef, az.CloudStackAvailabilityZone.Zone.Id, machineConfig.Spec.ComputeOffering); err != nil { return fmt.Errorf("validating service offering: %v", err) } if len(machineConfig.Spec.DiskOffering.Id) > 0 || len(machineConfig.Spec.DiskOffering.Name) > 0 { - if err := cmk.ValidateDiskOfferingPresent(ctx, az.CloudStackAvailabilityZone.Zone.Id, machineConfig.Spec.DiskOffering); err != nil { + if err := v.cmk.ValidateDiskOfferingPresent(ctx, az.CredentialsRef, az.CloudStackAvailabilityZone.Zone.Id, machineConfig.Spec.DiskOffering); err != nil { return fmt.Errorf("validating disk offering: %v", err) } } if len(machineConfig.Spec.AffinityGroupIds) > 0 { - if err := cmk.ValidateAffinityGroupsPresent(ctx, az.DomainId, az.Account, machineConfig.Spec.AffinityGroupIds); err != nil { + if err := v.cmk.ValidateAffinityGroupsPresent(ctx, az.CredentialsRef, az.DomainId, az.Account, machineConfig.Spec.AffinityGroupIds); err != nil { return fmt.Errorf("validating affinity group ids: %v", err) } } diff --git a/pkg/providers/cloudstack/validator_test.go b/pkg/providers/cloudstack/validator_test.go index cda388da7cfc..fef38c4829a7 100644 --- a/pkg/providers/cloudstack/validator_test.go +++ b/pkg/providers/cloudstack/validator_test.go @@ -12,7 +12,6 @@ import ( "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" - "github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder" "github.com/aws/eks-anywhere/pkg/providers/cloudstack/mocks" ) @@ -44,7 +43,7 @@ func TestValidateCloudStackDatacenterConfig(t *testing.T) { ctx := context.Background() setupContext() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { @@ -63,8 +62,7 @@ func TestValidateCloudStackDatacenterConfigWithAZ(t *testing.T) { ctx := context.Background() setupContext() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) - cmk2 := mocks.NewMockProviderCmkClient(gomock.NewController(t)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk, "zone2": cmk2}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainWithAZsFilename)) if err != nil { @@ -72,7 +70,11 @@ func TestValidateCloudStackDatacenterConfigWithAZ(t *testing.T) { } setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) - setupMockForAvailabilityZonesValidation(cmk2, ctx, datacenterConfig.Spec.AvailabilityZones) + setupMockForAvailabilityZonesValidation(cmk, ctx, datacenterConfig.Spec.AvailabilityZones) + + for _, az := range datacenterConfig.Spec.AvailabilityZones { + cmk.EXPECT().GetManagementApiEndpoint(gomock.Any()).Times(1).Return(az.ManagementApiEndpoint, nil) + } err = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) if err != nil { @@ -83,13 +85,13 @@ func 
TestValidateCloudStackDatacenterConfigWithAZ(t *testing.T) { func TestValidateCloudStackConnection(t *testing.T) { ctx := context.Background() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") } - cmk.EXPECT().ValidateCloudStackConnection(ctx).Return(nil) + cmk.EXPECT().ValidateCloudStackConnection(ctx, "Global").Return(nil) if err := validator.validateCloudStackAccess(ctx, datacenterConfig); err != nil { t.Fatalf("failed to validate CloudStackDataCenterConfig: %v", err) } @@ -98,7 +100,7 @@ func TestValidateCloudStackConnection(t *testing.T) { func TestValidateMachineConfigsNoControlPlaneEndpointIP(t *testing.T) { ctx := context.Background() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { @@ -131,7 +133,7 @@ func TestValidateDatacenterConfigsNoNetwork(t *testing.T) { datacenterConfig: datacenterConfig, machineConfigsLookup: nil, } - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig.Spec.Zones[0].Network.Id = "" datacenterConfig.Spec.Zones[0].Network.Name = "" setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) @@ -154,7 +156,7 @@ func TestValidateDatacenterBadManagementEndpoint(t *testing.T) { datacenterConfig: datacenterConfig, machineConfigsLookup: nil, } - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) datacenterConfig.Spec.ManagementApiEndpoint = ":1234.5234" @@ -177,7 +179,7 @@ func TestValidateDatacenterInconsistentManagementEndpoints(t *testing.T) { datacenterConfig: datacenterConfig, machineConfigsLookup: nil, } - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) datacenterConfig.Spec.ManagementApiEndpoint = "abcefg.com" @@ -194,7 +196,7 @@ func TestSetupAndValidateDiskOfferingEmpty(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -213,10 +215,10 @@ func TestSetupAndValidateDiskOfferingEmpty(t *testing.T) { setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) - cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) - cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(3) - cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), 
gomock.Any()).Times(0) - cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) _ = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) @@ -233,7 +235,7 @@ func TestSetupAndValidateValidDiskOffering(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -260,10 +262,10 @@ func TestSetupAndValidateValidDiskOffering(t *testing.T) { setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) - cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) - cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(3) - cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(1) - cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1) + cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) _ = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) @@ -280,7 +282,7 @@ func TestSetupAndValidateInvalidDiskOfferingNotPresent(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -307,10 +309,10 @@ func TestSetupAndValidateInvalidDiskOfferingNotPresent(t *testing.T) { setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) - cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), 
gomock.Any()).Times(1).Return(errors.New("match me")) - cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return(errors.New("match me")) + cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() _ = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) @@ -326,7 +328,7 @@ func TestSetupAndValidateInValidDiskOfferingBadMountPath(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -353,9 +355,9 @@ func TestSetupAndValidateInValidDiskOfferingBadMountPath(t *testing.T) { setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) - cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) wantErrMsg := "machine config test validation failed: mountPath: / invalid, must be non-empty and starts with /" @@ -370,7 +372,7 @@ func TestSetupAndValidateInValidDiskOfferingEmptyDevice(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -397,9 +399,9 @@ func TestSetupAndValidateInValidDiskOfferingEmptyDevice(t *testing.T) { setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) - cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + 
cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) wantErrMsg := "machine config test validation failed: device: invalid, empty device" @@ -414,7 +416,7 @@ func TestSetupAndValidateInValidDiskOfferingEmptyFilesystem(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -441,9 +443,9 @@ func TestSetupAndValidateInValidDiskOfferingEmptyFilesystem(t *testing.T) { setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) - cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) wantErrMsg := "machine config test validation failed: filesystem: invalid, empty filesystem" @@ -458,7 +460,7 @@ func TestSetupAndValidateInValidDiskOfferingEmptyLabel(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -485,9 +487,9 @@ func TestSetupAndValidateInValidDiskOfferingEmptyLabel(t *testing.T) { setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) - cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), 
gomock.Any()).AnyTimes() err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) wantErrMsg := "machine config test validation failed: label: invalid, empty label" @@ -502,7 +504,7 @@ func TestSetupAndValidateUsersNil(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -521,10 +523,10 @@ func TestSetupAndValidateUsersNil(t *testing.T) { setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) - cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) - cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(3) - cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(3) - cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) _ = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) @@ -541,7 +543,7 @@ func TestSetupAndValidateRestrictedUserDetails(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -574,7 +576,7 @@ func TestSetupAndValidateSshAuthorizedKeysNil(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -593,10 +595,10 @@ func TestSetupAndValidateSshAuthorizedKeysNil(t *testing.T) { setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) - cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) - cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(3) - cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(3) - 
cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) _ = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) @@ -606,20 +608,19 @@ func TestSetupAndValidateSshAuthorizedKeysNil(t *testing.T) { } func setupMockForDatacenterConfigValidation(cmk *mocks.MockProviderCmkClient, ctx context.Context, datacenterConfig *v1alpha1.CloudStackDatacenterConfig) { - cmk.EXPECT().ValidateZonePresent(ctx, datacenterConfig.Spec.Zones[0]).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea8f", nil) - cmk.EXPECT().ValidateDomainPresent(ctx, datacenterConfig.Spec.Domain).AnyTimes().Return(v1alpha1.CloudStackResourceIdentifier{Id: "5300cdac-74d5-11ec-8696-c81f66d3e965", Name: datacenterConfig.Spec.Domain}, nil) - cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), gomock.Any()).AnyTimes().Return(nil) - cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) - cmk.EXPECT().GetManagementApiEndpoint().AnyTimes().Return(datacenterConfig.Spec.ManagementApiEndpoint) + cmk.EXPECT().ValidateZonePresent(ctx, gomock.Any(), datacenterConfig.Spec.Zones[0]).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea8f", nil) + cmk.EXPECT().ValidateDomainPresent(ctx, gomock.Any(), datacenterConfig.Spec.Domain).AnyTimes().Return(v1alpha1.CloudStackResourceIdentifier{Id: "5300cdac-74d5-11ec-8696-c81f66d3e965", Name: datacenterConfig.Spec.Domain}, nil) + cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) + cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) + cmk.EXPECT().GetManagementApiEndpoint(gomock.Any()).AnyTimes().MaxTimes(1).Return("http://127.16.0.1:8080/client/api", nil) } func setupMockForAvailabilityZonesValidation(cmk *mocks.MockProviderCmkClient, ctx context.Context, azs []v1alpha1.CloudStackAvailabilityZone) { for _, az := range azs { - cmk.EXPECT().ValidateZonePresent(ctx, az.Zone).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea82", nil) - cmk.EXPECT().ValidateDomainPresent(ctx, az.Domain).AnyTimes().Return(v1alpha1.CloudStackResourceIdentifier{Id: "5300cdac-74d5-11ec-8696-c81f66d3e962", Name: az.Domain}, nil) - cmk.EXPECT().ValidateAccountPresent(ctx, az.Account, gomock.Any()).AnyTimes().Return(nil) - cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) - cmk.EXPECT().GetManagementApiEndpoint().AnyTimes().Return(az.ManagementApiEndpoint) + cmk.EXPECT().ValidateZonePresent(ctx, gomock.Any(), az.Zone).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea82", nil) + cmk.EXPECT().ValidateDomainPresent(ctx, gomock.Any(), az.Domain).AnyTimes().Return(v1alpha1.CloudStackResourceIdentifier{Id: "5300cdac-74d5-11ec-8696-c81f66d3e962", Name: az.Domain}, nil) + cmk.EXPECT().ValidateAccountPresent(ctx, 
gomock.Any(), az.Account, gomock.Any()).AnyTimes().Return(nil) + cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) } } @@ -631,7 +632,7 @@ func TestSetupAndValidateCreateClusterCPMachineGroupRefNil(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -657,7 +658,7 @@ func TestSetupAndValidateCreateClusterWorkerMachineGroupRefNil(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -683,7 +684,7 @@ func TestSetupAndValidateCreateClusterEtcdMachineGroupRefNil(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -709,7 +710,7 @@ func TestSetupAndValidateCreateClusterCPMachineGroupRefNonexistent(t *testing.T) t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -735,7 +736,7 @@ func TestSetupAndValidateCreateClusterWorkerMachineGroupRefNonexistent(t *testin t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -761,7 +762,7 @@ func TestSetupAndValidateCreateClusterEtcdMachineGroupRefNonexistent(t *testing. 
t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -787,7 +788,7 @@ func TestSetupAndValidateCreateClusterTemplateDifferent(t *testing.T) { t.Fatalf("unable to get machine configs from file %s", testClusterConfigMainFilename) } clusterSpec := test.NewFullClusterSpec(t, path.Join(testDataDir, testClusterConfigMainFilename)) - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) if err != nil { t.Fatalf("unable to get datacenter config from file") @@ -823,14 +824,14 @@ func TestValidateMachineConfigsHappyCase(t *testing.T) { datacenterConfig: datacenterConfig, machineConfigsLookup: machineConfigs, } - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) setupMockForDatacenterConfigValidation(cmk, ctx, datacenterConfig) - cmk.EXPECT().ValidateTemplatePresent(ctx, gomock.Any(), + cmk.EXPECT().ValidateTemplatePresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), datacenterConfig.Spec.Account, testTemplate).Times(3) - cmk.EXPECT().ValidateServiceOfferingPresent(ctx, gomock.Any(), testOffering).Times(3) - cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).Times(3) - cmk.EXPECT().ValidateAffinityGroupsPresent(ctx, gomock.Any(), datacenterConfig.Spec.Account, gomock.Any()).Times(3) + cmk.EXPECT().ValidateServiceOfferingPresent(ctx, gomock.Any(), gomock.Any(), testOffering).Times(3) + cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateAffinityGroupsPresent(ctx, gomock.Any(), gomock.Any(), datacenterConfig.Spec.Account, gomock.Any()).Times(3) _ = validator.ValidateCloudStackDatacenterConfig(ctx, datacenterConfig) err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) @@ -849,7 +850,7 @@ func TestValidateCloudStackMachineConfig(t *testing.T) { if err != nil { t.Fatalf("unable to get datacenter config from file") } - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) + validator := NewValidator(cmk) for _, machineConfig := range machineConfigs { err := validator.validateMachineConfig(ctx, datacenterConfig, machineConfig) @@ -885,17 +886,17 @@ func TestValidateMachineConfigsWithAffinity(t *testing.T) { machineConfig.Spec.AffinityGroupIds = []string{} } - validator := NewValidator(CmkClientMap{decoder.CloudStackGlobalAZ: cmk}) - cmk.EXPECT().ValidateZonePresent(gomock.Any(), gomock.Any()).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea8f", nil) - cmk.EXPECT().ValidateDomainPresent(gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), gomock.Any()).AnyTimes().Return(nil) - cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) - cmk.EXPECT().GetManagementApiEndpoint().AnyTimes().Return("http://127.16.0.1:8080/client/api") + validator := NewValidator(cmk) + 
cmk.EXPECT().ValidateZonePresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea8f", nil) + cmk.EXPECT().ValidateDomainPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) + cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) + cmk.EXPECT().GetManagementApiEndpoint(gomock.Any()).AnyTimes().Return("http://127.16.0.1:8080/client/api", nil) - cmk.EXPECT().ValidateTemplatePresent(ctx, gomock.Any(), gomock.Any(), datacenterConfig.Spec.Account, testTemplate).AnyTimes() - cmk.EXPECT().ValidateServiceOfferingPresent(ctx, gomock.Any(), testOffering).AnyTimes() - cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateAffinityGroupsPresent(ctx, gomock.Any(), datacenterConfig.Spec.Account, gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateTemplatePresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), datacenterConfig.Spec.Account, testTemplate).AnyTimes() + cmk.EXPECT().ValidateServiceOfferingPresent(ctx, gomock.Any(), gomock.Any(), testOffering).AnyTimes() + cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateAffinityGroupsPresent(ctx, gomock.Any(), gomock.Any(), datacenterConfig.Spec.Account, gomock.Any()).AnyTimes() // Valid affinity types err = validator.ValidateClusterMachineConfigs(ctx, cloudStackClusterSpec) From cbd45c9a7f421ba9fcee883074f4a28c352d0ad4 Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Thu, 30 Jun 2022 14:19:55 -0500 Subject: [PATCH 08/22] Remove ValidateCloudStackConnection when cleaning up VMs --- internal/test/cleanup/cleanup.go | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/internal/test/cleanup/cleanup.go b/internal/test/cleanup/cleanup.go index 789ec6049df1..08106f327acd 100644 --- a/internal/test/cleanup/cleanup.go +++ b/internal/test/cleanup/cleanup.go @@ -102,20 +102,9 @@ func CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dry cmk := executableBuilder.BuildCmkExecutable(tmpWriter, execConfig.Profiles) defer cmk.Close(ctx) for _, profile := range execConfig.Profiles { - if err := cleanupCloudStackVms(ctx, profile.Name, cmk, clusterName, dryRun); err != nil { + if err := cmk.CleanupVms(ctx, profile.Name, clusterName, dryRun); err != nil { cmk.Close(ctx) } } return nil } - -func cleanupCloudStackVms(ctx context.Context, profile string, cmk *executables.Cmk, clusterName string, dryRun bool) error { - if err := cmk.ValidateCloudStackConnection(ctx, profile); err != nil { - return fmt.Errorf("validating cloudstack connection with cloudmonkey: %v", err) - } - - if err := cmk.CleanupVms(ctx, profile, clusterName, dryRun); err != nil { - return fmt.Errorf("cleaning up VMs with cloudmonkey: %v", err) - } - return nil -} From 369b53d7d0b2574001ef4547ff35ec887200caf4 Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Thu, 30 Jun 2022 14:22:36 -0500 Subject: [PATCH 09/22] Remove NewProviderCustomNet because it's the same as NewProvider --- pkg/providers/cloudstack/cloudstack.go | 13 ------------- pkg/providers/cloudstack/cloudstack_test.go | 2 +- 2 files changed, 1 insertion(+), 14 deletions(-) diff --git a/pkg/providers/cloudstack/cloudstack.go b/pkg/providers/cloudstack/cloudstack.go index 1aa321b368da..732c292db03a 100644 --- 
a/pkg/providers/cloudstack/cloudstack.go +++ b/pkg/providers/cloudstack/cloudstack.go @@ -202,19 +202,6 @@ type ProviderKubectlClient interface { } func NewProvider(datacenterConfig *v1alpha1.CloudStackDatacenterConfig, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig, clusterConfig *v1alpha1.Cluster, providerKubectlClient ProviderKubectlClient, providerCmkClient ProviderCmkClient, writer filewriter.FileWriter, now types.NowFunc, skipIpCheck bool) *cloudstackProvider { - return NewProviderCustomNet( - datacenterConfig, - machineConfigs, - clusterConfig, - providerKubectlClient, - providerCmkClient, - writer, - now, - skipIpCheck, - ) -} - -func NewProviderCustomNet(datacenterConfig *v1alpha1.CloudStackDatacenterConfig, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig, clusterConfig *v1alpha1.Cluster, providerKubectlClient ProviderKubectlClient, providerCmkClient ProviderCmkClient, writer filewriter.FileWriter, now types.NowFunc, skipIpCheck bool) *cloudstackProvider { var controlPlaneMachineSpec, etcdMachineSpec *v1alpha1.CloudStackMachineConfigSpec workerNodeGroupMachineSpecs := make(map[string]v1alpha1.CloudStackMachineConfigSpec, len(machineConfigs)) if clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef != nil && machineConfigs[clusterConfig.Spec.ControlPlaneConfiguration.MachineGroupRef.Name] != nil { diff --git a/pkg/providers/cloudstack/cloudstack_test.go b/pkg/providers/cloudstack/cloudstack_test.go index 1fab79718fcf..23e42669d3f1 100644 --- a/pkg/providers/cloudstack/cloudstack_test.go +++ b/pkg/providers/cloudstack/cloudstack_test.go @@ -195,7 +195,7 @@ func newProviderWithKubectl(t *testing.T, datacenterConfig *v1alpha1.CloudStackD func newProvider(t *testing.T, datacenterConfig *v1alpha1.CloudStackDatacenterConfig, machineConfigs map[string]*v1alpha1.CloudStackMachineConfig, clusterConfig *v1alpha1.Cluster, kubectl ProviderKubectlClient, cmk ProviderCmkClient) *cloudstackProvider { _, writer := test.NewWriter(t) - return NewProviderCustomNet( + return NewProvider( datacenterConfig, machineConfigs, clusterConfig, From 260ec6eef0728c8f0db9139cbcbd7c0f8a070c45 Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Thu, 30 Jun 2022 14:23:47 -0500 Subject: [PATCH 10/22] Avoid using 'failed to' in errors --- pkg/providers/cloudstack/decoder/decoder.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/providers/cloudstack/decoder/decoder.go b/pkg/providers/cloudstack/decoder/decoder.go index f6bbd6969afd..b4b3445e1e66 100644 --- a/pkg/providers/cloudstack/decoder/decoder.go +++ b/pkg/providers/cloudstack/decoder/decoder.go @@ -24,11 +24,11 @@ func ParseCloudStackSecret() (*CloudStackExecConfig, error) { } decodedString, err := b64.StdEncoding.DecodeString(cloudStackB64EncodedSecret) if err != nil { - return nil, fmt.Errorf("failed to decode value for %s with base64: %v", EksacloudStackCloudConfigB64SecretKey, err) + return nil, fmt.Errorf("decoding value for %s with base64: %v", EksacloudStackCloudConfigB64SecretKey, err) } cfg, err := ini.Load(decodedString) if err != nil { - return nil, fmt.Errorf("failed to extract values from %s with ini: %v", EksacloudStackCloudConfigB64SecretKey, err) + return nil, fmt.Errorf("extracting values from %s with ini: %v", EksacloudStackCloudConfigB64SecretKey, err) } cloudstackProfiles := []CloudStackProfileConfig{} @@ -40,15 +40,15 @@ func ParseCloudStackSecret() (*CloudStackExecConfig, error) { apiKey, err := section.GetKey("api-key") if err != nil { - return nil, fmt.Errorf("failed to extract 
value of 'api-key' from %s: %v", section.Name(), err) + return nil, fmt.Errorf("extracting value of 'api-key' from %s: %v", section.Name(), err) } secretKey, err := section.GetKey("secret-key") if err != nil { - return nil, fmt.Errorf("failed to extract value of 'secret-key' from %s: %v", EksacloudStackCloudConfigB64SecretKey, err) + return nil, fmt.Errorf("extracting value of 'secret-key' from %s: %v", EksacloudStackCloudConfigB64SecretKey, err) } apiUrl, err := section.GetKey("api-url") if err != nil { - return nil, fmt.Errorf("failed to extract value of 'api-url' from %s: %v", EksacloudStackCloudConfigB64SecretKey, err) + return nil, fmt.Errorf("extracting value of 'api-url' from %s: %v", EksacloudStackCloudConfigB64SecretKey, err) } verifySslValue := "true" if verifySsl, err := section.GetKey("verify-ssl"); err == nil { From 538294331b39fc0e12acf68c1387edf0fa80abd3 Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Thu, 30 Jun 2022 14:42:23 -0500 Subject: [PATCH 11/22] Remove localAvailabilityZones from Validator --- pkg/providers/cloudstack/validator.go | 34 +++++++++++++--------- pkg/providers/cloudstack/validator_test.go | 6 ++++ 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/pkg/providers/cloudstack/validator.go b/pkg/providers/cloudstack/validator.go index bc3f1d807470..7dd70e2849f4 100644 --- a/pkg/providers/cloudstack/validator.go +++ b/pkg/providers/cloudstack/validator.go @@ -14,8 +14,7 @@ import ( ) type Validator struct { - cmk ProviderCmkClient - localAvailabilityZones []localAvailabilityZone + cmk ProviderCmkClient } // Taken from https://github.com/shapeblue/cloudstack/blob/08bb4ad9fea7e422c3d3ac6d52f4670b1e89eed7/api/src/main/java/com/cloud/vm/VmDetailConstants.java @@ -32,8 +31,7 @@ var restrictedUserCustomDetails = [...]string{ func NewValidator(cmk ProviderCmkClient) *Validator { return &Validator{ - cmk: cmk, - localAvailabilityZones: []localAvailabilityZone{}, + cmk: cmk, } } @@ -76,11 +74,12 @@ func (v *Validator) validateCloudStackAccess(ctx context.Context, datacenterConf } func (v *Validator) ValidateCloudStackDatacenterConfig(ctx context.Context, datacenterConfig *anywherev1.CloudStackDatacenterConfig) error { - if err := v.generateLocalAvailabilityZones(ctx, datacenterConfig); err != nil { + localAvailabilityZones, err := generateLocalAvailabilityZones(ctx, datacenterConfig) + if err != nil { return err } - for _, az := range v.localAvailabilityZones { + for _, az := range localAvailabilityZones { _, err := getHostnameFromUrl(az.ManagementApiEndpoint) if err != nil { return fmt.Errorf("checking management api endpoint: %v", err) @@ -123,9 +122,11 @@ func (v *Validator) ValidateCloudStackDatacenterConfig(ctx context.Context, data return nil } -func (v *Validator) generateLocalAvailabilityZones(ctx context.Context, datacenterConfig *anywherev1.CloudStackDatacenterConfig) error { +func generateLocalAvailabilityZones(ctx context.Context, datacenterConfig *anywherev1.CloudStackDatacenterConfig) ([]localAvailabilityZone, error) { + localAvailabilityZones := []localAvailabilityZone{} + if datacenterConfig == nil { - return errors.New("CloudStack Datacenter Config is null") + return nil, errors.New("CloudStack Datacenter Config is null") } if len(datacenterConfig.Spec.Domain) > 0 { @@ -140,20 +141,20 @@ func (v *Validator) generateLocalAvailabilityZones(ctx context.Context, datacent Zone: zone, }, } - v.localAvailabilityZones = append(v.localAvailabilityZones, availabilityZone) + localAvailabilityZones = append(localAvailabilityZones, availabilityZone) } 
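To make the multi-endpoint flow concrete, here is a minimal sketch of decoding a two-profile secret with the decoder change above and walking the resulting profiles. It assumes the base64 payload is supplied through the environment variable named by EksacloudStackCloudConfigB64SecretKey, reads only Name from each profile, and uses an illustrative second section name and placeholder credential values.

package main

import (
	b64 "encoding/base64"
	"fmt"
	"log"
	"os"

	"github.com/aws/eks-anywhere/pkg/providers/cloudstack/decoder"
)

func main() {
	// One INI section per CloudStack endpoint; keys match the ones the decoder reads.
	cloudConfig := `[Global]
api-key = example-api-key-1
secret-key = example-secret-key-1
api-url = http://endpoint-1:8080/client/api

[endpoint-2]
api-key = example-api-key-2
secret-key = example-secret-key-2
api-url = http://endpoint-2:8080/client/api
verify-ssl = false
`
	// Assumption: the decoder reads the base64-encoded config from this env var.
	os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, b64.StdEncoding.EncodeToString([]byte(cloudConfig)))

	execConfig, err := decoder.ParseCloudStackSecret()
	if err != nil {
		log.Fatalf("parsing cloudstack secret: %v", err)
	}

	// One profile per INI section; the profile name is what gets threaded through cmk calls.
	for _, profile := range execConfig.Profiles {
		fmt.Println(profile.Name)
	}
}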
} for _, az := range datacenterConfig.Spec.AvailabilityZones { availabilityZone := localAvailabilityZone{ CloudStackAvailabilityZone: &az, } - v.localAvailabilityZones = append(v.localAvailabilityZones, availabilityZone) + localAvailabilityZones = append(localAvailabilityZones, availabilityZone) } - if len(v.localAvailabilityZones) <= 0 { - return fmt.Errorf("CloudStackDatacenterConfig domain or localAvailabilityZones is not set or is empty") + if len(localAvailabilityZones) <= 0 { + return nil, fmt.Errorf("CloudStackDatacenterConfig domain or localAvailabilityZones is not set or is empty") } - return nil + return localAvailabilityZones, nil } // TODO: dry out machine configs validations @@ -267,7 +268,12 @@ func (v *Validator) validateMachineConfig(ctx context.Context, datacenterConfig } } - for _, az := range v.localAvailabilityZones { + localAvailabilityZones, err := generateLocalAvailabilityZones(ctx, datacenterConfig) + if err != nil { + return err + } + + for _, az := range localAvailabilityZones { if err := v.cmk.ValidateTemplatePresent(ctx, az.CredentialsRef, az.DomainId, az.CloudStackAvailabilityZone.Zone.Id, az.Account, machineConfig.Spec.Template); err != nil { return fmt.Errorf("validating template: %v", err) } diff --git a/pkg/providers/cloudstack/validator_test.go b/pkg/providers/cloudstack/validator_test.go index fef38c4829a7..5486bd4f2853 100644 --- a/pkg/providers/cloudstack/validator_test.go +++ b/pkg/providers/cloudstack/validator_test.go @@ -852,6 +852,12 @@ func TestValidateCloudStackMachineConfig(t *testing.T) { } validator := NewValidator(cmk) + cmk.EXPECT().ValidateTemplatePresent(ctx, gomock.Any(), gomock.Any(), + gomock.Any(), datacenterConfig.Spec.Account, testTemplate).Times(3) + cmk.EXPECT().ValidateServiceOfferingPresent(ctx, gomock.Any(), gomock.Any(), testOffering).Times(3) + cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(3) + cmk.EXPECT().ValidateAffinityGroupsPresent(ctx, gomock.Any(), gomock.Any(), datacenterConfig.Spec.Account, gomock.Any()).Times(3) + for _, machineConfig := range machineConfigs { err := validator.validateMachineConfig(ctx, datacenterConfig, machineConfig) if err != nil { From dca6a9e9f35ce5f684c3381b766e2272dda00c26 Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Thu, 30 Jun 2022 16:25:43 -0500 Subject: [PATCH 12/22] Remove multipleZone from ValidateNetworkPresent --- internal/test/cleanup/cleanup.go | 8 +++++++- pkg/dependencies/factory.go | 1 - pkg/executables/cmk.go | 17 +++-------------- pkg/executables/cmk_cmd_builder.go | 4 ---- pkg/executables/cmk_test.go | 6 +++--- pkg/providers/cloudstack/cloudstack_test.go | 2 +- pkg/providers/cloudstack/mocks/client.go | 8 ++++---- pkg/providers/cloudstack/validator.go | 9 ++++----- pkg/providers/cloudstack/validator_test.go | 6 +++--- 9 files changed, 25 insertions(+), 36 deletions(-) diff --git a/internal/test/cleanup/cleanup.go b/internal/test/cleanup/cleanup.go index 08106f327acd..3801646131ab 100644 --- a/internal/test/cleanup/cleanup.go +++ b/internal/test/cleanup/cleanup.go @@ -101,10 +101,16 @@ func CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dry } cmk := executableBuilder.BuildCmkExecutable(tmpWriter, execConfig.Profiles) defer cmk.Close(ctx) + + failedProfiles := []string{} for _, profile := range execConfig.Profiles { if err := cmk.CleanupVms(ctx, profile.Name, clusterName, dryRun); err != nil { - cmk.Close(ctx) + failedProfiles = append(failedProfiles, profile.Name) } } + + if len(failedProfiles) 
> 0 { + return fmt.Errorf("cleaning up VMs: %+v", failedProfiles) + } return nil } diff --git a/pkg/dependencies/factory.go b/pkg/dependencies/factory.go index 6a782404b784..8c01b4cf683d 100644 --- a/pkg/dependencies/factory.go +++ b/pkg/dependencies/factory.go @@ -267,7 +267,6 @@ func (f *Factory) WithProvider(clusterConfigFile string, clusterConfig *v1alpha1 return fmt.Errorf("unable to get machine config from file %s: %v", clusterConfigFile, err) } - // map[string]*executables.Cmk and map[string]ProviderCmkClient are not compatible so we convert the map manually f.dependencies.Provider = cloudstack.NewProvider( datacenterConfig, machineConfigs, diff --git a/pkg/executables/cmk.go b/pkg/executables/cmk.go index e212bd23059d..c0adb717a33f 100644 --- a/pkg/executables/cmk.go +++ b/pkg/executables/cmk.go @@ -260,11 +260,8 @@ func (c *Cmk) ValidateDomainPresent(ctx context.Context, profile string, domain return domainIdentifier, nil } -func (c *Cmk) ValidateNetworkPresent(ctx context.Context, profile string, domainId string, network v1alpha1.CloudStackResourceIdentifier, zoneId string, account string, multipleZone bool) error { +func (c *Cmk) ValidateNetworkPresent(ctx context.Context, profile string, domainId string, network v1alpha1.CloudStackResourceIdentifier, zoneId string, account string) error { command := newCmkCommand("list networks") - if multipleZone { - applyCmkArgs(&command, withCloudStackNetworkType(Shared)) - } // account must be specified within a domainId // domainId can be specified without account if len(domainId) > 0 { @@ -279,11 +276,7 @@ func (c *Cmk) ValidateNetworkPresent(ctx context.Context, profile string, domain return fmt.Errorf("getting network info - %s: %v", result.String(), err) } if result.Len() == 0 { - if multipleZone { - return fmt.Errorf("%s network %s not found in zone %s", Shared, network, zoneId) - } else { - return fmt.Errorf("network %s not found in zone %s", network, zoneId) - } + return fmt.Errorf("network %s not found in zone %s", network, zoneId) } response := struct { @@ -310,11 +303,7 @@ func (c *Cmk) ValidateNetworkPresent(ctx context.Context, profile string, domain if len(networks) > 1 { return fmt.Errorf("duplicate network %s found", network) } else if len(networks) == 0 { - if multipleZone { - return fmt.Errorf("%s network %s not found in zoneRef %s", Shared, network, zoneId) - } else { - return fmt.Errorf("network %s not found in zoneRef %s", network, zoneId) - } + return fmt.Errorf("network %s not found in zoneRef %s", network, zoneId) } return nil } diff --git a/pkg/executables/cmk_cmd_builder.go b/pkg/executables/cmk_cmd_builder.go index 289bd148ad0a..672d2b1dc859 100644 --- a/pkg/executables/cmk_cmd_builder.go +++ b/pkg/executables/cmk_cmd_builder.go @@ -35,10 +35,6 @@ func withCloudStackZoneId(zoneId string) cmkCommandArgs { return appendArgs(fmt.Sprintf("zoneid=\"%s\"", zoneId)) } -func withCloudStackNetworkType(networkType string) cmkCommandArgs { - return appendArgs(fmt.Sprintf("type=\"%s\"", networkType)) -} - func withCloudStackId(id string) cmkCommandArgs { return appendArgs(fmt.Sprintf("id=\"%s\"", id)) } diff --git a/pkg/executables/cmk_test.go b/pkg/executables/cmk_test.go index 4d1f4d377a78..3df3cf334cf0 100644 --- a/pkg/executables/cmk_test.go +++ b/pkg/executables/cmk_test.go @@ -510,7 +510,7 @@ func TestCmkListOperations(t *testing.T) { "list", "networks", fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"), }, cmkFunc: func(cmk 
executables.Cmk, ctx context.Context) error { - return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName, false) + return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName) }, cmkResponseError: nil, wantErr: false, @@ -525,7 +525,7 @@ func TestCmkListOperations(t *testing.T) { "list", "networks", fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName, false) + return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName) }, cmkResponseError: nil, wantErr: true, @@ -540,7 +540,7 @@ func TestCmkListOperations(t *testing.T) { "list", "networks", fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName, false) + return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName) }, cmkResponseError: nil, wantErr: true, diff --git a/pkg/providers/cloudstack/cloudstack_test.go b/pkg/providers/cloudstack/cloudstack_test.go index 23e42669d3f1..5e617003e89b 100644 --- a/pkg/providers/cloudstack/cloudstack_test.go +++ b/pkg/providers/cloudstack/cloudstack_test.go @@ -67,7 +67,7 @@ func givenWildcardCmk(mockCtrl *gomock.Controller) ProviderCmkClient { cmk.EXPECT().ValidateCloudStackConnection(gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateDomainPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateAccountPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateNetworkPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateNetworkPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().GetManagementApiEndpoint(gomock.Any()).AnyTimes().Return("http://127.16.0.1:8080/client/api", nil) return cmk } diff --git a/pkg/providers/cloudstack/mocks/client.go b/pkg/providers/cloudstack/mocks/client.go index cd94d84ff43e..51cd7c065dc5 100644 --- a/pkg/providers/cloudstack/mocks/client.go +++ b/pkg/providers/cloudstack/mocks/client.go @@ -128,17 +128,17 @@ func (mr *MockProviderCmkClientMockRecorder) ValidateDomainPresent(arg0, arg1, a } // ValidateNetworkPresent mocks base method. -func (m *MockProviderCmkClient) ValidateNetworkPresent(arg0 context.Context, arg1, arg2 string, arg3 v1alpha1.CloudStackResourceIdentifier, arg4, arg5 string, arg6 bool) error { +func (m *MockProviderCmkClient) ValidateNetworkPresent(arg0 context.Context, arg1, arg2 string, arg3 v1alpha1.CloudStackResourceIdentifier, arg4, arg5 string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateNetworkPresent", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret := m.ctrl.Call(m, "ValidateNetworkPresent", arg0, arg1, arg2, arg3, arg4, arg5) ret0, _ := ret[0].(error) return ret0 } // ValidateNetworkPresent indicates an expected call of ValidateNetworkPresent. 
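As a compact illustration of the regenerated mock's simplified network check (profile, domainId, network, zoneId, account after the context, with the multipleZone flag gone), a self-contained test sketch follows. The domain, zone, and account values are placeholders, and the import set mirrors the one these test files already use.

package cloudstack_test

import (
	"context"
	"testing"

	"github.com/golang/mock/gomock"

	"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/providers/cloudstack/mocks"
)

func TestValidateNetworkPresentMockShape(t *testing.T) {
	ctx := context.Background()
	cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t))
	network := v1alpha1.CloudStackResourceIdentifier{Name: "eksa-net"}

	// profile, domainId, network, zoneId, account — no multipleZone flag anymore.
	cmk.EXPECT().
		ValidateNetworkPresent(ctx, "Global", "domain-id", network, "zone-id", "admin").
		Return(nil)

	if err := cmk.ValidateNetworkPresent(ctx, "Global", "domain-id", network, "zone-id", "admin"); err != nil {
		t.Fatalf("validating network: %v", err)
	}
}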
-func (mr *MockProviderCmkClientMockRecorder) ValidateNetworkPresent(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { +func (mr *MockProviderCmkClientMockRecorder) ValidateNetworkPresent(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateNetworkPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateNetworkPresent), arg0, arg1, arg2, arg3, arg4, arg5, arg6) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateNetworkPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateNetworkPresent), arg0, arg1, arg2, arg3, arg4, arg5) } // ValidateServiceOfferingPresent mocks base method. diff --git a/pkg/providers/cloudstack/validator.go b/pkg/providers/cloudstack/validator.go index 7dd70e2849f4..af08d3220a56 100644 --- a/pkg/providers/cloudstack/validator.go +++ b/pkg/providers/cloudstack/validator.go @@ -49,7 +49,7 @@ type ProviderCmkClient interface { ValidateTemplatePresent(ctx context.Context, profile string, domainId string, zoneId string, account string, template anywherev1.CloudStackResourceIdentifier) error ValidateAffinityGroupsPresent(ctx context.Context, profile string, domainId string, account string, affinityGroupIds []string) error ValidateZonePresent(ctx context.Context, profile string, zone anywherev1.CloudStackZone) (string, error) - ValidateNetworkPresent(ctx context.Context, profile string, domainId string, network anywherev1.CloudStackResourceIdentifier, zoneId string, account string, multipleZone bool) error + ValidateNetworkPresent(ctx context.Context, profile string, domainId string, network anywherev1.CloudStackResourceIdentifier, zoneId string, account string) error ValidateDomainPresent(ctx context.Context, profile string, domain string) (anywherev1.CloudStackResourceIdentifier, error) ValidateAccountPresent(ctx context.Context, profile string, account string, domainId string) error } @@ -85,8 +85,7 @@ func (v *Validator) ValidateCloudStackDatacenterConfig(ctx context.Context, data return fmt.Errorf("checking management api endpoint: %v", err) } - cmk := v.cmk - endpoint, err := cmk.GetManagementApiEndpoint(az.CredentialsRef) + endpoint, err := v.cmk.GetManagementApiEndpoint(az.CredentialsRef) if err != nil { return err } @@ -113,7 +112,7 @@ func (v *Validator) ValidateCloudStackDatacenterConfig(ctx context.Context, data if len(az.CloudStackAvailabilityZone.Zone.Network.Id) == 0 && len(az.CloudStackAvailabilityZone.Zone.Network.Name) == 0 { return fmt.Errorf("zone network is not set or is empty") } - if err := v.cmk.ValidateNetworkPresent(ctx, az.CredentialsRef, az.DomainId, az.CloudStackAvailabilityZone.Zone.Network, zoneId, az.Account, true); err != nil { + if err := v.cmk.ValidateNetworkPresent(ctx, az.CredentialsRef, az.DomainId, az.CloudStackAvailabilityZone.Zone.Network, zoneId, az.Account); err != nil { return err } } @@ -152,7 +151,7 @@ func generateLocalAvailabilityZones(ctx context.Context, datacenterConfig *anywh } if len(localAvailabilityZones) <= 0 { - return nil, fmt.Errorf("CloudStackDatacenterConfig domain or localAvailabilityZones is not set or is empty") + return nil, fmt.Errorf("CloudStackDatacenterConfig domain or availabilityZones is not set or is empty") } return localAvailabilityZones, nil } diff --git a/pkg/providers/cloudstack/validator_test.go b/pkg/providers/cloudstack/validator_test.go index 5486bd4f2853..b3bedf63b1ac 100644 --- a/pkg/providers/cloudstack/validator_test.go +++ 
b/pkg/providers/cloudstack/validator_test.go @@ -611,7 +611,7 @@ func setupMockForDatacenterConfigValidation(cmk *mocks.MockProviderCmkClient, ct cmk.EXPECT().ValidateZonePresent(ctx, gomock.Any(), datacenterConfig.Spec.Zones[0]).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea8f", nil) cmk.EXPECT().ValidateDomainPresent(ctx, gomock.Any(), datacenterConfig.Spec.Domain).AnyTimes().Return(v1alpha1.CloudStackResourceIdentifier{Id: "5300cdac-74d5-11ec-8696-c81f66d3e965", Name: datacenterConfig.Spec.Domain}, nil) cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) - cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) + cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) cmk.EXPECT().GetManagementApiEndpoint(gomock.Any()).AnyTimes().MaxTimes(1).Return("http://127.16.0.1:8080/client/api", nil) } @@ -620,7 +620,7 @@ func setupMockForAvailabilityZonesValidation(cmk *mocks.MockProviderCmkClient, c cmk.EXPECT().ValidateZonePresent(ctx, gomock.Any(), az.Zone).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea82", nil) cmk.EXPECT().ValidateDomainPresent(ctx, gomock.Any(), az.Domain).AnyTimes().Return(v1alpha1.CloudStackResourceIdentifier{Id: "5300cdac-74d5-11ec-8696-c81f66d3e962", Name: az.Domain}, nil) cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), az.Account, gomock.Any()).AnyTimes().Return(nil) - cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) + cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) } } @@ -896,7 +896,7 @@ func TestValidateMachineConfigsWithAffinity(t *testing.T) { cmk.EXPECT().ValidateZonePresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea8f", nil) cmk.EXPECT().ValidateDomainPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) - cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) + cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) cmk.EXPECT().GetManagementApiEndpoint(gomock.Any()).AnyTimes().Return("http://127.16.0.1:8080/client/api", nil) cmk.EXPECT().ValidateTemplatePresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), datacenterConfig.Spec.Account, testTemplate).AnyTimes() From 074a62cb87e231402239d36ff272374578d3df01 Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Fri, 1 Jul 2022 10:38:18 -0500 Subject: [PATCH 13/22] Return list of errors CleanUpCloudstackTestResources --- internal/test/cleanup/cleanup.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/internal/test/cleanup/cleanup.go b/internal/test/cleanup/cleanup.go index 3801646131ab..08f085f43c5f 100644 --- a/internal/test/cleanup/cleanup.go +++ b/internal/test/cleanup/cleanup.go @@ -103,14 +103,16 @@ func CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dry defer cmk.Close(ctx) failedProfiles := []string{} + errors := []error{} for _, profile := range execConfig.Profiles { if err := cmk.CleanupVms(ctx, profile.Name, 
clusterName, dryRun); err != nil { failedProfiles = append(failedProfiles, profile.Name) + errors = append(errors, err) } } if len(failedProfiles) > 0 { - return fmt.Errorf("cleaning up VMs: %+v", failedProfiles) + return fmt.Errorf("cleaning up VMs: profiles=%+v, errors=%+v", failedProfiles, errors) } return nil } From bdf278e8aa98ba2760a618c53b79237db41ec808 Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Fri, 1 Jul 2022 11:41:44 -0500 Subject: [PATCH 14/22] Increase unit test coverage --- pkg/executables/cmk.go | 4 +- pkg/executables/cmk_test.go | 445 +++++++++++------- .../testdata/cmk_list_network_multiple.json | 135 ++++++ .../testdata/cmk_list_network_none.json | 5 + .../testdata/cmk_list_template_multiple.json | 89 ++++ .../testdata/cmk_list_template_none.json | 6 + .../testdata/cmk_list_zone_multiple.json | 27 ++ .../testdata/cmk_list_zone_none.json | 5 + 8 files changed, 556 insertions(+), 160 deletions(-) create mode 100644 pkg/executables/testdata/cmk_list_network_multiple.json create mode 100644 pkg/executables/testdata/cmk_list_network_none.json create mode 100644 pkg/executables/testdata/cmk_list_template_multiple.json create mode 100644 pkg/executables/testdata/cmk_list_template_none.json create mode 100644 pkg/executables/testdata/cmk_list_zone_multiple.json create mode 100644 pkg/executables/testdata/cmk_list_zone_none.json diff --git a/pkg/executables/cmk.go b/pkg/executables/cmk.go index c0adb717a33f..94d1b92108ef 100644 --- a/pkg/executables/cmk.go +++ b/pkg/executables/cmk.go @@ -412,12 +412,12 @@ func (c *Cmk) CleanupVms(ctx context.Context, profile string, clusterName string func (c *Cmk) exec(ctx context.Context, profile string, args ...string) (stdout bytes.Buffer, err error) { if err != nil { - return stdout, fmt.Errorf("failed get environment map: %v", err) + return bytes.Buffer{}, fmt.Errorf("failed get environment map: %v", err) } configFile, err := c.buildCmkConfigFile(profile) if err != nil { - return stdout, fmt.Errorf("failed cmk validations: %v", err) + return bytes.Buffer{}, fmt.Errorf("failed cmk validations: %v", err) } argsWithConfigFile := append([]string{"-c", configFile}, args...) diff --git a/pkg/executables/cmk_test.go b/pkg/executables/cmk_test.go index 3df3cf334cf0..ce11232f75e3 100644 --- a/pkg/executables/cmk_test.go +++ b/pkg/executables/cmk_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/golang/mock/gomock" + . 
"github.com/onsi/gomega" "github.com/aws/eks-anywhere/internal/test" "github.com/aws/eks-anywhere/pkg/api/v1alpha1" @@ -279,14 +280,13 @@ func TestCmkListOperations(t *testing.T) { _, writer := test.NewWriter(t) configFilePath, _ := filepath.Abs(filepath.Join(writer.Dir(), "generated", cmkConfigFileName)) tests := []struct { - testName string - argumentsExecCall []string - jsonResponseFile string - cmkFunc func(cmk executables.Cmk, ctx context.Context) error - cmkResponseError error - wantErr bool - shouldSecondCallOccur bool - wantResultCount int + testName string + argumentsExecCall []string + jsonResponseFile string + cmkFunc func(cmk executables.Cmk, ctx context.Context) error + cmkResponseError error + wantErr bool + wantResultCount int }{ { testName: "listdomain success on name root", @@ -302,10 +302,9 @@ func TestCmkListOperations(t *testing.T) { } return err }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: true, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: false, + wantResultCount: 0, }, { testName: "listdomain success on name filter", @@ -321,10 +320,9 @@ func TestCmkListOperations(t *testing.T) { } return err }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: true, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: false, + wantResultCount: 0, }, { testName: "listdomain failure on multiple returns", @@ -337,10 +335,9 @@ func TestCmkListOperations(t *testing.T) { _, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, domainName) return err }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: true, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listdomain success on multiple returns", @@ -356,10 +353,9 @@ func TestCmkListOperations(t *testing.T) { } return err }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: true, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: false, + wantResultCount: 0, }, { testName: "listdomains json parse exception", @@ -372,10 +368,9 @@ func TestCmkListOperations(t *testing.T) { _, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, domain) return err }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: false, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listdomains no results", @@ -388,10 +383,9 @@ func TestCmkListOperations(t *testing.T) { _, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, domain) return err }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: true, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listaccounts success on name filter", @@ -403,10 +397,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateAccountPresent(ctx, execConfig.Profiles[0].Name, accountName, domainId) }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: true, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: false, + wantResultCount: 0, }, { testName: "listaccounts json parse exception", @@ -418,10 +411,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateAccountPresent(ctx, execConfig.Profiles[0].Name, accountName, domainId) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: false, - wantResultCount: 0, + 
cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listaccounts no results", @@ -433,10 +425,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateAccountPresent(ctx, execConfig.Profiles[0].Name, accountName, domainId) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: true, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listzones success on name filter", @@ -449,10 +440,9 @@ func TestCmkListOperations(t *testing.T) { _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[0]) return err }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: false, - wantResultCount: 1, + cmkResponseError: nil, + wantErr: false, + wantResultCount: 1, }, { testName: "listzones success on id filter", @@ -465,10 +455,54 @@ func TestCmkListOperations(t *testing.T) { _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[2]) return err }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: true, - wantResultCount: 1, + cmkResponseError: nil, + wantErr: false, + wantResultCount: 1, + }, + { + testName: "listzones failure on multple results", + jsonResponseFile: "testdata/cmk_list_zone_multiple.json", + argumentsExecCall: []string{ + "-c", configFilePath, + "list", "zones", fmt.Sprintf("id=\"%s\"", resourceId.Id), + }, + cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { + _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[2]) + return err + }, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 1, + }, + { + testName: "listzones failure on none results", + jsonResponseFile: "testdata/cmk_list_zone_none.json", + argumentsExecCall: []string{ + "-c", configFilePath, + "list", "zones", fmt.Sprintf("id=\"%s\"", resourceId.Id), + }, + cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { + _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[2]) + return err + }, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 1, + }, + { + testName: "listzones failure on cmk failure", + jsonResponseFile: "testdata/cmk_list_empty_response.json", + argumentsExecCall: []string{ + "-c", configFilePath, + "list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name), + }, + cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { + _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[0]) + return err + }, + cmkResponseError: errors.New("cmk calling return exception"), + wantErr: true, + wantResultCount: 0, }, { testName: "listzones no results", @@ -481,10 +515,9 @@ func TestCmkListOperations(t *testing.T) { _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[0]) return err }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: true, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listzones json parse exception", @@ -497,10 +530,9 @@ func TestCmkListOperations(t *testing.T) { _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[0]) return err }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: false, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listnetworks success on name filter", @@ -512,10 +544,51 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx 
context.Context) error { return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName) }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: false, - wantResultCount: 1, + cmkResponseError: nil, + wantErr: false, + wantResultCount: 1, + }, + { + testName: "listnetworks failure on multiple results", + jsonResponseFile: "testdata/cmk_list_network_multiple.json", + argumentsExecCall: []string{ + "-c", configFilePath, + "list", "networks", fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"), + }, + cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { + return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName) + }, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 1, + }, + { + testName: "listnetworks failure on none results", + jsonResponseFile: "testdata/cmk_list_network_none.json", + argumentsExecCall: []string{ + "-c", configFilePath, + "list", "networks", fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"), + }, + cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { + return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName) + }, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 1, + }, + { + testName: "listnetworks failure on cmk failure", + jsonResponseFile: "testdata/cmk_list_network_multiple.json", + argumentsExecCall: []string{ + "-c", configFilePath, + "list", "networks", fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), fmt.Sprintf("zoneid=\"%s\"", "TEST_RESOURCE"), + }, + cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { + return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName) + }, + cmkResponseError: errors.New("cmk calling return exception"), + wantErr: true, + wantResultCount: 1, }, { testName: "listnetworks no results", @@ -527,10 +600,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: true, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listnetworks json parse exception", @@ -542,10 +614,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateNetworkPresent(ctx, execConfig.Profiles[0].Name, domainId, zones[2].Network, zones[2].Id, accountName) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: false, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listserviceofferings success on name filter", @@ -557,10 +628,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateServiceOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, resourceName) }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: false, - wantResultCount: 1, + cmkResponseError: nil, + wantErr: false, + wantResultCount: 1, }, { testName: "listserviceofferings 
success on id filter", @@ -572,10 +642,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateServiceOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, resourceId) }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: true, - wantResultCount: 1, + cmkResponseError: nil, + wantErr: false, + wantResultCount: 1, }, { testName: "listserviceofferings no results", @@ -587,10 +656,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateServiceOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, resourceId) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: true, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listserviceofferings json parse exception", @@ -602,10 +670,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateServiceOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, resourceName) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: false, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listdiskofferings success on name filter", @@ -617,10 +684,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceName) }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: false, - wantResultCount: 1, + cmkResponseError: nil, + wantErr: false, + wantResultCount: 1, }, { testName: "listdiskofferings success on id filter", @@ -632,10 +698,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceID) }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: true, - wantResultCount: 1, + cmkResponseError: nil, + wantErr: false, + wantResultCount: 1, }, { testName: "listdiskofferings no results", @@ -647,10 +712,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceID) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: true, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listdiskofferings no results", @@ -662,10 +726,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceID) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: true, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listdiskofferings multiple results", @@ -677,10 +740,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceID) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: true, - wantResultCount: 4, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 4, }, { 
testName: "listdiskofferings customized results with customSizeInGB > 0", @@ -692,10 +754,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingCustomSizeInGB) }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: true, - wantResultCount: 1, + cmkResponseError: nil, + wantErr: false, + wantResultCount: 1, }, { testName: "listdiskofferings non-customized results with customSizeInGB > 0", @@ -707,10 +768,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingCustomSizeInGB) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: true, - wantResultCount: 1, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 1, }, { testName: "listdiskofferings non-customized results with customSizeInGB > 0", @@ -722,10 +782,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceID) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: true, - wantResultCount: 1, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 1, }, { testName: "listdiskofferings throw exception", @@ -737,10 +796,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceID) }, - cmkResponseError: errors.New("cmk calling return exception"), - wantErr: true, - shouldSecondCallOccur: true, - wantResultCount: 0, + cmkResponseError: errors.New("cmk calling return exception"), + wantErr: true, + wantResultCount: 0, }, { testName: "listdiskofferings json parse exception", @@ -752,10 +810,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateDiskOfferingPresent(ctx, execConfig.Profiles[0].Name, zoneId, diskOfferingResourceName) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: false, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "validatetemplate success on name filter", @@ -767,10 +824,20 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainId, zoneId, accountName, resourceName) }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: false, - wantResultCount: 1, + cmkResponseError: nil, + wantErr: false, + wantResultCount: 1, + }, + { + testName: "validatetemplate failure when passing invalid profile", + jsonResponseFile: "testdata/cmk_list_template_singular.json", + argumentsExecCall: nil, + cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { + return cmk.ValidateTemplatePresent(ctx, "xxx", domainId, zoneId, accountName, resourceName) + }, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 1, }, { testName: "validatetemplate success on id filter", @@ -782,10 +849,51 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainId, zoneId, 
accountName, resourceId) }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: true, - wantResultCount: 1, + cmkResponseError: nil, + wantErr: false, + wantResultCount: 1, + }, + { + testName: "validatetemplate failure on multiple results", + jsonResponseFile: "testdata/cmk_list_template_multiple.json", + argumentsExecCall: []string{ + "-c", configFilePath, + "list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("zoneid=\"%s\"", zoneId), fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), + }, + cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { + return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainId, zoneId, accountName, resourceId) + }, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 1, + }, + { + testName: "validatetemplate failure on none results", + jsonResponseFile: "testdata/cmk_list_template_none.json", + argumentsExecCall: []string{ + "-c", configFilePath, + "list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("zoneid=\"%s\"", zoneId), fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), + }, + cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { + return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainId, zoneId, accountName, resourceId) + }, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 1, + }, + { + testName: "validatetemplate failure on cmk failure", + jsonResponseFile: "testdata/cmk_list_template_none.json", + argumentsExecCall: []string{ + "-c", configFilePath, + "list", "templates", "templatefilter=all", "listall=true", fmt.Sprintf("id=\"%s\"", resourceId.Id), fmt.Sprintf("zoneid=\"%s\"", zoneId), fmt.Sprintf("domainid=\"%s\"", domainId), fmt.Sprintf("account=\"%s\"", accountName), + }, + cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { + return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainId, zoneId, accountName, resourceId) + }, + cmkResponseError: errors.New("cmk calling return exception"), + wantErr: true, + wantResultCount: 1, }, { testName: "validatetemplate no results", @@ -797,10 +905,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainId, zoneId, accountName, resourceName) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: true, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "validatetemplate json parse exception", @@ -812,10 +919,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateTemplatePresent(ctx, execConfig.Profiles[0].Name, domainId, zoneId, accountName, resourceName) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: false, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listaffinitygroups success on id filter", @@ -827,10 +933,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateAffinityGroupsPresent(ctx, execConfig.Profiles[0].Name, domainId, accountName, []string{resourceId.Id}) }, - cmkResponseError: nil, - wantErr: false, - shouldSecondCallOccur: false, - wantResultCount: 1, + cmkResponseError: 
nil, + wantErr: false, + wantResultCount: 1, }, { testName: "listaffinitygroups no results", @@ -842,10 +947,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateAffinityGroupsPresent(ctx, execConfig.Profiles[0].Name, domainId, accountName, []string{resourceId.Id}) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: false, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, { testName: "listaffinitygroups json parse exception", @@ -857,10 +961,9 @@ func TestCmkListOperations(t *testing.T) { cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { return cmk.ValidateAffinityGroupsPresent(ctx, execConfig.Profiles[0].Name, domainId, accountName, []string{resourceId.Id}) }, - cmkResponseError: nil, - wantErr: true, - shouldSecondCallOccur: false, - wantResultCount: 0, + cmkResponseError: nil, + wantErr: true, + wantResultCount: 0, }, } @@ -876,8 +979,10 @@ func TestCmkListOperations(t *testing.T) { defer tctx.RestoreContext() executable := mockexecutables.NewMockExecutable(mockCtrl) - executable.EXPECT().Execute(ctx, tt.argumentsExecCall). - Return(*bytes.NewBufferString(fileContent), tt.cmkResponseError) + if tt.argumentsExecCall != nil { + executable.EXPECT().Execute(ctx, tt.argumentsExecCall). + Return(*bytes.NewBufferString(fileContent), tt.cmkResponseError) + } cmk := executables.NewCmk(executable, writer, execConfig.Profiles) err := tt.cmkFunc(*cmk, ctx) if tt.wantErr && err != nil || !tt.wantErr && err == nil { @@ -887,3 +992,27 @@ func TestCmkListOperations(t *testing.T) { }) } } + +func TestCmkGetManagementApiEndpoint(t *testing.T) { + _, writer := test.NewWriter(t) + mockCtrl := gomock.NewController(t) + tt := NewWithT(t) + + var tctx testContext + tctx.SaveContext() + defer tctx.RestoreContext() + + executable := mockexecutables.NewMockExecutable(mockCtrl) + cmk := executables.NewCmk(executable, writer, execConfigWithMultipleProfiles.Profiles) + + endpoint, err := cmk.GetManagementApiEndpoint("test_name") + tt.Expect(err).To(BeNil()) + tt.Expect(endpoint).To(Equal("http://1.1.1.1:8080/client/api")) + + endpoint, err = cmk.GetManagementApiEndpoint("test_name_2") + tt.Expect(err).To(BeNil()) + tt.Expect(endpoint).To(Equal("http://1.1.1.1:8080/client/api_2")) + + _, err = cmk.GetManagementApiEndpoint("xxx") + tt.Expect(err).NotTo(BeNil()) +} diff --git a/pkg/executables/testdata/cmk_list_network_multiple.json b/pkg/executables/testdata/cmk_list_network_multiple.json new file mode 100644 index 000000000000..d249f79fce7e --- /dev/null +++ b/pkg/executables/testdata/cmk_list_network_multiple.json @@ -0,0 +1,135 @@ +{ + "count": 1, + "network": [ + { + "acltype": "Domain", + "broadcastdomaintype": "Native", + "broadcasturi": "vlan://untagged", + "canusefordeploy": true, + "cidr": "192.168.1.0/24", + "details": {}, + "displaynetwork": true, + "displaytext": "for zone 1, shared", + "dns1": "8.8.8.8", + "dns2": "8.8.4.4", + "domain": "ROOT", + "domainid": "5300cdac-74d5-11ec-8696-c81f66d3e965", + "gateway": "192.168.1.1", + "id": "aeb3a5e6-2e80-4a73-900f-d01bbd4874b5", + "ispersistent": false, + "issystem": false, + "name": "TEST_RESOURCE", + "netmask": "255.255.255.0", + "networkdomain": "cs1cloud.internal", + "networkofferingavailability": "Optional", + "networkofferingconservemode": true, + "networkofferingdisplaytext": "Offering for Shared networks", + "networkofferingid": "928c228c-c5c3-4f26-92ca-d611d5ebda49", + "networkofferingname": 
"DefaultSharedNetworkOffering", + "physicalnetworkid": "67ad3d0a-5a2f-4557-8563-1bb6577eebcf", + "redundantrouter": false, + "related": "aeb3a5e6-2e80-4a73-900f-d01bbd4874b5", + "restartrequired": false, + "service": [ + { + "capability": [ + { + "canchooseservicecapability": false, + "name": "DhcpAccrossMultipleSubnets", + "value": "true" + } + ], + "name": "Dhcp" + }, + { + "name": "UserData" + }, + { + "capability": [ + { + "canchooseservicecapability": false, + "name": "AllowDnsSuffixModification", + "value": "true" + } + ], + "name": "Dns" + } + ], + "specifyipranges": true, + "state": "Setup", + "strechedl2subnet": false, + "subdomainaccess": true, + "tags": [], + "traffictype": "Guest", + "type": "Shared", + "vlan": "untagged", + "zoneid": "151e4c35-7ba4-4f74-b35d-f3d8627118cc", + "zonename": "zone1" + }, + { + "acltype": "Domain", + "broadcastdomaintype": "Native", + "broadcasturi": "vlan://untagged", + "canusefordeploy": true, + "cidr": "192.168.1.0/24", + "details": {}, + "displaynetwork": true, + "displaytext": "for zone 1, shared", + "dns1": "8.8.8.8", + "dns2": "8.8.4.4", + "domain": "ROOT", + "domainid": "5300cdac-74d5-11ec-8696-c81f66d3e966", + "gateway": "192.168.1.1", + "id": "aeb3a5e6-2e80-4a73-900f-d01bbd4874b5", + "ispersistent": false, + "issystem": false, + "name": "TEST_RESOURCE", + "netmask": "255.255.255.0", + "networkdomain": "cs1cloud.internal", + "networkofferingavailability": "Optional", + "networkofferingconservemode": true, + "networkofferingdisplaytext": "Offering for Shared networks", + "networkofferingid": "928c228c-c5c3-4f26-92ca-d611d5ebda49", + "networkofferingname": "DefaultSharedNetworkOffering", + "physicalnetworkid": "67ad3d0a-5a2f-4557-8563-1bb6577eebcf", + "redundantrouter": false, + "related": "aeb3a5e6-2e80-4a73-900f-d01bbd4874b5", + "restartrequired": false, + "service": [ + { + "capability": [ + { + "canchooseservicecapability": false, + "name": "DhcpAccrossMultipleSubnets", + "value": "true" + } + ], + "name": "Dhcp" + }, + { + "name": "UserData" + }, + { + "capability": [ + { + "canchooseservicecapability": false, + "name": "AllowDnsSuffixModification", + "value": "true" + } + ], + "name": "Dns" + } + ], + "specifyipranges": true, + "state": "Setup", + "strechedl2subnet": false, + "subdomainaccess": true, + "tags": [], + "traffictype": "Guest", + "type": "Shared", + "vlan": "untagged", + "zoneid": "151e4c35-7ba4-4f74-b35d-f3d8627118cc", + "zonename": "zone1" + } + ] +} diff --git a/pkg/executables/testdata/cmk_list_network_none.json b/pkg/executables/testdata/cmk_list_network_none.json new file mode 100644 index 000000000000..d7e24648fabf --- /dev/null +++ b/pkg/executables/testdata/cmk_list_network_none.json @@ -0,0 +1,5 @@ +{ + "count": 0, + "network": [ + ] +} diff --git a/pkg/executables/testdata/cmk_list_template_multiple.json b/pkg/executables/testdata/cmk_list_template_multiple.json new file mode 100644 index 000000000000..e3b28e7c9983 --- /dev/null +++ b/pkg/executables/testdata/cmk_list_template_multiple.json @@ -0,0 +1,89 @@ +{ + "count": 2, + "template": [ + { + "account": "system", + "bits": 0, + "checksum": "ed0e788280ff2912ea40f7f91ca7a249", + "created": "2021-11-01T15:02:10-0400", + "crossZones": true, + "deployasis": false, + "details": {}, + "directdownload": false, + "displaytext": "CentOS 5.5(64-bit) no GUI (KVM)", + "domain": "ROOT", + "domainid": "4ab50296-3b45-11ec-a097-a8a15983abb5", + "downloaddetails": [ + { + "datastore": "nfs://192.168.1.88:/export/secondary", + "downloadPercent": "100", + "downloadState": 
"DOWNLOADED" + } + ], + "format": "QCOW2", + "hypervisor": "KVM", + "id": "4ab79b52-3b45-11ec-a097-a8a15983abb5", + "isdynamicallyscalable": false, + "isextractable": true, + "isfeatured": true, + "ispublic": true, + "isready": true, + "name": "CentOS 5.5(64-bit) no GUI (KVM)", + "ostypeid": "4ac797d4-3b45-11ec-a097-a8a15983abb5", + "ostypename": "CentOS 5.5 (64-bit)", + "passwordenabled": false, + "physicalsize": 1769537536, + "requireshvm": false, + "size": 8589934592, + "sshkeyenabled": false, + "status": "Download Complete", + "tags": [], + "templatetype": "BUILTIN", + "url": "http://download.cloudstack.org/releases/2.2.0/eec2209b-9875-3c8d-92be-c001bd8a0faf.qcow2.bz2", + "zoneid": "4e3b338d-87a6-4189-b931-a1747edeea8f", + "zonename": "zone1" + }, + { + "account": "system", + "bits": 0, + "checksum": "ed0e788280ff2912ea40f7f91ca7a249", + "created": "2021-11-01T15:02:10-0400", + "crossZones": true, + "deployasis": false, + "details": {}, + "directdownload": false, + "displaytext": "CentOS 5.5(64-bit) no GUI (KVM)", + "domain": "ROOT", + "domainid": "4ab50296-3b45-11ec-a097-a8a15983abb5", + "downloaddetails": [ + { + "datastore": "nfs://192.168.1.88:/export/secondary", + "downloadPercent": "100", + "downloadState": "DOWNLOADED" + } + ], + "format": "QCOW2", + "hypervisor": "KVM", + "id": "4ab79b52-3b45-11ec-a097-a8a15983abb6", + "isdynamicallyscalable": false, + "isextractable": true, + "isfeatured": true, + "ispublic": true, + "isready": true, + "name": "CentOS 5.5(64-bit) no GUI (KVM)", + "ostypeid": "4ac797d4-3b45-11ec-a097-a8a15983abb5", + "ostypename": "CentOS 5.5 (64-bit)", + "passwordenabled": false, + "physicalsize": 1769537536, + "requireshvm": false, + "size": 8589934592, + "sshkeyenabled": false, + "status": "Download Complete", + "tags": [], + "templatetype": "BUILTIN", + "url": "http://download.cloudstack.org/releases/2.2.0/eec2209b-9875-3c8d-92be-c001bd8a0faf.qcow2.bz2", + "zoneid": "4e3b338d-87a6-4189-b931-a1747edeea8f", + "zonename": "zone1" + } + ] +} diff --git a/pkg/executables/testdata/cmk_list_template_none.json b/pkg/executables/testdata/cmk_list_template_none.json new file mode 100644 index 000000000000..9cde2ad853f6 --- /dev/null +++ b/pkg/executables/testdata/cmk_list_template_none.json @@ -0,0 +1,6 @@ +{ + "count": 0, + "template": [ + + ] +} diff --git a/pkg/executables/testdata/cmk_list_zone_multiple.json b/pkg/executables/testdata/cmk_list_zone_multiple.json new file mode 100644 index 000000000000..74384d8696cd --- /dev/null +++ b/pkg/executables/testdata/cmk_list_zone_multiple.json @@ -0,0 +1,27 @@ +{ + "count": 2, + "zone": [ + { + "allocationstate": "Enabled", + "dhcpprovider": "VirtualRouter", + "id": "4e3b338d-87a6-4189-b931-a1747edeea8f", + "localstorageenabled": false, + "name": "zone1", + "networktype": "Advanced", + "securitygroupsenabled": false, + "tags": [], + "zonetoken": "1f999599-dbf0-3ba6-9091-1ba87c4f91f0" + }, + { + "allocationstate": "Enabled", + "dhcpprovider": "VirtualRouter", + "id": "4e3b338d-87a6-4189-b931-a1747edeea8g", + "localstorageenabled": false, + "name": "zone1", + "networktype": "Advanced", + "securitygroupsenabled": false, + "tags": [], + "zonetoken": "1f999599-dbf0-3ba6-9091-1ba87c4f91f0" + }, + ] +} diff --git a/pkg/executables/testdata/cmk_list_zone_none.json b/pkg/executables/testdata/cmk_list_zone_none.json new file mode 100644 index 000000000000..3326fff5d6d4 --- /dev/null +++ b/pkg/executables/testdata/cmk_list_zone_none.json @@ -0,0 +1,5 @@ +{ + "count": 0, + "zone": [ + ] +} From 
40c9ddbec4d73c36323d7d84f91c6e336786e4b5 Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Fri, 1 Jul 2022 12:27:00 -0500 Subject: [PATCH 15/22] Increase unit test coverage #2 --- pkg/providers/cloudstack/cloudstack_test.go | 107 ++++++++++++++- .../cloudstack/testdata/cluster_invalid.yaml | 123 ++++++++++++++++++ pkg/providers/cloudstack/validator.go | 2 +- pkg/providers/cloudstack/validator_test.go | 14 ++ 4 files changed, 244 insertions(+), 2 deletions(-) create mode 100644 pkg/providers/cloudstack/testdata/cluster_invalid.yaml diff --git a/pkg/providers/cloudstack/cloudstack_test.go b/pkg/providers/cloudstack/cloudstack_test.go index 5e617003e89b..d78fda0c2075 100644 --- a/pkg/providers/cloudstack/cloudstack_test.go +++ b/pkg/providers/cloudstack/cloudstack_test.go @@ -10,6 +10,7 @@ import ( "github.com/golang/mock/gomock" etcdv1 "github.com/mrajashree/etcdadm-controller/api/v1beta1" + . "github.com/onsi/gomega" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -38,7 +39,8 @@ const ( secret-key = test-secret1 api-url = http://127.16.0.1:8080/client/api */ - expectedCloudStackCloudConfig = "W0dsb2JhbF0KdmVyaWZ5LXNzbCA9IGZhbHNlCmFwaS1rZXkgPSB0ZXN0LWtleTEKc2VjcmV0LWtleSA9IHRlc3Qtc2VjcmV0MQphcGktdXJsID0gaHR0cDovLzEyNy4xNi4wLjE6ODA4MC9jbGllbnQvYXBpCg==" + expectedCloudStackCloudConfig = "W0dsb2JhbF0KdmVyaWZ5LXNzbCA9IGZhbHNlCmFwaS1rZXkgPSB0ZXN0LWtleTEKc2VjcmV0LWtleSA9IHRlc3Qtc2VjcmV0MQphcGktdXJsID0gaHR0cDovLzEyNy4xNi4wLjE6ODA4MC9jbGllbnQvYXBpCg==" + cloudStackCloudConfigWithInvalidUrl = "W0dsb2JhbF0KdmVyaWZ5LXNzbCA9IGZhbHNlCmFwaS1rZXkgPSB0ZXN0LWtleTEKc2VjcmV0LWtleSA9IHRlc3Qtc2VjcmV0MQphcGktdXJsID0geHh4Cg==" ) func givenClusterConfig(t *testing.T, fileName string) *v1alpha1.Cluster { @@ -239,6 +241,109 @@ func TestProviderGenerateCAPISpecForCreate(t *testing.T) { test.AssertContentToFile(t, string(md), "testdata/expected_results_main_md.yaml") } +func TestProviderSetupAndValidateCreateClusterFailureOnInvalidUrl(t *testing.T) { + tt := NewWithT(t) + mockCtrl := gomock.NewController(t) + var tctx testContext + tctx.SaveContext() + ctx := context.Background() + kubectl := mocks.NewMockProviderKubectlClient(mockCtrl) + clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename) + + datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename) + machineConfigs := givenMachineConfigs(t, testClusterConfigMainFilename) + cmk := givenWildcardCmk(mockCtrl) + provider := newProviderWithKubectl(t, datacenterConfig, machineConfigs, clusterSpec.Cluster, kubectl, cmk) + if provider == nil { + t.Fatalf("provider object is nil") + } + + os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, cloudStackCloudConfigWithInvalidUrl) + err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec) + tt.Expect(err).NotTo(BeNil()) +} + +func TestProviderSetupAndValidateUpgradeClusterFailureOnInvalidUrl(t *testing.T) { + tt := NewWithT(t) + mockCtrl := gomock.NewController(t) + var tctx testContext + tctx.SaveContext() + ctx := context.Background() + kubectl := mocks.NewMockProviderKubectlClient(mockCtrl) + cluster := &types.Cluster{Name: "test"} + clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename) + + datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename) + machineConfigs := givenMachineConfigs(t, testClusterConfigMainFilename) + cmk := givenWildcardCmk(mockCtrl) + provider := newProviderWithKubectl(t, datacenterConfig, machineConfigs, clusterSpec.Cluster, kubectl, cmk) + if provider == nil { + 
t.Fatalf("provider object is nil") + } + + os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, cloudStackCloudConfigWithInvalidUrl) + err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec) + tt.Expect(err).NotTo(BeNil()) +} + +func TestProviderSetupAndValidateDeleteClusterFailureOnInvalidUrl(t *testing.T) { + tt := NewWithT(t) + mockCtrl := gomock.NewController(t) + var tctx testContext + tctx.SaveContext() + ctx := context.Background() + kubectl := mocks.NewMockProviderKubectlClient(mockCtrl) + cluster := &types.Cluster{Name: "test"} + clusterSpec := givenClusterSpec(t, testClusterConfigMainFilename) + + datacenterConfig := givenDatacenterConfig(t, testClusterConfigMainFilename) + machineConfigs := givenMachineConfigs(t, testClusterConfigMainFilename) + cmk := givenWildcardCmk(mockCtrl) + provider := newProviderWithKubectl(t, datacenterConfig, machineConfigs, clusterSpec.Cluster, kubectl, cmk) + if provider == nil { + t.Fatalf("provider object is nil") + } + + os.Setenv(decoder.EksacloudStackCloudConfigB64SecretKey, cloudStackCloudConfigWithInvalidUrl) + err := provider.SetupAndValidateDeleteCluster(ctx, cluster) + tt.Expect(err).NotTo(BeNil()) +} + +func TestProviderSetupAndValidateCreateClusterFailureOnInvalidClusterSpec(t *testing.T) { + tt := NewWithT(t) + clusterSpecManifest := "cluster_invalid.yaml" + mockCtrl := gomock.NewController(t) + setupContext() + kubectl := mocks.NewMockProviderKubectlClient(mockCtrl) + clusterSpec := givenClusterSpec(t, clusterSpecManifest) + datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest) + machineConfigs := givenMachineConfigs(t, clusterSpecManifest) + ctx := context.Background() + cmk := givenWildcardCmk(mockCtrl) + provider := newProviderWithKubectl(t, datacenterConfig, machineConfigs, clusterSpec.Cluster, kubectl, cmk) + + err := provider.SetupAndValidateCreateCluster(ctx, clusterSpec) + tt.Expect(err).NotTo(BeNil()) +} + +func TestProviderSetupAndValidateUpgradeClusterFailureOnInvalidClusterSpec(t *testing.T) { + tt := NewWithT(t) + clusterSpecManifest := "cluster_invalid.yaml" + mockCtrl := gomock.NewController(t) + setupContext() + kubectl := mocks.NewMockProviderKubectlClient(mockCtrl) + cluster := &types.Cluster{Name: "test"} + clusterSpec := givenClusterSpec(t, clusterSpecManifest) + datacenterConfig := givenDatacenterConfig(t, clusterSpecManifest) + machineConfigs := givenMachineConfigs(t, clusterSpecManifest) + ctx := context.Background() + cmk := givenWildcardCmk(mockCtrl) + provider := newProviderWithKubectl(t, datacenterConfig, machineConfigs, clusterSpec.Cluster, kubectl, cmk) + + err := provider.SetupAndValidateUpgradeCluster(ctx, cluster, clusterSpec) + tt.Expect(err).NotTo(BeNil()) +} + func TestProviderGenerateCAPISpecForCreateWithAffinity(t *testing.T) { clusterSpecManifest := "cluster_affinity.yaml" mockCtrl := gomock.NewController(t) diff --git a/pkg/providers/cloudstack/testdata/cluster_invalid.yaml b/pkg/providers/cloudstack/testdata/cluster_invalid.yaml new file mode 100644 index 000000000000..7b2463a6a604 --- /dev/null +++ b/pkg/providers/cloudstack/testdata/cluster_invalid.yaml @@ -0,0 +1,123 @@ +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: Cluster +metadata: + name: test + namespace: test-namespace +spec: + clusterNetwork: + cni: cilium + pods: + cidrBlocks: + - 192.168.0.0/16 + services: + cidrBlocks: + - 10.96.0.0/12 + controlPlaneConfiguration: + count: 3 + endpoint: + host: 1.2.3.4 + machineGroupRef: + kind: CloudStackMachineConfig + name: test-cp + datacenterRef: + kind: 
CloudStackDatacenterConfig + name: test + externalEtcdConfiguration: + count: 3 + machineGroupRef: + kind: CloudStackMachineConfig + name: test-etcd + kubernetesVersion: "1.21" + workerNodeGroupConfigurations: + - count: 3 + machineGroupRef: + kind: CloudStackMachineConfig + name: test +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackDatacenterConfig +metadata: + name: test + namespace: test-namespace +spec: + {} +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackMachineConfig +metadata: + name: test-cp + namespace: test-namespace +spec: + computeOffering: + name: "m4-large" + users: + - name: "mySshUsername" + sshAuthorizedKeys: # The key below was manually generated and not used in any production systems + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" + template: + name: "centos7-k8s-118" + diskOffering: + name: "Small" + mountPath: "/data-small" + device: "/dev/vdb" + filesystem: "ext4" + label: "data_disk" + symlinks: + /var/log/kubernetes: /data-small/var/log/kubernetes + affinityGroupIds: + - control-plane-anti-affinity +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackMachineConfig +metadata: + name: test + namespace: test-namespace +spec: + computeOffering: + name: "m4-large" + users: + - name: "mySshUsername" + sshAuthorizedKeys: # The key below was manually generated and not used in any production systems + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" + template: + name: "centos7-k8s-118" + diskOffering: + name: "Small" + mountPath: "/data-small" + device: "/dev/vdb" + filesystem: "ext4" + label: "data_disk" + symlinks: + /var/log/pods: /data-small/var/log/pods + /var/log/containers: /data-small/var/log/containers + affinityGroupIds: + - worker-affinity + userCustomDetails: + foo: bar +--- +apiVersion: anywhere.eks.amazonaws.com/v1alpha1 +kind: CloudStackMachineConfig +metadata: + name: test-etcd + namespace: test-namespace +spec: + computeOffering: + name: "m4-large" + users: + - name: "mySshUsername" + sshAuthorizedKeys: # The key below was manually generated and not used in any production systems + - "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC1BK73XhIzjX+meUr7pIYh6RHbvI3tmHeQIXY5lv7aztN1UoX+bhPo3dwo2sfSQn5kuxgQdnxIZ/CTzy0p0GkEYVv3gwspCeurjmu0XmrdmaSGcGxCEWT/65NtvYrQtUE5ELxJ+N/aeZNlK2B7IWANnw/82913asXH4VksV1NYNduP0o1/G4XcwLLSyVFB078q/oEnmvdNIoS61j4/o36HVtENJgYr0idcBvwJdvcGxGnPaqOhx477t+kfJAa5n5dSA5wilIaoXH5i1Tf/HsTCM52L+iNCARvQzJYZhzbWI1MDQwzILtIBEQCJsl2XSqIupleY8CxqQ6jCXt2mhae+wPc3YmbO5rFvr2/EvC57kh3yDs1Nsuj8KOvD78KeeujbR8n8pScm3WDp62HFQ8lEKNdeRNj6kB8WnuaJvPnyZfvzOhwG65/9w13IBl7B1sWxbFnq2rMpm5uHVK7mAmjL0Tt8zoDhcE1YJEnp9xte3/pvmKPkST5Q/9ZtR9P5sI+02jY0fvPkPyC03j2gsPixG7rpOCwpOdbny4dcj0TDeeXJX8er+oVfJuLYz0pNWJcT2raDdFfcqvYA0B0IyNYlj5nWX4RuEcyT3qocLReWPnZojetvAG/H8XwOh7fEVGqHAKOVSnPXCSQJPl6s0H12jPJBDJMTydtYPEszl4/CeQ== testemail@test.com" + template: + name: "centos7-k8s-118" + diskOffering: + name: "Small" + mountPath: "/data-small" + device: "/dev/vdb" + filesystem: "ext4" + label: "data_disk" + symlinks: + /var/lib/: /data-small/var/lib + affinityGroupIds: + - etcd-affinity + +--- diff --git a/pkg/providers/cloudstack/validator.go b/pkg/providers/cloudstack/validator.go index af08d3220a56..0bc1c882fa17 100644 --- a/pkg/providers/cloudstack/validator.go +++ b/pkg/providers/cloudstack/validator.go @@ -65,7 +65,7 @@ func (v *Validator) validateCloudStackAccess(ctx context.Context, datacenterConf for _, refName := range refNamesToCheck { if err := v.cmk.ValidateCloudStackConnection(ctx, refName); err != nil { - return fmt.Errorf("failed validating connection to cloudstack %s: %v", refName, err) + return fmt.Errorf("validating connection to cloudstack %s: %v", refName, err) } } diff --git a/pkg/providers/cloudstack/validator_test.go b/pkg/providers/cloudstack/validator_test.go index b3bedf63b1ac..802dc8cb7370 100644 --- a/pkg/providers/cloudstack/validator_test.go +++ b/pkg/providers/cloudstack/validator_test.go @@ -97,6 +97,20 @@ func TestValidateCloudStackConnection(t *testing.T) { } } +func TestValidateCloudStackConnectionFailure(t *testing.T) { + ctx := context.Background() + cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) + validator := NewValidator(cmk) + datacenterConfig, err := v1alpha1.GetCloudStackDatacenterConfig(path.Join(testDataDir, testClusterConfigMainFilename)) + if err != nil { + t.Fatalf("unable to get datacenter config from file") + } + + cmk.EXPECT().ValidateCloudStackConnection(ctx, "Global").Return(errors.New("exception")) + err = validator.validateCloudStackAccess(ctx, datacenterConfig) + thenErrorExpected(t, "validating connection to cloudstack Global: exception", err) +} + func TestValidateMachineConfigsNoControlPlaneEndpointIP(t *testing.T) { ctx := context.Background() cmk := mocks.NewMockProviderCmkClient(gomock.NewController(t)) From bdbd178e1135b5124a5c32cc069b1ce85a75d026 Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Tue, 5 Jul 2022 10:26:18 -0500 Subject: [PATCH 16/22] Change error format --- internal/test/cleanup/cleanup.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/internal/test/cleanup/cleanup.go b/internal/test/cleanup/cleanup.go index 08f085f43c5f..fdbb41b91515 100644 --- a/internal/test/cleanup/cleanup.go +++ b/internal/test/cleanup/cleanup.go @@ -102,17 +102,15 @@ func CleanUpCloudstackTestResources(ctx context.Context, clusterName string, dry cmk := executableBuilder.BuildCmkExecutable(tmpWriter, execConfig.Profiles) defer cmk.Close(ctx) - failedProfiles := []string{} - errors := []error{} + errorsMap := map[string]error{} for _, profile := range execConfig.Profiles { if err := cmk.CleanupVms(ctx, profile.Name, 
clusterName, dryRun); err != nil { - failedProfiles = append(failedProfiles, profile.Name) - errors = append(errors, err) + errorsMap[profile.Name] = err } } - if len(failedProfiles) > 0 { - return fmt.Errorf("cleaning up VMs: profiles=%+v, errors=%+v", failedProfiles, errors) + if len(errorsMap) > 0 { + return fmt.Errorf("cleaning up VMs: %+v", errorsMap) } return nil } From 9813e4681374cc1180b5248ae296150b111da837 Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Tue, 5 Jul 2022 10:43:56 -0500 Subject: [PATCH 17/22] Change function names to be more descriptive - ValidateZonePresent -> ValidateZoneAndGetId - ValidateDomainPresent -> ValidateDomainAndGetId Change return value of ValidateDomainAndGetId - from CloudStackResourceIdentifier to string because only domainId is used --- pkg/executables/cmk.go | 21 ++++++------ pkg/executables/cmk_test.go | 38 ++++++++++----------- pkg/providers/cloudstack/cloudstack_test.go | 4 +-- pkg/providers/cloudstack/mocks/client.go | 26 +++++++------- pkg/providers/cloudstack/validator.go | 10 +++--- pkg/providers/cloudstack/validator_test.go | 12 +++---- 6 files changed, 55 insertions(+), 56 deletions(-) diff --git a/pkg/executables/cmk.go b/pkg/executables/cmk.go index 94d1b92108ef..49648875481f 100644 --- a/pkg/executables/cmk.go +++ b/pkg/executables/cmk.go @@ -187,7 +187,7 @@ func (c *Cmk) ValidateAffinityGroupsPresent(ctx context.Context, profile string, return nil } -func (c *Cmk) ValidateZonePresent(ctx context.Context, profile string, zone v1alpha1.CloudStackZone) (string, error) { +func (c *Cmk) ValidateZoneAndGetId(ctx context.Context, profile string, zone v1alpha1.CloudStackZone) (string, error) { command := newCmkCommand("list zones") if len(zone.Id) > 0 { applyCmkArgs(&command, withCloudStackId(zone.Id)) @@ -217,8 +217,8 @@ func (c *Cmk) ValidateZonePresent(ctx context.Context, profile string, zone v1al return cmkZones[0].Id, nil } -func (c *Cmk) ValidateDomainPresent(ctx context.Context, profile string, domain string) (v1alpha1.CloudStackResourceIdentifier, error) { - domainIdentifier := v1alpha1.CloudStackResourceIdentifier{Name: domain, Id: ""} +func (c *Cmk) ValidateDomainAndGetId(ctx context.Context, profile string, domain string) (string, error) { + domainId := "" command := newCmkCommand("list domains") // "list domains" API does not support querying by domain path, so here we extract the domain name which is the last part of the input domain tokens := strings.Split(domain, domainDelimiter) @@ -227,17 +227,17 @@ func (c *Cmk) ValidateDomainPresent(ctx context.Context, profile string, domain result, err := c.exec(ctx, profile, command...) 
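	// The raw cmk output captured above is the JSON body of "list domains".
	// It is unmarshalled below and matched on the computed domain path rather
	// than the bare domain name, because several domains can share a name
	// under different parent domains.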
if err != nil { - return domainIdentifier, fmt.Errorf("getting domain info - %s: %v", result.String(), err) + return domainId, fmt.Errorf("getting domain info - %s: %v", result.String(), err) } if result.Len() == 0 { - return domainIdentifier, fmt.Errorf("domain %s not found", domain) + return domainId, fmt.Errorf("domain %s not found", domain) } response := struct { CmkDomains []cmkDomain `json:"domain"` }{} if err = json.Unmarshal(result.Bytes(), &response); err != nil { - return domainIdentifier, fmt.Errorf("parsing response into json: %v", err) + return domainId, fmt.Errorf("parsing response into json: %v", err) } domains := response.CmkDomains var domainPath string @@ -248,16 +248,15 @@ func (c *Cmk) ValidateDomainPresent(ctx context.Context, profile string, domain } for _, d := range domains { if d.Path == domainPath { - domainIdentifier.Id = d.Id - domainIdentifier.Name = d.Name + domainId = d.Id break } } - if domainIdentifier.Id == "" { - return domainIdentifier, fmt.Errorf("domain(s) found for domain name %s, but not found a domain with domain path %s", domain, domainPath) + if domainId == "" { + return domainId, fmt.Errorf("domain(s) found for domain name %s, but not found a domain with domain path %s", domain, domainPath) } - return domainIdentifier, nil + return domainId, nil } func (c *Cmk) ValidateNetworkPresent(ctx context.Context, profile string, domainId string, network v1alpha1.CloudStackResourceIdentifier, zoneId string, account string) error { diff --git a/pkg/executables/cmk_test.go b/pkg/executables/cmk_test.go index ce11232f75e3..852a04cac0fc 100644 --- a/pkg/executables/cmk_test.go +++ b/pkg/executables/cmk_test.go @@ -296,9 +296,9 @@ func TestCmkListOperations(t *testing.T) { "list", "domains", fmt.Sprintf("name=\"%s\"", rootDomain), "listall=true", }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - domain, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, rootDomain) - if domain.Id != rootDomainId { - t.Fatalf("Expected domain id: %s, actual domain id: %s", rootDomainId, domain.Id) + domainId, err := cmk.ValidateDomainAndGetId(ctx, execConfig.Profiles[0].Name, rootDomain) + if domainId != rootDomainId { + t.Fatalf("Expected domain id: %s, actual domain id: %s", rootDomainId, domainId) } return err }, @@ -314,9 +314,9 @@ func TestCmkListOperations(t *testing.T) { "list", "domains", fmt.Sprintf("name=\"%s\"", domainName), "listall=true", }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - domain, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, domain) - if domain.Id != domainId { - t.Fatalf("Expected domain id: %s, actual domain id: %s", domainId, domain.Id) + actualDomainId, err := cmk.ValidateDomainAndGetId(ctx, execConfig.Profiles[0].Name, domain) + if actualDomainId != domainId { + t.Fatalf("Expected domain id: %s, actual domain id: %s", domainId, actualDomainId) } return err }, @@ -332,7 +332,7 @@ func TestCmkListOperations(t *testing.T) { "list", "domains", fmt.Sprintf("name=\"%s\"", domainName), "listall=true", }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, domainName) + _, err := cmk.ValidateDomainAndGetId(ctx, execConfig.Profiles[0].Name, domainName) return err }, cmkResponseError: nil, @@ -347,9 +347,9 @@ func TestCmkListOperations(t *testing.T) { "list", "domains", fmt.Sprintf("name=\"%s\"", domain2Name), "listall=true", }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - 
domain, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, domain2) - if domain.Id != domain2Id { - t.Fatalf("Expected domain id: %s, actual domain id: %s", domain2Id, domain.Id) + domainId, err := cmk.ValidateDomainAndGetId(ctx, execConfig.Profiles[0].Name, domain2) + if domainId != domain2Id { + t.Fatalf("Expected domain id: %s, actual domain id: %s", domain2Id, domainId) } return err }, @@ -365,7 +365,7 @@ func TestCmkListOperations(t *testing.T) { "list", "domains", fmt.Sprintf("name=\"%s\"", domainName), "listall=true", }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, domain) + _, err := cmk.ValidateDomainAndGetId(ctx, execConfig.Profiles[0].Name, domain) return err }, cmkResponseError: nil, @@ -380,7 +380,7 @@ func TestCmkListOperations(t *testing.T) { "list", "domains", fmt.Sprintf("name=\"%s\"", domainName), "listall=true", }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateDomainPresent(ctx, execConfig.Profiles[0].Name, domain) + _, err := cmk.ValidateDomainAndGetId(ctx, execConfig.Profiles[0].Name, domain) return err }, cmkResponseError: nil, @@ -437,7 +437,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[0]) + _, err := cmk.ValidateZoneAndGetId(ctx, execConfig.Profiles[0].Name, zones[0]) return err }, cmkResponseError: nil, @@ -452,7 +452,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("id=\"%s\"", resourceId.Id), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[2]) + _, err := cmk.ValidateZoneAndGetId(ctx, execConfig.Profiles[0].Name, zones[2]) return err }, cmkResponseError: nil, @@ -467,7 +467,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("id=\"%s\"", resourceId.Id), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[2]) + _, err := cmk.ValidateZoneAndGetId(ctx, execConfig.Profiles[0].Name, zones[2]) return err }, cmkResponseError: nil, @@ -482,7 +482,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("id=\"%s\"", resourceId.Id), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[2]) + _, err := cmk.ValidateZoneAndGetId(ctx, execConfig.Profiles[0].Name, zones[2]) return err }, cmkResponseError: nil, @@ -497,7 +497,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[0]) + _, err := cmk.ValidateZoneAndGetId(ctx, execConfig.Profiles[0].Name, zones[0]) return err }, cmkResponseError: errors.New("cmk calling return exception"), @@ -512,7 +512,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[0]) + _, err := cmk.ValidateZoneAndGetId(ctx, execConfig.Profiles[0].Name, zones[0]) 
return err }, cmkResponseError: nil, @@ -527,7 +527,7 @@ func TestCmkListOperations(t *testing.T) { "list", "zones", fmt.Sprintf("name=\"%s\"", resourceName.Name), }, cmkFunc: func(cmk executables.Cmk, ctx context.Context) error { - _, err := cmk.ValidateZonePresent(ctx, execConfig.Profiles[0].Name, zones[0]) + _, err := cmk.ValidateZoneAndGetId(ctx, execConfig.Profiles[0].Name, zones[0]) return err }, cmkResponseError: nil, diff --git a/pkg/providers/cloudstack/cloudstack_test.go b/pkg/providers/cloudstack/cloudstack_test.go index d78fda0c2075..9ffaaba695e8 100644 --- a/pkg/providers/cloudstack/cloudstack_test.go +++ b/pkg/providers/cloudstack/cloudstack_test.go @@ -64,10 +64,10 @@ func givenWildcardCmk(mockCtrl *gomock.Controller) ProviderCmkClient { cmk.EXPECT().ValidateTemplatePresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateServiceOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateDiskOfferingPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateZonePresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateZoneAndGetId(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateAffinityGroupsPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateCloudStackConnection(gomock.Any(), gomock.Any()).AnyTimes() - cmk.EXPECT().ValidateDomainPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + cmk.EXPECT().ValidateDomainAndGetId(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateAccountPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().ValidateNetworkPresent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() cmk.EXPECT().GetManagementApiEndpoint(gomock.Any()).AnyTimes().Return("http://127.16.0.1:8080/client/api", nil) diff --git a/pkg/providers/cloudstack/mocks/client.go b/pkg/providers/cloudstack/mocks/client.go index 51cd7c065dc5..61411535977d 100644 --- a/pkg/providers/cloudstack/mocks/client.go +++ b/pkg/providers/cloudstack/mocks/client.go @@ -112,19 +112,19 @@ func (mr *MockProviderCmkClientMockRecorder) ValidateDiskOfferingPresent(arg0, a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDiskOfferingPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateDiskOfferingPresent), arg0, arg1, arg2, arg3) } -// ValidateDomainPresent mocks base method. -func (m *MockProviderCmkClient) ValidateDomainPresent(arg0 context.Context, arg1, arg2 string) (v1alpha1.CloudStackResourceIdentifier, error) { +// ValidateDomainAndGetId mocks base method. +func (m *MockProviderCmkClient) ValidateDomainAndGetId(arg0 context.Context, arg1, arg2 string) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateDomainPresent", arg0, arg1, arg2) - ret0, _ := ret[0].(v1alpha1.CloudStackResourceIdentifier) + ret := m.ctrl.Call(m, "ValidateDomainAndGetId", arg0, arg1, arg2) + ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } -// ValidateDomainPresent indicates an expected call of ValidateDomainPresent. -func (mr *MockProviderCmkClientMockRecorder) ValidateDomainPresent(arg0, arg1, arg2 interface{}) *gomock.Call { +// ValidateDomainAndGetId indicates an expected call of ValidateDomainAndGetId. 
+func (mr *MockProviderCmkClientMockRecorder) ValidateDomainAndGetId(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDomainPresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateDomainPresent), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateDomainAndGetId", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateDomainAndGetId), arg0, arg1, arg2) } // ValidateNetworkPresent mocks base method. @@ -169,19 +169,19 @@ func (mr *MockProviderCmkClientMockRecorder) ValidateTemplatePresent(arg0, arg1, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateTemplatePresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateTemplatePresent), arg0, arg1, arg2, arg3, arg4, arg5) } -// ValidateZonePresent mocks base method. -func (m *MockProviderCmkClient) ValidateZonePresent(arg0 context.Context, arg1 string, arg2 v1alpha1.CloudStackZone) (string, error) { +// ValidateZoneAndGetId mocks base method. +func (m *MockProviderCmkClient) ValidateZoneAndGetId(arg0 context.Context, arg1 string, arg2 v1alpha1.CloudStackZone) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidateZonePresent", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "ValidateZoneAndGetId", arg0, arg1, arg2) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } -// ValidateZonePresent indicates an expected call of ValidateZonePresent. -func (mr *MockProviderCmkClientMockRecorder) ValidateZonePresent(arg0, arg1, arg2 interface{}) *gomock.Call { +// ValidateZoneAndGetId indicates an expected call of ValidateZoneAndGetId. +func (mr *MockProviderCmkClientMockRecorder) ValidateZoneAndGetId(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateZonePresent", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateZonePresent), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateZoneAndGetId", reflect.TypeOf((*MockProviderCmkClient)(nil).ValidateZoneAndGetId), arg0, arg1, arg2) } // MockProviderKubectlClient is a mock of ProviderKubectlClient interface. 
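A minimal sketch of how the renamed ProviderCmkClient methods are meant to compose after this change: resolve the domain ID, then the zone ID, and only then run the per-resource checks against those IDs. This is illustrative only; the helper name resolveZoneScope, the import set, and the error wording are assumptions, and the CloudStackAvailabilityZone field names are taken from the validator changes below.

package cloudstack

import (
	"context"
	"fmt"

	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
)

// resolveZoneScope is an illustrative helper showing the intended call order
// after the rename: both methods now return plain IDs, which the per-resource
// checks (template, offerings, affinity groups) consume directly.
func resolveZoneScope(ctx context.Context, cmk ProviderCmkClient, az anywherev1.CloudStackAvailabilityZone) (string, string, error) {
	domainId, err := cmk.ValidateDomainAndGetId(ctx, az.CredentialsRef, az.Domain)
	if err != nil {
		return "", "", fmt.Errorf("resolving domain %s: %v", az.Domain, err)
	}
	zoneId, err := cmk.ValidateZoneAndGetId(ctx, az.CredentialsRef, az.Zone)
	if err != nil {
		return "", "", fmt.Errorf("resolving zone %s: %v", az.Zone.Name, err)
	}
	return domainId, zoneId, nil
}

Returning bare IDs instead of CloudStackResourceIdentifier keeps the call sites in validateMachineConfig free of extra struct plumbing, which is what the later zone-ID fix in this series relies on.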
diff --git a/pkg/providers/cloudstack/validator.go b/pkg/providers/cloudstack/validator.go index 0bc1c882fa17..ed1d0ee2a083 100644 --- a/pkg/providers/cloudstack/validator.go +++ b/pkg/providers/cloudstack/validator.go @@ -48,9 +48,9 @@ type ProviderCmkClient interface { ValidateDiskOfferingPresent(ctx context.Context, profile string, zoneId string, diskOffering anywherev1.CloudStackResourceDiskOffering) error ValidateTemplatePresent(ctx context.Context, profile string, domainId string, zoneId string, account string, template anywherev1.CloudStackResourceIdentifier) error ValidateAffinityGroupsPresent(ctx context.Context, profile string, domainId string, account string, affinityGroupIds []string) error - ValidateZonePresent(ctx context.Context, profile string, zone anywherev1.CloudStackZone) (string, error) + ValidateZoneAndGetId(ctx context.Context, profile string, zone anywherev1.CloudStackZone) (string, error) ValidateNetworkPresent(ctx context.Context, profile string, domainId string, network anywherev1.CloudStackResourceIdentifier, zoneId string, account string) error - ValidateDomainPresent(ctx context.Context, profile string, domain string) (anywherev1.CloudStackResourceIdentifier, error) + ValidateDomainAndGetId(ctx context.Context, profile string, domain string) (string, error) ValidateAccountPresent(ctx context.Context, profile string, account string, domainId string) error } @@ -94,17 +94,17 @@ func (v *Validator) ValidateCloudStackDatacenterConfig(ctx context.Context, data endpoint, az.ManagementApiEndpoint) } - domain, err := v.cmk.ValidateDomainPresent(ctx, az.CredentialsRef, az.Domain) + domainId, err := v.cmk.ValidateDomainAndGetId(ctx, az.CredentialsRef, az.Domain) if err != nil { return err } - az.DomainId = domain.Id + az.DomainId = domainId if err := v.cmk.ValidateAccountPresent(ctx, az.CredentialsRef, az.Account, az.DomainId); err != nil { return err } - zoneId, err := v.cmk.ValidateZonePresent(ctx, az.CredentialsRef, az.CloudStackAvailabilityZone.Zone) + zoneId, err := v.cmk.ValidateZoneAndGetId(ctx, az.CredentialsRef, az.CloudStackAvailabilityZone.Zone) if err != nil { return err } diff --git a/pkg/providers/cloudstack/validator_test.go b/pkg/providers/cloudstack/validator_test.go index 802dc8cb7370..e22b0e943f1d 100644 --- a/pkg/providers/cloudstack/validator_test.go +++ b/pkg/providers/cloudstack/validator_test.go @@ -622,8 +622,8 @@ func TestSetupAndValidateSshAuthorizedKeysNil(t *testing.T) { } func setupMockForDatacenterConfigValidation(cmk *mocks.MockProviderCmkClient, ctx context.Context, datacenterConfig *v1alpha1.CloudStackDatacenterConfig) { - cmk.EXPECT().ValidateZonePresent(ctx, gomock.Any(), datacenterConfig.Spec.Zones[0]).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea8f", nil) - cmk.EXPECT().ValidateDomainPresent(ctx, gomock.Any(), datacenterConfig.Spec.Domain).AnyTimes().Return(v1alpha1.CloudStackResourceIdentifier{Id: "5300cdac-74d5-11ec-8696-c81f66d3e965", Name: datacenterConfig.Spec.Domain}, nil) + cmk.EXPECT().ValidateZoneAndGetId(ctx, gomock.Any(), datacenterConfig.Spec.Zones[0]).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea8f", nil) + cmk.EXPECT().ValidateDomainAndGetId(ctx, gomock.Any(), datacenterConfig.Spec.Domain).AnyTimes().Return("5300cdac-74d5-11ec-8696-c81f66d3e965", nil) cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil) 
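	// GetManagementApiEndpoint is stubbed so ValidateCloudStackDatacenterConfig
	// has a deterministic endpoint to compare against az.ManagementApiEndpoint.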
 	cmk.EXPECT().GetManagementApiEndpoint(gomock.Any()).AnyTimes().MaxTimes(1).Return("http://127.16.0.1:8080/client/api", nil)
@@ -631,8 +631,8 @@ func setupMockForDatacenterConfigValidation(cmk *mocks.MockProviderCmkClient, ct
 
 func setupMockForAvailabilityZonesValidation(cmk *mocks.MockProviderCmkClient, ctx context.Context, azs []v1alpha1.CloudStackAvailabilityZone) {
 	for _, az := range azs {
-		cmk.EXPECT().ValidateZonePresent(ctx, gomock.Any(), az.Zone).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea82", nil)
-		cmk.EXPECT().ValidateDomainPresent(ctx, gomock.Any(), az.Domain).AnyTimes().Return(v1alpha1.CloudStackResourceIdentifier{Id: "5300cdac-74d5-11ec-8696-c81f66d3e962", Name: az.Domain}, nil)
+		cmk.EXPECT().ValidateZoneAndGetId(ctx, gomock.Any(), az.Zone).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea82", nil)
+		cmk.EXPECT().ValidateDomainAndGetId(ctx, gomock.Any(), az.Domain).AnyTimes().Return("5300cdac-74d5-11ec-8696-c81f66d3e962", nil)
 		cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), az.Account, gomock.Any()).AnyTimes().Return(nil)
 		cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
 	}
@@ -907,8 +907,8 @@ func TestValidateMachineConfigsWithAffinity(t *testing.T) {
 	}
 
 	validator := NewValidator(cmk)
-	cmk.EXPECT().ValidateZonePresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea8f", nil)
-	cmk.EXPECT().ValidateDomainPresent(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
+	cmk.EXPECT().ValidateZoneAndGetId(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return("4e3b338d-87a6-4189-b931-a1747edeea8f", nil)
+	cmk.EXPECT().ValidateDomainAndGetId(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
 	cmk.EXPECT().ValidateAccountPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
 	cmk.EXPECT().ValidateNetworkPresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil)
 	cmk.EXPECT().GetManagementApiEndpoint(gomock.Any()).AnyTimes().Return("http://127.16.0.1:8080/client/api", nil)

From 2ddc742165bf2f73e9a1361bfe3063e44d862d29 Mon Sep 17 00:00:00 2001
From: Wonkun Kim
Date: Tue, 5 Jul 2022 10:48:20 -0500
Subject: [PATCH 18/22] Add default verifySslValue as a constant

---
 pkg/providers/cloudstack/decoder/decoder.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pkg/providers/cloudstack/decoder/decoder.go b/pkg/providers/cloudstack/decoder/decoder.go
index b4b3445e1e66..b912a822aa4d 100644
--- a/pkg/providers/cloudstack/decoder/decoder.go
+++ b/pkg/providers/cloudstack/decoder/decoder.go
@@ -14,6 +14,7 @@ const (
 	CloudStackCloudConfigB64SecretKey = "CLOUDSTACK_B64ENCODED_SECRET"
 	EksaCloudStackHostPathToMount     = "EKSA_CLOUDSTACK_HOST_PATHS_TO_MOUNT"
 	CloudStackGlobalAZ                = "Global"
+	defaultVerifySslValue             = "true"
 )
 
 // ParseCloudStackSecret parses the input b64 string into the ini object to extract out the api key, secret key, and url
@@ -50,7 +51,7 @@ func ParseCloudStackSecret() (*CloudStackExecConfig, error) {
 	if err != nil {
 		return nil, fmt.Errorf("extracting value of 'api-url' from %s: %v", EksacloudStackCloudConfigB64SecretKey, err)
 	}
-	verifySslValue := "true"
+	verifySslValue := defaultVerifySslValue
 	if verifySsl, err := section.GetKey("verify-ssl"); err == nil {
 		verifySslValue = verifySsl.Value()
 		if _, err := strconv.ParseBool(verifySslValue); err != nil {

From 187c194d111c0b7aa60ef7e234c9150b2c7a0ab4 Mon Sep 17 00:00:00 2001
From: Wonkun Kim
Date: Tue, 5 Jul 2022 11:04:22 -0500
Subject: [PATCH 19/22] Fix a bug where empty Zone IDs are used in validateMachineConfig

---
 pkg/providers/cloudstack/validator.go      | 12 ++++++++----
 pkg/providers/cloudstack/validator_test.go |  1 +
 2 files changed, 9 insertions(+), 4 deletions(-)

diff --git a/pkg/providers/cloudstack/validator.go b/pkg/providers/cloudstack/validator.go
index ed1d0ee2a083..5db1e24892d8 100644
--- a/pkg/providers/cloudstack/validator.go
+++ b/pkg/providers/cloudstack/validator.go
@@ -108,7 +108,6 @@ func (v *Validator) ValidateCloudStackDatacenterConfig(ctx context.Context, data
 		if err != nil {
 			return err
 		}
-		az.CloudStackAvailabilityZone.Zone.Id = zoneId
 		if len(az.CloudStackAvailabilityZone.Zone.Network.Id) == 0 && len(az.CloudStackAvailabilityZone.Zone.Network.Name) == 0 {
 			return fmt.Errorf("zone network is not set or is empty")
 		}
@@ -273,14 +272,19 @@ func (v *Validator) validateMachineConfig(ctx context.Context, datacenterConfig
 	}
 
 	for _, az := range localAvailabilityZones {
-		if err := v.cmk.ValidateTemplatePresent(ctx, az.CredentialsRef, az.DomainId, az.CloudStackAvailabilityZone.Zone.Id, az.Account, machineConfig.Spec.Template); err != nil {
+		zoneId, err := v.cmk.ValidateZoneAndGetId(ctx, az.CredentialsRef, az.CloudStackAvailabilityZone.Zone)
+		if err != nil {
+			return err
+		}
+
+		if err := v.cmk.ValidateTemplatePresent(ctx, az.CredentialsRef, az.DomainId, zoneId, az.Account, machineConfig.Spec.Template); err != nil {
 			return fmt.Errorf("validating template: %v", err)
 		}
-		if err := v.cmk.ValidateServiceOfferingPresent(ctx, az.CredentialsRef, az.CloudStackAvailabilityZone.Zone.Id, machineConfig.Spec.ComputeOffering); err != nil {
+		if err := v.cmk.ValidateServiceOfferingPresent(ctx, az.CredentialsRef, zoneId, machineConfig.Spec.ComputeOffering); err != nil {
 			return fmt.Errorf("validating service offering: %v", err)
 		}
 		if len(machineConfig.Spec.DiskOffering.Id) > 0 || len(machineConfig.Spec.DiskOffering.Name) > 0 {
-			if err := v.cmk.ValidateDiskOfferingPresent(ctx, az.CredentialsRef, az.CloudStackAvailabilityZone.Zone.Id, machineConfig.Spec.DiskOffering); err != nil {
+			if err := v.cmk.ValidateDiskOfferingPresent(ctx, az.CredentialsRef, zoneId, machineConfig.Spec.DiskOffering); err != nil {
 				return fmt.Errorf("validating disk offering: %v", err)
 			}
 		}
diff --git a/pkg/providers/cloudstack/validator_test.go b/pkg/providers/cloudstack/validator_test.go
index e22b0e943f1d..1f393028eb1c 100644
--- a/pkg/providers/cloudstack/validator_test.go
+++ b/pkg/providers/cloudstack/validator_test.go
@@ -866,6 +866,7 @@ func TestValidateCloudStackMachineConfig(t *testing.T) {
 	}
 
 	validator := NewValidator(cmk)
+	cmk.EXPECT().ValidateZoneAndGetId(ctx, gomock.Any(), gomock.Any()).Times(3).Return("4e3b338d-87a6-4189-b931-a1747edeea82", nil)
 	cmk.EXPECT().ValidateTemplatePresent(ctx, gomock.Any(), gomock.Any(), gomock.Any(), datacenterConfig.Spec.Account, testTemplate).Times(3)
 	cmk.EXPECT().ValidateServiceOfferingPresent(ctx, gomock.Any(), gomock.Any(), testOffering).Times(3)
 

From 2ed1d16e682740fd8eac3e2a50f88e0228b45eac Mon Sep 17 00:00:00 2001
From: Wonkun Kim
Date: Tue, 5 Jul 2022 17:13:55 -0500
Subject: [PATCH 20/22] Add error message when validating management api endpoint fails

---
 pkg/providers/cloudstack/cloudstack.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/providers/cloudstack/cloudstack.go b/pkg/providers/cloudstack/cloudstack.go
index 732c292db03a..f80625da63b9 100644
--- a/pkg/providers/cloudstack/cloudstack.go
+++ b/pkg/providers/cloudstack/cloudstack.go
@@ -354,8 +354,8 @@ func (p *cloudstackProvider) validateEnv(ctx context.Context) error { for _, instance := range execConfig.Profiles { if err := p.validateManagementApiEndpoint(instance.ManagementUrl); err != nil { - return fmt.Errorf("CloudStack instance %s's managementApiEndpoint %s is invalid", - instance.Name, instance.ManagementUrl) + return fmt.Errorf("CloudStack instance %s's managementApiEndpoint %s is invalid: %v", + instance.Name, instance.ManagementUrl, err) } } From f6259ddc768a9a3115dcf67aa7386f6b4e2a1ccb Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Tue, 5 Jul 2022 18:23:53 -0500 Subject: [PATCH 21/22] Make MarkPass message more readable --- pkg/providers/cloudstack/validator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/providers/cloudstack/validator.go b/pkg/providers/cloudstack/validator.go index 5db1e24892d8..511b158e6a9f 100644 --- a/pkg/providers/cloudstack/validator.go +++ b/pkg/providers/cloudstack/validator.go @@ -69,7 +69,7 @@ func (v *Validator) validateCloudStackAccess(ctx context.Context, datacenterConf } } - logger.MarkPass("Connected to", "servers", refNamesToCheck) + logger.MarkPass(fmt.Sprintf("Connected to servers: %s", strings.Join(refNamesToCheck, ", "))) return nil } From 222889c7d1590b32354f7bf48b37f1ca5293e59d Mon Sep 17 00:00:00 2001 From: Wonkun Kim Date: Wed, 6 Jul 2022 10:34:38 -0500 Subject: [PATCH 22/22] Fix presubmit job failure --- .../test/testdata/main-bundle-release.yaml | 122 +++++++++--------- 1 file changed, 61 insertions(+), 61 deletions(-) diff --git a/release/pkg/test/testdata/main-bundle-release.yaml b/release/pkg/test/testdata/main-bundle-release.yaml index 14e78d8da0ad..186589b5d923 100644 --- a/release/pkg/test/testdata/main-bundle-release.yaml +++ b/release/pkg/test/testdata/main-bundle-release.yaml @@ -71,7 +71,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-20-17-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-20-18-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -242,23 +242,23 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.20.15-eks-d-1-20-17-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.20.15-eks-d-1-20-18-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.20.15 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-20/kubernetes-1-20-eks-17.yaml - name: kubernetes-1-20-eks-17 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-20/kubernetes-1-20-eks-18.yaml + name: kubernetes-1-20-eks-18 ova: bottlerocket: arch: - amd64 crictl: {} - description: Bottlerocket Ova image for EKS-D 1-20-17 release + description: Bottlerocket Ova image for EKS-D 1-20-18 release etcdadm: {} - name: bottlerocket-v1.20.15-eks-d-1-20-17-eks-a-v0.0.0-dev-build.0-amd64.ova + name: bottlerocket-v1.20.15-eks-d-1-20-18-eks-a-v0.0.0-dev-build.0-amd64.ova os: linux osName: bottlerocket sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: 
https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-20/1-20-17/bottlerocket-v1.20.15-eks-d-1-20-17-eks-a-v0.0.0-dev-build.0-amd64.ova + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-20/1-20-18/bottlerocket-v1.20.15-eks-d-1-20-18-eks-a-v0.0.0-dev-build.0-amd64.ova ubuntu: arch: - amd64 @@ -271,7 +271,7 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev+build.0-linux-amd64.tar.gz - description: Ubuntu Ova image for EKS-D 1-20-17 release + description: Ubuntu Ova image for EKS-D 1-20-18 release etcdadm: arch: - amd64 @@ -281,12 +281,12 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev+build.0-linux-amd64.tar.gz - name: ubuntu-v1.20.15-eks-d-1-20-17-eks-a-v0.0.0-dev-build.0-amd64.ova + name: ubuntu-v1.20.15-eks-d-1-20-18-eks-a-v0.0.0-dev-build.0-amd64.ova os: linux osName: ubuntu sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-20/1-20-17/ubuntu-v1.20.15-eks-d-1-20-17-eks-a-v0.0.0-dev-build.0-amd64.ova + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-20/1-20-18/ubuntu-v1.20.15-eks-d-1-20-18-eks-a-v0.0.0-dev-build.0-amd64.ova raw: bottlerocket: crictl: {} @@ -303,7 +303,7 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev+build.0-linux-amd64.tar.gz - description: Ubuntu Raw image for EKS-D 1-20-17 release + description: Ubuntu Raw image for EKS-D 1-20-18 release etcdadm: arch: - amd64 @@ -313,12 +313,12 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev+build.0-linux-amd64.tar.gz - name: ubuntu-v1.20.15-eks-d-1-20-17-eks-a-v0.0.0-dev-build.0-amd64.gz + name: ubuntu-v1.20.15-eks-d-1-20-18-eks-a-v0.0.0-dev-build.0-amd64.gz os: linux osName: ubuntu sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/raw/1-20/1-20-17/ubuntu-v1.20.15-eks-d-1-20-17-eks-a-v0.0.0-dev-build.0-amd64.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/raw/1-20/1-20-18/ubuntu-v1.20.15-eks-d-1-20-18-eks-a-v0.0.0-dev-build.0-amd64.gz eksa: cliTools: arch: @@ -781,7 +781,7 @@ spec: 
imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-21-15-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-21-16-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -952,23 +952,23 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.21.13-eks-d-1-21-15-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.21.13-eks-d-1-21-16-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.21.13 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-21/kubernetes-1-21-eks-15.yaml - name: kubernetes-1-21-eks-15 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-21/kubernetes-1-21-eks-16.yaml + name: kubernetes-1-21-eks-16 ova: bottlerocket: arch: - amd64 crictl: {} - description: Bottlerocket Ova image for EKS-D 1-21-15 release + description: Bottlerocket Ova image for EKS-D 1-21-16 release etcdadm: {} - name: bottlerocket-v1.21.13-eks-d-1-21-15-eks-a-v0.0.0-dev-build.0-amd64.ova + name: bottlerocket-v1.21.13-eks-d-1-21-16-eks-a-v0.0.0-dev-build.0-amd64.ova os: linux osName: bottlerocket sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-21/1-21-15/bottlerocket-v1.21.13-eks-d-1-21-15-eks-a-v0.0.0-dev-build.0-amd64.ova + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-21/1-21-16/bottlerocket-v1.21.13-eks-d-1-21-16-eks-a-v0.0.0-dev-build.0-amd64.ova ubuntu: arch: - amd64 @@ -981,7 +981,7 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev+build.0-linux-amd64.tar.gz - description: Ubuntu Ova image for EKS-D 1-21-15 release + description: Ubuntu Ova image for EKS-D 1-21-16 release etcdadm: arch: - amd64 @@ -991,25 +991,25 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev+build.0-linux-amd64.tar.gz - name: ubuntu-v1.21.13-eks-d-1-21-15-eks-a-v0.0.0-dev-build.0-amd64.ova + name: ubuntu-v1.21.13-eks-d-1-21-16-eks-a-v0.0.0-dev-build.0-amd64.ova os: linux osName: ubuntu sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-21/1-21-15/ubuntu-v1.21.13-eks-d-1-21-15-eks-a-v0.0.0-dev-build.0-amd64.ova + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-21/1-21-16/ubuntu-v1.21.13-eks-d-1-21-16-eks-a-v0.0.0-dev-build.0-amd64.ova raw: bottlerocket: arch: - amd64 crictl: 
{} - description: Bottlerocket Raw image for EKS-D 1-21-15 release + description: Bottlerocket Raw image for EKS-D 1-21-16 release etcdadm: {} - name: bottlerocket-v1.21.13-eks-d-1-21-15-eks-a-v0.0.0-dev-build.0-amd64.img.gz + name: bottlerocket-v1.21.13-eks-d-1-21-16-eks-a-v0.0.0-dev-build.0-amd64.img.gz os: linux osName: bottlerocket sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/raw/1-21/1-21-15/bottlerocket-v1.21.13-eks-d-1-21-15-eks-a-v0.0.0-dev-build.0-amd64.img.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/raw/1-21/1-21-16/bottlerocket-v1.21.13-eks-d-1-21-16-eks-a-v0.0.0-dev-build.0-amd64.img.gz ubuntu: arch: - amd64 @@ -1022,7 +1022,7 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev+build.0-linux-amd64.tar.gz - description: Ubuntu Raw image for EKS-D 1-21-15 release + description: Ubuntu Raw image for EKS-D 1-21-16 release etcdadm: arch: - amd64 @@ -1032,12 +1032,12 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev+build.0-linux-amd64.tar.gz - name: ubuntu-v1.21.13-eks-d-1-21-15-eks-a-v0.0.0-dev-build.0-amd64.gz + name: ubuntu-v1.21.13-eks-d-1-21-16-eks-a-v0.0.0-dev-build.0-amd64.gz os: linux osName: ubuntu sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/raw/1-21/1-21-15/ubuntu-v1.21.13-eks-d-1-21-15-eks-a-v0.0.0-dev-build.0-amd64.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/raw/1-21/1-21-16/ubuntu-v1.21.13-eks-d-1-21-16-eks-a-v0.0.0-dev-build.0-amd64.gz eksa: cliTools: arch: @@ -1500,7 +1500,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-22-8-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-22-9-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -1671,23 +1671,23 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.22.10-eks-d-1-22-8-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.22.10-eks-d-1-22-9-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.22.10 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-22/kubernetes-1-22-eks-8.yaml - name: kubernetes-1-22-eks-8 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-22/kubernetes-1-22-eks-9.yaml + name: kubernetes-1-22-eks-9 ova: bottlerocket: arch: 
- amd64 crictl: {} - description: Bottlerocket Ova image for EKS-D 1-22-8 release + description: Bottlerocket Ova image for EKS-D 1-22-9 release etcdadm: {} - name: bottlerocket-v1.22.10-eks-d-1-22-8-eks-a-v0.0.0-dev-build.0-amd64.ova + name: bottlerocket-v1.22.10-eks-d-1-22-9-eks-a-v0.0.0-dev-build.0-amd64.ova os: linux osName: bottlerocket sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-22/1-22-8/bottlerocket-v1.22.10-eks-d-1-22-8-eks-a-v0.0.0-dev-build.0-amd64.ova + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-22/1-22-9/bottlerocket-v1.22.10-eks-d-1-22-9-eks-a-v0.0.0-dev-build.0-amd64.ova ubuntu: arch: - amd64 @@ -1700,7 +1700,7 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev+build.0-linux-amd64.tar.gz - description: Ubuntu Ova image for EKS-D 1-22-8 release + description: Ubuntu Ova image for EKS-D 1-22-9 release etcdadm: arch: - amd64 @@ -1710,25 +1710,25 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev+build.0-linux-amd64.tar.gz - name: ubuntu-v1.22.10-eks-d-1-22-8-eks-a-v0.0.0-dev-build.0-amd64.ova + name: ubuntu-v1.22.10-eks-d-1-22-9-eks-a-v0.0.0-dev-build.0-amd64.ova os: linux osName: ubuntu sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-22/1-22-8/ubuntu-v1.22.10-eks-d-1-22-8-eks-a-v0.0.0-dev-build.0-amd64.ova + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-22/1-22-9/ubuntu-v1.22.10-eks-d-1-22-9-eks-a-v0.0.0-dev-build.0-amd64.ova raw: bottlerocket: arch: - amd64 crictl: {} - description: Bottlerocket Raw image for EKS-D 1-22-8 release + description: Bottlerocket Raw image for EKS-D 1-22-9 release etcdadm: {} - name: bottlerocket-v1.22.10-eks-d-1-22-8-eks-a-v0.0.0-dev-build.0-amd64.img.gz + name: bottlerocket-v1.22.10-eks-d-1-22-9-eks-a-v0.0.0-dev-build.0-amd64.img.gz os: linux osName: bottlerocket sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/raw/1-22/1-22-8/bottlerocket-v1.22.10-eks-d-1-22-8-eks-a-v0.0.0-dev-build.0-amd64.img.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/raw/1-22/1-22-9/bottlerocket-v1.22.10-eks-d-1-22-9-eks-a-v0.0.0-dev-build.0-amd64.img.gz ubuntu: arch: - amd64 @@ -1741,7 +1741,7 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 
0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev+build.0-linux-amd64.tar.gz - description: Ubuntu Raw image for EKS-D 1-22-8 release + description: Ubuntu Raw image for EKS-D 1-22-9 release etcdadm: arch: - amd64 @@ -1751,12 +1751,12 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev+build.0-linux-amd64.tar.gz - name: ubuntu-v1.22.10-eks-d-1-22-8-eks-a-v0.0.0-dev-build.0-amd64.gz + name: ubuntu-v1.22.10-eks-d-1-22-9-eks-a-v0.0.0-dev-build.0-amd64.gz os: linux osName: ubuntu sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/raw/1-22/1-22-8/ubuntu-v1.22.10-eks-d-1-22-8-eks-a-v0.0.0-dev-build.0-amd64.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/raw/1-22/1-22-9/ubuntu-v1.22.10-eks-d-1-22-9-eks-a-v0.0.0-dev-build.0-amd64.gz eksa: cliTools: arch: @@ -2219,7 +2219,7 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: bottlerocket-bootstrap os: linux - uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-23-2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/bottlerocket-bootstrap:v1-23-3-eks-a-v0.0.0-dev-build.1 certManager: acmesolver: arch: @@ -2390,23 +2390,23 @@ spec: imageDigest: sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef name: kind-node os: linux - uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.23.7-eks-d-1-23-2-eks-a-v0.0.0-dev-build.1 + uri: public.ecr.aws/release-container-registry/kubernetes-sigs/kind/node:v1.23.7-eks-d-1-23-3-eks-a-v0.0.0-dev-build.1 kubeVersion: v1.23.7 - manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-23/kubernetes-1-23-eks-2.yaml - name: kubernetes-1-23-eks-2 + manifestUrl: https://distro.eks.amazonaws.com/kubernetes-1-23/kubernetes-1-23-eks-3.yaml + name: kubernetes-1-23-eks-3 ova: bottlerocket: arch: - amd64 crictl: {} - description: Bottlerocket Ova image for EKS-D 1-23-2 release + description: Bottlerocket Ova image for EKS-D 1-23-3 release etcdadm: {} - name: bottlerocket-v1.23.7-eks-d-1-23-2-eks-a-v0.0.0-dev-build.0-amd64.ova + name: bottlerocket-v1.23.7-eks-d-1-23-3-eks-a-v0.0.0-dev-build.0-amd64.ova os: linux osName: bottlerocket sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-23/1-23-2/bottlerocket-v1.23.7-eks-d-1-23-2-eks-a-v0.0.0-dev-build.0-amd64.ova + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-23/1-23-3/bottlerocket-v1.23.7-eks-d-1-23-3-eks-a-v0.0.0-dev-build.0-amd64.ova ubuntu: arch: - amd64 @@ -2419,7 +2419,7 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 
0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev+build.0-linux-amd64.tar.gz - description: Ubuntu Ova image for EKS-D 1-23-2 release + description: Ubuntu Ova image for EKS-D 1-23-3 release etcdadm: arch: - amd64 @@ -2429,25 +2429,25 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev+build.0-linux-amd64.tar.gz - name: ubuntu-v1.23.7-eks-d-1-23-2-eks-a-v0.0.0-dev-build.0-amd64.ova + name: ubuntu-v1.23.7-eks-d-1-23-3-eks-a-v0.0.0-dev-build.0-amd64.ova os: linux osName: ubuntu sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-23/1-23-2/ubuntu-v1.23.7-eks-d-1-23-2-eks-a-v0.0.0-dev-build.0-amd64.ova + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/ova/1-23/1-23-3/ubuntu-v1.23.7-eks-d-1-23-3-eks-a-v0.0.0-dev-build.0-amd64.ova raw: bottlerocket: arch: - amd64 crictl: {} - description: Bottlerocket Raw image for EKS-D 1-23-2 release + description: Bottlerocket Raw image for EKS-D 1-23-3 release etcdadm: {} - name: bottlerocket-v1.23.7-eks-d-1-23-2-eks-a-v0.0.0-dev-build.0-amd64.img.gz + name: bottlerocket-v1.23.7-eks-d-1-23-3-eks-a-v0.0.0-dev-build.0-amd64.img.gz os: linux osName: bottlerocket sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/raw/1-23/1-23-2/bottlerocket-v1.23.7-eks-d-1-23-2-eks-a-v0.0.0-dev-build.0-amd64.img.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/raw/1-23/1-23-3/bottlerocket-v1.23.7-eks-d-1-23-3-eks-a-v0.0.0-dev-build.0-amd64.img.gz ubuntu: arch: - amd64 @@ -2460,7 +2460,7 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/cri-tools/v1.20.0/cri-tools-v0.0.0-dev+build.0-linux-amd64.tar.gz - description: Ubuntu Raw image for EKS-D 1-23-2 release + description: Ubuntu Raw image for EKS-D 1-23-3 release etcdadm: arch: - amd64 @@ -2470,12 +2470,12 @@ spec: sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/etcdadm/5b496a72af3d80d64a16a650c85ce9a5882bc014/etcdadm-v0.0.0-dev+build.0-linux-amd64.tar.gz - name: ubuntu-v1.23.7-eks-d-1-23-2-eks-a-v0.0.0-dev-build.0-amd64.gz + name: ubuntu-v1.23.7-eks-d-1-23-3-eks-a-v0.0.0-dev-build.0-amd64.gz os: linux osName: ubuntu sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef sha512: 
0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef - uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/raw/1-23/1-23-2/ubuntu-v1.23.7-eks-d-1-23-2-eks-a-v0.0.0-dev-build.0-amd64.gz + uri: https://release-bucket/artifacts/v0.0.0-dev-build.0/eks-distro/raw/1-23/1-23-3/ubuntu-v1.23.7-eks-d-1-23-3-eks-a-v0.0.0-dev-build.0-amd64.gz eksa: cliTools: arch: