From f487cc32cccd448c6cc5831fc0ce4762d03dc949 Mon Sep 17 00:00:00 2001 From: Giacomo Consonni Date: Fri, 26 Jan 2024 18:56:44 +0100 Subject: [PATCH 1/4] f: multi_az property is now available for redshift cluster --- .changelog/35508.txt | 7 ++ internal/service/redshift/cluster.go | 81 ++++++++++++++++- .../service/redshift/cluster_data_source.go | 9 ++ .../redshift/cluster_data_source_test.go | 71 +++++++++++++++ internal/service/redshift/cluster_test.go | 89 +++++++++++++++++++ website/docs/d/redshift_cluster.html.markdown | 1 + website/docs/r/redshift_cluster.html.markdown | 1 + 7 files changed, 255 insertions(+), 4 deletions(-) create mode 100644 .changelog/35508.txt diff --git a/.changelog/35508.txt b/.changelog/35508.txt new file mode 100644 index 00000000000..a785414f386 --- /dev/null +++ b/.changelog/35508.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_redshift_cluster: Add multi_az argument +``` + +```release-note:enhancement +data/aws_redshift_cluster: Add multi_az argument +``` diff --git a/internal/service/redshift/cluster.go b/internal/service/redshift/cluster.go index 08abe639646..404faa01ade 100644 --- a/internal/service/redshift/cluster.go +++ b/internal/service/redshift/cluster.go @@ -85,6 +85,7 @@ func ResourceCluster() *schema.Resource { "availability_zone_relocation_enabled": { Type: schema.TypeBool, Optional: true, + Default: false, }, "cluster_identifier": { Type: schema.TypeString, @@ -299,6 +300,11 @@ func ResourceCluster() *schema.Resource { validation.StringMatch(regexache.MustCompile(`(?i)^[a-z_]`), "first character must be a letter"), ), }, + "multi_az": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, "node_type": { Type: schema.TypeString, Required: true, @@ -447,9 +453,19 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int input.AvailabilityZone = aws.String(v.(string)) } - if v, ok := d.GetOk("availability_zone_relocation_enabled"); ok { - backupInput.AvailabilityZoneRelocation = aws.Bool(v.(bool)) - input.AvailabilityZoneRelocation = aws.Bool(v.(bool)) + azRelocation, azRelocationOk := d.GetOk("availability_zone_relocation_enabled") + multiAZ, multiAZOk := d.GetOk("multi_az") + if azRelocationOk && multiAZOk && azRelocation.(bool) && azRelocation.(bool) == multiAZ.(bool) { + return sdkdiag.AppendErrorf(diags, "availability_zone_relocation_enabled and multi_az can be both true") + } + if azRelocationOk { + backupInput.AvailabilityZoneRelocation = aws.Bool(azRelocation.(bool)) + input.AvailabilityZoneRelocation = aws.Bool(azRelocation.(bool)) + } + + if multiAZOk { + backupInput.MultiAZ = aws.Bool(multiAZ.(bool)) + input.MultiAZ = aws.Bool(multiAZ.(bool)) } if v, ok := d.GetOk("cluster_parameter_group_name"); ok { @@ -680,6 +696,11 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set("master_username", rsc.MasterUsername) d.Set("master_password_secret_arn", rsc.MasterPasswordSecretArn) d.Set("master_password_secret_kms_key_id", rsc.MasterPasswordSecretKmsKeyId) + maz, err := clusterMultiAZStatus(rsc) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading Redshift Cluster (%s): %s", d.Id(), err) + } + d.Set("multi_az", maz) d.Set("node_type", rsc.NodeType) d.Set("number_of_nodes", rsc.NumberOfNodes) d.Set("preferred_maintenance_window", rsc.PreferredMaintenanceWindow) @@ -726,7 +747,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int var diags diag.Diagnostics conn := meta.(*conns.AWSClient).RedshiftConn(ctx) - if 
d.HasChangesExcept("aqua_configuration_status", "availability_zone", "iam_roles", "logging", "snapshot_copy", "tags", "tags_all") { + if d.HasChangesExcept("aqua_configuration_status", "availability_zone", "iam_roles", "multi_az", "logging", "snapshot_copy", "tags", "tags_all") { input := &redshift.ModifyClusterInput{ ClusterIdentifier: aws.String(d.Id()), } @@ -942,6 +963,46 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int } } + if d.HasChange("multi_az") { + multiAZEnabled := d.Get("multi_az").(bool) + azRelocationEnabled := d.Get("availability_zone_relocation_enabled").(bool) + if multiAZEnabled && azRelocationEnabled { + return sdkdiag.AppendErrorf(diags, "modifying Redshift Cluster (%s): availability_zone_relocation_enabled and multi_az can't be both true", d.Id()) + } + input := &redshift.ModifyClusterInput{ + MultiAZ: aws.Bool(azRelocationEnabled), + ClusterIdentifier: aws.String(d.Id()), + } + + log.Printf("[DEBUG] Changing cluster multi AZ configuration: %s", input) + _, err := conn.ModifyClusterWithContext(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "Changing cluster multi AZ configuration (%s): %s", d.Id(), err) + } + + if _, err = waitClusterUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for Redshift Cluster (%s) update: %s", d.Id(), err) + } + if !multiAZEnabled { + // Disabling MultiAZ, Redshift automatically enables the AZ Relocation. For that reason is necessary to align it with the current configuration + input = &redshift.ModifyClusterInput{ + AvailabilityZoneRelocation: aws.Bool(azRelocationEnabled), + ClusterIdentifier: aws.String(d.Id()), + } + log.Printf("[DEBUG] Changing cluster availability zone relocation configuration: %s\n", input) + _, err = conn.ModifyClusterWithContext(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "Changing cluster multi AZ configuration (%s): %s", d.Id(), err) + } + + if _, err = waitClusterUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for Redshift Cluster (%s) update: %s", d.Id(), err) + } + } + } + return append(diags, resourceClusterRead(ctx, d, meta)...) } @@ -1135,3 +1196,15 @@ func clusterAvailabilityZoneRelocationStatus(cluster *redshift.Cluster) (bool, e return false, fmt.Errorf("unexpected AvailabilityZoneRelocationStatus value %q returned by API", availabilityZoneRelocationStatus) } } + +func clusterMultiAZStatus(cluster *redshift.Cluster) (bool, error) { + // MultiAZ is returned as string from the API but is implemented as bool to keep consistency with other parameters. 
+ switch multiAZStatus := aws.StringValue(cluster.MultiAZ); strings.ToLower(multiAZStatus) { + case "enabled": + return true, nil + case "disabled": + return false, nil + default: + return false, fmt.Errorf("unexpected MultiAZ value %q returned by API", multiAZStatus) + } +} diff --git a/internal/service/redshift/cluster_data_source.go b/internal/service/redshift/cluster_data_source.go index eac3eabc2fb..e4b54daac69 100644 --- a/internal/service/redshift/cluster_data_source.go +++ b/internal/service/redshift/cluster_data_source.go @@ -153,6 +153,10 @@ func DataSourceCluster() *schema.Resource { Type: schema.TypeInt, Computed: true, }, + "multi_az": { + Type: schema.TypeBool, + Computed: true, + }, "node_type": { Type: schema.TypeString, Computed: true, @@ -268,6 +272,11 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int d.Set("kms_key_id", rsc.KmsKeyId) d.Set("master_username", rsc.MasterUsername) + maz, err := clusterMultiAZStatus(rsc) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading Redshift Cluster (%s): %s", clusterID, err) + } + d.Set("multi_az", maz) d.Set("node_type", rsc.NodeType) d.Set("number_of_nodes", rsc.NumberOfNodes) d.Set("preferred_maintenance_window", rsc.PreferredMaintenanceWindow) diff --git a/internal/service/redshift/cluster_data_source_test.go b/internal/service/redshift/cluster_data_source_test.go index 7eb7f2fe314..cd4e61bc16a 100644 --- a/internal/service/redshift/cluster_data_source_test.go +++ b/internal/service/redshift/cluster_data_source_test.go @@ -43,6 +43,7 @@ func TestAccRedshiftClusterDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrSet(dataSourceName, "encrypted"), resource.TestCheckResourceAttrSet(dataSourceName, "endpoint"), resource.TestCheckResourceAttrSet(dataSourceName, "master_username"), + resource.TestCheckResourceAttrSet(dataSourceName, "multi_az"), resource.TestCheckResourceAttrSet(dataSourceName, "node_type"), resource.TestCheckResourceAttrSet(dataSourceName, "number_of_nodes"), resource.TestCheckResourceAttrSet(dataSourceName, "port"), @@ -126,6 +127,27 @@ func TestAccRedshiftClusterDataSource_availabilityZoneRelocationEnabled(t *testi }) } +func TestAccRedshiftClusterDataSource_multiAZEnabled(t *testing.T) { + ctx := acctest.Context(t) + dataSourceName := "data.aws_redshift_cluster.test" + resourceName := "aws_redshift_cluster.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, redshift.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccClusterDataSourceConfig_multiAZEnabled(rName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "multi_az", resourceName, "multi_az"), + ), + }, + }, + }) +} + func testAccClusterDataSourceConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_redshift_cluster" "test" { @@ -261,3 +283,52 @@ data "aws_redshift_cluster" "test" { } `, rName) } + +func testAccClusterDataSourceConfig_multiAZEnabled(rName string) string { + return fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = %[1]q + + policy = < Date: Mon, 29 Jan 2024 15:36:21 -0500 Subject: [PATCH 2/4] Tweak CHANGELOG entries. 
--- .changelog/35508.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.changelog/35508.txt b/.changelog/35508.txt index a785414f386..f9435d6155e 100644 --- a/.changelog/35508.txt +++ b/.changelog/35508.txt @@ -1,7 +1,7 @@ ```release-note:enhancement -resource/aws_redshift_cluster: Add multi_az argument +resource/aws_redshift_cluster: Add `multi_az` argument ``` ```release-note:enhancement -data/aws_redshift_cluster: Add multi_az argument +data/aws_redshift_cluster: Add `multi_az` attribute ``` From d253db93cecc763973962645311a1b3a14dcca85 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 29 Jan 2024 15:36:45 -0500 Subject: [PATCH 3/4] d/aws_redshift_cluster: Cosmetics. --- .../service/redshift/cluster_data_source.go | 53 ++++++++----------- .../service/redshift/service_package_gen.go | 1 + website/docs/d/redshift_cluster.html.markdown | 2 +- website/docs/r/redshift_cluster.html.markdown | 2 +- 4 files changed, 25 insertions(+), 33 deletions(-) diff --git a/internal/service/redshift/cluster_data_source.go b/internal/service/redshift/cluster_data_source.go index e4b54daac69..bb5e5bcc694 100644 --- a/internal/service/redshift/cluster_data_source.go +++ b/internal/service/redshift/cluster_data_source.go @@ -14,11 +14,11 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" - "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" ) -// @SDKDataSource("aws_redshift_cluster") +// @SDKDataSource("aws_redshift_cluster", name="Cluster") func DataSourceCluster() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceClusterRead, @@ -231,11 +231,11 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int d.Set("aqua_configuration_status", rsc.AquaConfiguration.AquaConfigurationStatus) } d.Set("availability_zone", rsc.AvailabilityZone) - azr, err := clusterAvailabilityZoneRelocationStatus(rsc) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Redshift Cluster (%s): %s", clusterID, err) + if v, err := clusterAvailabilityZoneRelocationStatus(rsc); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } else { + d.Set("availability_zone_relocation_enabled", v) } - d.Set("availability_zone_relocation_enabled", azr) d.Set("cluster_identifier", rsc.ClusterIdentifier) d.Set("cluster_namespace_arn", rsc.ClusterNamespaceArn) if err := d.Set("cluster_nodes", flattenClusterNodes(rsc.ClusterNodes)); err != nil { @@ -254,6 +254,7 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int } d.Set("cluster_version", rsc.ClusterVersion) d.Set("database_name", rsc.DBName) + d.Set("default_iam_role_arn", rsc.DefaultIamRoleArn) if rsc.ElasticIpStatus != nil { d.Set("elastic_ip", rsc.ElasticIpStatus.ElasticIp) } @@ -263,39 +264,29 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int d.Set("port", rsc.Endpoint.Port) } d.Set("enhanced_vpc_routing", rsc.EnhancedVpcRouting) - - var iamRoles []string - for _, i := range rsc.IamRoles { - iamRoles = append(iamRoles, aws.StringValue(i.IamRoleArn)) - } - d.Set("iam_roles", iamRoles) - + d.Set("iam_roles", tfslices.ApplyToAll(rsc.IamRoles, func(v *redshift.ClusterIamRole) string { + return aws.StringValue(v.IamRoleArn) + })) d.Set("kms_key_id", 
rsc.KmsKeyId) + d.Set("maintenance_track_name", rsc.MaintenanceTrackName) + d.Set("manual_snapshot_retention_period", rsc.ManualSnapshotRetentionPeriod) d.Set("master_username", rsc.MasterUsername) - maz, err := clusterMultiAZStatus(rsc) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Redshift Cluster (%s): %s", clusterID, err) + if v, err := clusterMultiAZStatus(rsc); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } else { + d.Set("multi_az", v) } - d.Set("multi_az", maz) d.Set("node_type", rsc.NodeType) d.Set("number_of_nodes", rsc.NumberOfNodes) d.Set("preferred_maintenance_window", rsc.PreferredMaintenanceWindow) d.Set("publicly_accessible", rsc.PubliclyAccessible) - d.Set("default_iam_role_arn", rsc.DefaultIamRoleArn) - d.Set("maintenance_track_name", rsc.MaintenanceTrackName) - d.Set("manual_snapshot_retention_period", rsc.ManualSnapshotRetentionPeriod) - if err := d.Set("tags", KeyValueTags(ctx, rsc.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) } - d.Set("vpc_id", rsc.VpcId) - - var vpcg []string - for _, g := range rsc.VpcSecurityGroups { - vpcg = append(vpcg, aws.StringValue(g.VpcSecurityGroupId)) - } - d.Set("vpc_security_group_ids", vpcg) + d.Set("vpc_security_group_ids", tfslices.ApplyToAll(rsc.VpcSecurityGroups, func(v *redshift.VpcSecurityGroupMembership) string { + return aws.StringValue(v.VpcSecurityGroupId) + })) loggingStatus, err := conn.DescribeLoggingStatusWithContext(ctx, &redshift.DescribeLoggingStatusInput{ ClusterIdentifier: aws.String(clusterID), @@ -306,11 +297,11 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int } if loggingStatus != nil && aws.BoolValue(loggingStatus.LoggingEnabled) { - d.Set("enable_logging", loggingStatus.LoggingEnabled) d.Set("bucket_name", loggingStatus.BucketName) - d.Set("s3_key_prefix", loggingStatus.S3KeyPrefix) - d.Set("log_exports", flex.FlattenStringSet(loggingStatus.LogExports)) + d.Set("enable_logging", loggingStatus.LoggingEnabled) d.Set("log_destination_type", loggingStatus.LogDestinationType) + d.Set("log_exports", aws.StringValueSlice(loggingStatus.LogExports)) + d.Set("s3_key_prefix", loggingStatus.S3KeyPrefix) } return diags diff --git a/internal/service/redshift/service_package_gen.go b/internal/service/redshift/service_package_gen.go index cdfd308c9d2..f383b59b1fc 100644 --- a/internal/service/redshift/service_package_gen.go +++ b/internal/service/redshift/service_package_gen.go @@ -28,6 +28,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac { Factory: DataSourceCluster, TypeName: "aws_redshift_cluster", + Name: "Cluster", }, { Factory: DataSourceClusterCredentials, diff --git a/website/docs/d/redshift_cluster.html.markdown b/website/docs/d/redshift_cluster.html.markdown index 0671ac15d81..3d28aba6a5c 100644 --- a/website/docs/d/redshift_cluster.html.markdown +++ b/website/docs/d/redshift_cluster.html.markdown @@ -76,7 +76,7 @@ This data source exports the following attributes in addition to the arguments a * `iam_roles` - IAM roles associated to the cluster * `kms_key_id` - KMS encryption key associated to the cluster * `master_username` - Username for the master DB user -* `multi_az` - If the cluster is a Multi-AZ deployment. 
+* `multi_az` - If the cluster is a Multi-AZ deployment * `node_type` - Cluster node type * `number_of_nodes` - Number of nodes in the cluster * `maintenance_track_name` - The name of the maintenance track for the restored cluster. diff --git a/website/docs/r/redshift_cluster.html.markdown b/website/docs/r/redshift_cluster.html.markdown index e6c92d5fd9b..4cd8a9325c8 100644 --- a/website/docs/r/redshift_cluster.html.markdown +++ b/website/docs/r/redshift_cluster.html.markdown @@ -67,7 +67,7 @@ This resource supports the following arguments: Password must contain at least 8 characters and contain at least one uppercase letter, one lowercase letter, and one number. * `master_password_secret_kms_key_id` - (Optional) ID of the KMS key used to encrypt the cluster admin credentials secret. * `master_username` - (Required unless a `snapshot_identifier` is provided) Username for the master DB user. -* `multi_az` - (Optional) Specifies if the Redshift cluster is multi-AZ +* `multi_az` - (Optional) Specifies if the Redshift cluster is multi-AZ. * `vpc_security_group_ids` - (Optional) A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster. * `cluster_subnet_group_name` - (Optional) The name of a cluster subnet group to be associated with this cluster. If this parameter is not provided the resulting cluster will be deployed outside virtual private cloud (VPC). * `availability_zone` - (Optional) The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. For example, if you have several EC2 instances running in a specific Availability Zone, then you might want the cluster to be provisioned in the same zone in order to decrease network latency. Can only be changed if `availability_zone_relocation_enabled` is `true`. From 1c67285450177ece00544a7e27ce489c1f791b54 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 29 Jan 2024 16:11:56 -0500 Subject: [PATCH 4/4] r/aws_redshift_cluster: Move availability_zone_relocation_enabled+multi_az validation logic to 'CustomizeDiff'. 
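Checking the `availability_zone_relocation_enabled`/`multi_az` combination at plan time surfaces the conflict before any API call is made, instead of failing partway through Create or Update. Roughly, the plan-time check now lives in the resource's CustomizeDiff along these lines (the full change is in the diff below):

    CustomizeDiff: customdiff.All(
        verify.SetTagsDiff,
        func(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
            azRelocationEnabled, multiAZ := diff.Get("availability_zone_relocation_enabled").(bool), diff.Get("multi_az").(bool)

            // The two features are mutually exclusive.
            if azRelocationEnabled && multiAZ {
                return errors.New("`availability_zone_relocation_enabled` and `multi_az` cannot be both true")
            }

            // Changing the AZ of an existing cluster requires AZ relocation to be enabled.
            if diff.Id() != "" {
                if o, n := diff.GetChange("availability_zone"); !azRelocationEnabled && o.(string) != n.(string) {
                    return errors.New("cannot change `availability_zone` if `availability_zone_relocation_enabled` is not true")
                }
            }

            return nil
        },
    ),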
--- internal/service/redshift/cluster.go | 218 ++++++++++++--------------- 1 file changed, 97 insertions(+), 121 deletions(-) diff --git a/internal/service/redshift/cluster.go b/internal/service/redshift/cluster.go index 404faa01ade..439577585f6 100644 --- a/internal/service/redshift/cluster.go +++ b/internal/service/redshift/cluster.go @@ -5,6 +5,7 @@ package redshift import ( "context" + "errors" "fmt" "log" "strings" @@ -22,6 +23,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -36,6 +38,7 @@ func ResourceCluster() *schema.Resource { ReadWithoutTimeout: resourceClusterRead, UpdateWithoutTimeout: resourceClusterUpdate, DeleteWithoutTimeout: resourceClusterDelete, + Importer: &schema.ResourceImporter{ StateContext: resourceClusterImport, }, @@ -400,16 +403,18 @@ func ResourceCluster() *schema.Resource { CustomizeDiff: customdiff.All( verify.SetTagsDiff, func(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { - if diff.Id() == "" { - return nil - } - if diff.Get("availability_zone_relocation_enabled").(bool) { - return nil + azRelocationEnabled, multiAZ := diff.Get("availability_zone_relocation_enabled").(bool), diff.Get("multi_az").(bool) + + if azRelocationEnabled && multiAZ { + return errors.New("`availability_zone_relocation_enabled` and `multi_az` cannot be both true") } - o, n := diff.GetChange("availability_zone") - if o.(string) != n.(string) { - return fmt.Errorf("cannot change `availability_zone` if `availability_zone_relocation_enabled` is not true") + + if diff.Id() != "" { + if o, n := diff.GetChange("availability_zone"); !azRelocationEnabled && o.(string) != n.(string) { + return errors.New("cannot change `availability_zone` if `availability_zone_relocation_enabled` is not true") + } } + return nil }, ), @@ -421,7 +426,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int conn := meta.(*conns.AWSClient).RedshiftConn(ctx) clusterID := d.Get("cluster_identifier").(string) - backupInput := &redshift.RestoreFromClusterSnapshotInput{ + inputR := &redshift.RestoreFromClusterSnapshotInput{ AllowVersionUpgrade: aws.Bool(d.Get("allow_version_upgrade").(bool)), AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))), ClusterIdentifier: aws.String(clusterID), @@ -429,8 +434,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int NodeType: aws.String(d.Get("node_type").(string)), PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), } - - input := &redshift.CreateClusterInput{ + inputC := &redshift.CreateClusterInput{ AllowVersionUpgrade: aws.Bool(d.Get("allow_version_upgrade").(bool)), AutomatedSnapshotRetentionPeriod: aws.Int64(int64(d.Get("automated_snapshot_retention_period").(int))), ClusterIdentifier: aws.String(clusterID), @@ -444,123 +448,118 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } if v, ok := d.GetOk("aqua_configuration_status"); ok { - backupInput.AquaConfigurationStatus = aws.String(v.(string)) - input.AquaConfigurationStatus = aws.String(v.(string)) + 
inputR.AquaConfigurationStatus = aws.String(v.(string)) + inputC.AquaConfigurationStatus = aws.String(v.(string)) } if v, ok := d.GetOk("availability_zone"); ok { - backupInput.AvailabilityZone = aws.String(v.(string)) - input.AvailabilityZone = aws.String(v.(string)) + inputR.AvailabilityZone = aws.String(v.(string)) + inputC.AvailabilityZone = aws.String(v.(string)) } - azRelocation, azRelocationOk := d.GetOk("availability_zone_relocation_enabled") - multiAZ, multiAZOk := d.GetOk("multi_az") - if azRelocationOk && multiAZOk && azRelocation.(bool) && azRelocation.(bool) == multiAZ.(bool) { - return sdkdiag.AppendErrorf(diags, "availability_zone_relocation_enabled and multi_az can be both true") - } - if azRelocationOk { - backupInput.AvailabilityZoneRelocation = aws.Bool(azRelocation.(bool)) - input.AvailabilityZoneRelocation = aws.Bool(azRelocation.(bool)) - } - - if multiAZOk { - backupInput.MultiAZ = aws.Bool(multiAZ.(bool)) - input.MultiAZ = aws.Bool(multiAZ.(bool)) + if v, ok := d.GetOk("availability_zone_relocation_enabled"); ok { + inputR.AvailabilityZoneRelocation = aws.Bool(v.(bool)) + inputC.AvailabilityZoneRelocation = aws.Bool(v.(bool)) } if v, ok := d.GetOk("cluster_parameter_group_name"); ok { - backupInput.ClusterParameterGroupName = aws.String(v.(string)) - input.ClusterParameterGroupName = aws.String(v.(string)) + inputR.ClusterParameterGroupName = aws.String(v.(string)) + inputC.ClusterParameterGroupName = aws.String(v.(string)) } if v, ok := d.GetOk("cluster_subnet_group_name"); ok { - backupInput.ClusterSubnetGroupName = aws.String(v.(string)) - input.ClusterSubnetGroupName = aws.String(v.(string)) + inputR.ClusterSubnetGroupName = aws.String(v.(string)) + inputC.ClusterSubnetGroupName = aws.String(v.(string)) } if v, ok := d.GetOk("default_iam_role_arn"); ok { - backupInput.DefaultIamRoleArn = aws.String(v.(string)) - input.DefaultIamRoleArn = aws.String(v.(string)) + inputR.DefaultIamRoleArn = aws.String(v.(string)) + inputC.DefaultIamRoleArn = aws.String(v.(string)) } if v, ok := d.GetOk("elastic_ip"); ok { - backupInput.ElasticIp = aws.String(v.(string)) - input.ElasticIp = aws.String(v.(string)) + inputR.ElasticIp = aws.String(v.(string)) + inputC.ElasticIp = aws.String(v.(string)) } if v, ok := d.GetOk("enhanced_vpc_routing"); ok { - backupInput.EnhancedVpcRouting = aws.Bool(v.(bool)) - input.EnhancedVpcRouting = aws.Bool(v.(bool)) + inputR.EnhancedVpcRouting = aws.Bool(v.(bool)) + inputC.EnhancedVpcRouting = aws.Bool(v.(bool)) } if v, ok := d.GetOk("iam_roles"); ok { - backupInput.IamRoles = flex.ExpandStringSet(v.(*schema.Set)) - input.IamRoles = flex.ExpandStringSet(v.(*schema.Set)) + inputR.IamRoles = flex.ExpandStringSet(v.(*schema.Set)) + inputC.IamRoles = flex.ExpandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("kms_key_id"); ok { - backupInput.KmsKeyId = aws.String(v.(string)) - input.KmsKeyId = aws.String(v.(string)) + inputR.KmsKeyId = aws.String(v.(string)) + inputC.KmsKeyId = aws.String(v.(string)) } if v, ok := d.GetOk("maintenance_track_name"); ok { - backupInput.MaintenanceTrackName = aws.String(v.(string)) - input.MaintenanceTrackName = aws.String(v.(string)) + inputR.MaintenanceTrackName = aws.String(v.(string)) + inputC.MaintenanceTrackName = aws.String(v.(string)) } if v, ok := d.GetOk("manage_master_password"); ok { - backupInput.ManageMasterPassword = aws.Bool(v.(bool)) - input.ManageMasterPassword = aws.Bool(v.(bool)) + inputR.ManageMasterPassword = aws.Bool(v.(bool)) + inputC.ManageMasterPassword = aws.Bool(v.(bool)) } if v, ok := 
d.GetOk("manual_snapshot_retention_period"); ok { - backupInput.ManualSnapshotRetentionPeriod = aws.Int64(int64(v.(int))) - input.ManualSnapshotRetentionPeriod = aws.Int64(int64(v.(int))) + inputR.ManualSnapshotRetentionPeriod = aws.Int64(int64(v.(int))) + inputC.ManualSnapshotRetentionPeriod = aws.Int64(int64(v.(int))) } if v, ok := d.GetOk("master_password"); ok { - input.MasterUserPassword = aws.String(v.(string)) + inputC.MasterUserPassword = aws.String(v.(string)) } if v, ok := d.GetOk("master_password_secret_kms_key_id"); ok { - backupInput.MasterPasswordSecretKmsKeyId = aws.String(v.(string)) - input.MasterPasswordSecretKmsKeyId = aws.String(v.(string)) + inputR.MasterPasswordSecretKmsKeyId = aws.String(v.(string)) + inputC.MasterPasswordSecretKmsKeyId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("multi_az"); ok { + inputR.MultiAZ = aws.Bool(v.(bool)) + inputC.MultiAZ = aws.Bool(v.(bool)) } if v, ok := d.GetOk("number_of_nodes"); ok { - backupInput.NumberOfNodes = aws.Int64(int64(v.(int))) + inputR.NumberOfNodes = aws.Int64(int64(v.(int))) // NumberOfNodes set below for CreateCluster. } if v, ok := d.GetOk("preferred_maintenance_window"); ok { - backupInput.PreferredMaintenanceWindow = aws.String(v.(string)) - input.PreferredMaintenanceWindow = aws.String(v.(string)) + inputR.PreferredMaintenanceWindow = aws.String(v.(string)) + inputC.PreferredMaintenanceWindow = aws.String(v.(string)) } if v := d.Get("vpc_security_group_ids").(*schema.Set); v.Len() > 0 { - backupInput.VpcSecurityGroupIds = flex.ExpandStringSet(v) - input.VpcSecurityGroupIds = flex.ExpandStringSet(v) + inputR.VpcSecurityGroupIds = flex.ExpandStringSet(v) + inputC.VpcSecurityGroupIds = flex.ExpandStringSet(v) } if v, ok := d.GetOk("snapshot_identifier"); ok { - backupInput.SnapshotIdentifier = aws.String(v.(string)) + inputR.SnapshotIdentifier = aws.String(v.(string)) } if v, ok := d.GetOk("snapshot_arn"); ok { - backupInput.SnapshotArn = aws.String(v.(string)) + inputR.SnapshotArn = aws.String(v.(string)) } - if backupInput.SnapshotArn != nil || backupInput.SnapshotIdentifier != nil { + if inputR.SnapshotArn != nil || inputR.SnapshotIdentifier != nil { if v, ok := d.GetOk("owner_account"); ok { - backupInput.OwnerAccount = aws.String(v.(string)) + inputR.OwnerAccount = aws.String(v.(string)) } if v, ok := d.GetOk("snapshot_cluster_identifier"); ok { - backupInput.SnapshotClusterIdentifier = aws.String(v.(string)) + inputR.SnapshotClusterIdentifier = aws.String(v.(string)) } - log.Printf("[DEBUG] Restoring Redshift Cluster: %s", backupInput) - output, err := conn.RestoreFromClusterSnapshotWithContext(ctx, backupInput) + log.Printf("[DEBUG] Restoring Redshift Cluster: %s", inputR) + output, err := conn.RestoreFromClusterSnapshotWithContext(ctx, inputR) if err != nil { return sdkdiag.AppendErrorf(diags, "restoring Redshift Cluster (%s) from snapshot: %s", clusterID, err) @@ -579,17 +578,17 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } if v, ok := d.GetOk("encrypted"); ok { - input.Encrypted = aws.Bool(v.(bool)) + inputC.Encrypted = aws.Bool(v.(bool)) } if v := d.Get("number_of_nodes").(int); v > 1 { - input.ClusterType = aws.String(clusterTypeMultiNode) - input.NumberOfNodes = aws.Int64(int64(d.Get("number_of_nodes").(int))) + inputC.ClusterType = aws.String(clusterTypeMultiNode) + inputC.NumberOfNodes = aws.Int64(int64(d.Get("number_of_nodes").(int))) } else { - input.ClusterType = aws.String(clusterTypeSingleNode) + inputC.ClusterType = 
aws.String(clusterTypeSingleNode) } - output, err := conn.CreateClusterWithContext(ctx, input) + output, err := conn.CreateClusterWithContext(ctx, inputC) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Redshift Cluster (%s): %s", clusterID, err) @@ -663,11 +662,11 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter } d.Set("automated_snapshot_retention_period", rsc.AutomatedSnapshotRetentionPeriod) d.Set("availability_zone", rsc.AvailabilityZone) - azr, err := clusterAvailabilityZoneRelocationStatus(rsc) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Redshift Cluster (%s): %s", d.Id(), err) + if v, err := clusterAvailabilityZoneRelocationStatus(rsc); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } else { + d.Set("availability_zone_relocation_enabled", v) } - d.Set("availability_zone_relocation_enabled", azr) d.Set("cluster_identifier", rsc.ClusterIdentifier) d.Set("cluster_namespace_arn", rsc.ClusterNamespaceArn) if err := d.Set("cluster_nodes", flattenClusterNodes(rsc.ClusterNodes)); err != nil { @@ -687,6 +686,9 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set("default_iam_role_arn", rsc.DefaultIamRoleArn) d.Set("encrypted", rsc.Encrypted) d.Set("enhanced_vpc_routing", rsc.EnhancedVpcRouting) + d.Set("iam_roles", tfslices.ApplyToAll(rsc.IamRoles, func(v *redshift.ClusterIamRole) string { + return aws.StringValue(v.IamRoleArn) + })) d.Set("kms_key_id", rsc.KmsKeyId) if err := d.Set("logging", flattenLogging(loggingStatus)); err != nil { return sdkdiag.AppendErrorf(diags, "setting logging: %s", err) @@ -696,11 +698,11 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set("master_username", rsc.MasterUsername) d.Set("master_password_secret_arn", rsc.MasterPasswordSecretArn) d.Set("master_password_secret_kms_key_id", rsc.MasterPasswordSecretKmsKeyId) - maz, err := clusterMultiAZStatus(rsc) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Redshift Cluster (%s): %s", d.Id(), err) + if v, err := clusterMultiAZStatus(rsc); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } else { + d.Set("multi_az", v) } - d.Set("multi_az", maz) d.Set("node_type", rsc.NodeType) d.Set("number_of_nodes", rsc.NumberOfNodes) d.Set("preferred_maintenance_window", rsc.PreferredMaintenanceWindow) @@ -708,6 +710,9 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter if err := d.Set("snapshot_copy", flattenSnapshotCopy(rsc.ClusterSnapshotCopyStatus)); err != nil { return sdkdiag.AppendErrorf(diags, "setting snapshot_copy: %s", err) } + d.Set("vpc_security_group_ids", tfslices.ApplyToAll(rsc.VpcSecurityGroups, func(v *redshift.VpcSecurityGroupMembership) string { + return aws.StringValue(v.VpcSecurityGroupId) + })) d.Set("dns_name", nil) d.Set("endpoint", nil) @@ -724,20 +729,6 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter } } - var apiList []*string - - for _, iamRole := range rsc.IamRoles { - apiList = append(apiList, iamRole.IamRoleArn) - } - d.Set("iam_roles", aws.StringValueSlice(apiList)) - - apiList = nil - - for _, vpcSecurityGroup := range rsc.VpcSecurityGroups { - apiList = append(apiList, vpcSecurityGroup.VpcSecurityGroupId) - } - d.Set("vpc_security_group_ids", aws.StringValueSlice(apiList)) - setTagsOut(ctx, rsc.Tags) return diags @@ -747,7 +738,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int var diags diag.Diagnostics conn := 
meta.(*conns.AWSClient).RedshiftConn(ctx) - if d.HasChangesExcept("aqua_configuration_status", "availability_zone", "iam_roles", "multi_az", "logging", "snapshot_copy", "tags", "tags_all") { + if d.HasChangesExcept("aqua_configuration_status", "availability_zone", "iam_roles", "logging", "multi_az", "snapshot_copy", "tags", "tags_all") { input := &redshift.ModifyClusterInput{ ClusterIdentifier: aws.String(d.Id()), } @@ -844,19 +835,10 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int } } - if d.HasChanges("iam_roles", "default_iam_role_arn") { + if d.HasChanges("default_iam_role_arn", "iam_roles") { o, n := d.GetChange("iam_roles") - if o == nil { - o = new(schema.Set) - } - if n == nil { - n = new(schema.Set) - } - - os := o.(*schema.Set) - ns := n.(*schema.Set) - add := ns.Difference(os) - del := os.Difference(ns) + os, ns := o.(*schema.Set), n.(*schema.Set) + add, del := ns.Difference(os), os.Difference(ns) input := &redshift.ModifyClusterIamRolesInput{ AddIamRoles: flex.ExpandStringSet(add), @@ -865,7 +847,6 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int DefaultIamRoleArn: aws.String(d.Get("default_iam_role_arn").(string)), } - log.Printf("[DEBUG] Modifying Redshift Cluster IAM Roles: %s", input) _, err := conn.ModifyClusterIamRolesWithContext(ctx, input) if err != nil { @@ -883,7 +864,6 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int ClusterIdentifier: aws.String(d.Id()), } - log.Printf("[DEBUG] Modifying Redshift Cluster Aqua Configuration: %s", input) _, err := conn.ModifyAquaConfigurationWithContext(ctx, input) if err != nil { @@ -891,13 +871,13 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int } if d.Get("apply_immediately").(bool) { - rebootInput := &redshift.RebootClusterInput{ + input := &redshift.RebootClusterInput{ ClusterIdentifier: aws.String(d.Id()), } _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, clusterInvalidClusterStateFaultTimeout, func() (interface{}, error) { - return conn.RebootClusterWithContext(ctx, rebootInput) + return conn.RebootClusterWithContext(ctx, input) }, redshift.ErrCodeInvalidClusterStateFault, ) @@ -907,7 +887,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int } if _, err := waitClusterRebooted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for Redshift Cluster (%s) Rebooted: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for Redshift Cluster (%s) reboot: %s", d.Id(), err) } if _, err := waitClusterAquaApplied(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { @@ -923,7 +903,6 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int ClusterIdentifier: aws.String(d.Id()), } - log.Printf("[DEBUG] Relocating Redshift Cluster: %s", input) _, err := conn.ModifyClusterWithContext(ctx, input) if err != nil { @@ -964,37 +943,34 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int } if d.HasChange("multi_az") { - multiAZEnabled := d.Get("multi_az").(bool) - azRelocationEnabled := d.Get("availability_zone_relocation_enabled").(bool) - if multiAZEnabled && azRelocationEnabled { - return sdkdiag.AppendErrorf(diags, "modifying Redshift Cluster (%s): availability_zone_relocation_enabled and multi_az can't be both true", d.Id()) - } + azRelocationEnabled, multiAZ := d.Get("availability_zone_relocation_enabled").(bool), 
d.Get("multi_az").(bool)
 		input := &redshift.ModifyClusterInput{
-			MultiAZ:           aws.Bool(azRelocationEnabled),
 			ClusterIdentifier: aws.String(d.Id()),
+			MultiAZ:           aws.Bool(multiAZ),
 		}
 
-		log.Printf("[DEBUG] Changing cluster multi AZ configuration: %s", input)
 		_, err := conn.ModifyClusterWithContext(ctx, input)
 
 		if err != nil {
-			return sdkdiag.AppendErrorf(diags, "Changing cluster multi AZ configuration (%s): %s", d.Id(), err)
+			return sdkdiag.AppendErrorf(diags, "modifying Redshift Cluster (%s) multi-AZ: %s", d.Id(), err)
 		}
 
 		if _, err = waitClusterUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil {
 			return sdkdiag.AppendErrorf(diags, "waiting for Redshift Cluster (%s) update: %s", d.Id(), err)
 		}
-		if !multiAZEnabled {
-			// Disabling MultiAZ, Redshift automatically enables the AZ Relocation. For that reason is necessary to align it with the current configuration
+
+		if !multiAZ {
+			// When multi-AZ is disabled, Redshift automatically enables AZ relocation.
+			// For that reason it is necessary to align it with the configured value.
 			input = &redshift.ModifyClusterInput{
 				AvailabilityZoneRelocation: aws.Bool(azRelocationEnabled),
 				ClusterIdentifier:          aws.String(d.Id()),
 			}
-			log.Printf("[DEBUG] Changing cluster availability zone relocation configuration: %s\n", input)
+
 			_, err = conn.ModifyClusterWithContext(ctx, input)
 
 			if err != nil {
-				return sdkdiag.AppendErrorf(diags, "Changing cluster multi AZ configuration (%s): %s", d.Id(), err)
+				return sdkdiag.AppendErrorf(diags, "modifying Redshift Cluster (%s) AZ relocation: %s", d.Id(), err)
 			}
 
 			if _, err = waitClusterUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil {