From 82a538b280134b8e0cb76e456d999011d9883a25 Mon Sep 17 00:00:00 2001 From: Bruno Schaatsbergen Date: Thu, 25 Aug 2022 22:53:36 +0200 Subject: [PATCH 01/29] Add dual stack support for Aurora --- internal/service/rds/cluster.go | 28 ++++++++++++++++++++++++ internal/service/rds/cluster_instance.go | 15 +++++++++++++ 2 files changed, 43 insertions(+) diff --git a/internal/service/rds/cluster.go b/internal/service/rds/cluster.go index c70fcc9cf68..ece33eb1bae 100644 --- a/internal/service/rds/cluster.go +++ b/internal/service/rds/cluster.go @@ -235,6 +235,12 @@ func ResourceCluster() *schema.Resource { ForceNew: true, ValidateFunc: verify.ValidARN, }, + "network_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(NetworkType_Values(), false), + }, "master_password": { Type: schema.TypeString, Optional: true, @@ -540,6 +546,10 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { requiresModifyDbCluster = true } + if attr, ok := d.GetOk("network_type"); ok { + opts.NetworkType = aws.String(attr.(string)) + } + if attr, ok := d.GetOk("option_group_name"); ok { opts.OptionGroupName = aws.String(attr.(string)) } @@ -654,6 +664,10 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { createOpts.KmsKeyId = aws.String(attr.(string)) } + if attr, ok := d.GetOk("network_type"); ok { + createOpts.NetworkType = aws.String(attr.(string)) + } + if attr, ok := d.GetOk("iam_database_authentication_enabled"); ok { createOpts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool)) } @@ -748,6 +762,10 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { createOpts.KmsKeyId = aws.String(attr.(string)) } + if attr, ok := d.GetOk("network_type"); ok { + createOpts.NetworkType = aws.String(attr.(string)) + } + if attr, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && attr.(*schema.Set).Len() > 0 { createOpts.EnableCloudwatchLogsExports = flex.ExpandStringSet(attr.(*schema.Set)) } @@ -883,6 +901,10 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { createOpts.PreferredMaintenanceWindow = aws.String(v.(string)) } + if attr, ok := d.GetOk("network_type"); ok { + createOpts.NetworkType = aws.String(attr.(string)) + } + if attr, ok := d.GetOk("kms_key_id"); ok { createOpts.KmsKeyId = aws.String(attr.(string)) } @@ -1051,6 +1073,7 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("endpoint", dbc.Endpoint) d.Set("db_cluster_instance_class", dbc.DBClusterInstanceClass) d.Set("engine_mode", dbc.EngineMode) + d.Set("network_type", dbc.NetworkType) d.Set("engine", dbc.Engine) d.Set("hosted_zone_id", dbc.HostedZoneId) d.Set("iam_database_authentication_enabled", dbc.IAMDatabaseAuthenticationEnabled) @@ -1237,6 +1260,11 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { requestUpdate = true } + if d.HasChange("network_type") { + req.NetworkType = aws.String(d.Get("network_type").(string)) + requestUpdate = true + } + if d.HasChange("enabled_cloudwatch_logs_exports") { oraw, nraw := d.GetChange("enabled_cloudwatch_logs_exports") o := oraw.(*schema.Set) diff --git a/internal/service/rds/cluster_instance.go b/internal/service/rds/cluster_instance.go index 03c0636a20f..e37531e9c3c 100644 --- a/internal/service/rds/cluster_instance.go +++ b/internal/service/rds/cluster_instance.go @@ -142,6 +142,12 @@ func ResourceClusterInstance() *schema.Resource { Optional: true, Computed: true, }, + 
"network_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(NetworkType_Values(), false), + }, "performance_insights_enabled": { Type: schema.TypeBool, Optional: true, @@ -267,6 +273,10 @@ func resourceClusterInstanceCreate(d *schema.ResourceData, meta interface{}) err input.MonitoringRoleArn = aws.String(v.(string)) } + if v, ok := d.GetOk("network_type"); ok { + input.NetworkType = aws.String(v.(string)) + } + if v, ok := d.GetOk("performance_insights_enabled"); ok { input.EnablePerformanceInsights = aws.Bool(v.(bool)) } @@ -409,6 +419,7 @@ func resourceClusterInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("preferred_maintenance_window", db.PreferredMaintenanceWindow) d.Set("promotion_tier", db.PromotionTier) d.Set("publicly_accessible", db.PubliclyAccessible) + d.Set("network_type", db.NetworkType) d.Set("storage_encrypted", db.StorageEncrypted) clusterSetResourceDataEngineVersionFromClusterInstance(d, db) @@ -498,6 +509,10 @@ func resourceClusterInstanceUpdate(d *schema.ResourceData, meta interface{}) err input.PubliclyAccessible = aws.Bool(d.Get("publicly_accessible").(bool)) } + if d.HasChange("network_type") { + input.NetworkType = aws.String(d.Get("network_type").(string)) + } + log.Printf("[DEBUG] Updating RDS Cluster Instance: %s", input) _, err := tfresource.RetryWhenAWSErrMessageContains(propagationTimeout, func() (interface{}, error) { From 8446227406ed81b0243a1d2e5f87f4bc6b9dfc16 Mon Sep 17 00:00:00 2001 From: Bruno Schaatsbergen Date: Thu, 25 Aug 2022 22:56:46 +0200 Subject: [PATCH 02/29] Create 26489.txt --- .changelog/26489.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changelog/26489.txt diff --git a/.changelog/26489.txt b/.changelog/26489.txt new file mode 100644 index 00000000000..e3e72aea05e --- /dev/null +++ b/.changelog/26489.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_rds_cluster: Add `network_type` attribute +``` + +```release-note:enhancement +resource/aws_rds_cluster_instance: Add `network_type` attribute +``` From 46ee86f390600d5963de0de88048330c4406fe7a Mon Sep 17 00:00:00 2001 From: Bruno Schaatsbergen Date: Sat, 3 Sep 2022 02:14:53 +0200 Subject: [PATCH 03/29] Add `TestAccRDSClusterInstance_networkType` --- internal/service/rds/cluster_test.go | 56 ++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go index bab9c304ab5..8253576f442 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -779,6 +779,39 @@ func TestAccRDSCluster_kmsKey(t *testing.T) { }) } +func TestAccRDSCluster_networkType(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbCluster1 rds.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, rds.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig__networkType(rName, "IPV4"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &dbCluster1), + resource.TestCheckResourceAttr(resourceName, "network_type", "IPV4"), + ), + }, + { + Config: testAccClusterConfig__networkType(rName, "DUAL"), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &dbCluster1), + resource.TestCheckResourceAttr(resourceName, "network_type", "DUAL"), + ), + }, + }, + }) +} + func TestAccRDSCluster_encrypted(t *testing.T) { var v rds.DBCluster resourceName := "aws_rds_cluster.test" @@ -3353,6 +3386,29 @@ resource "aws_rds_cluster" "alternate" { `, rName)) } +func testAccClusterConfig__networkType(rName string, networkType string) string { + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnetsIPv6(rName, 2), + fmt.Sprintf(` +resource "aws_db_subnet_group" "test" { + name = %[1]q + subnet_ids = aws_subnet.test[*].id +} + +resource "aws_rds_cluster" "test" { + cluster_identifier = %[1]q + db_subnet_group_name = aws_db_subnet_group.test.name + network_type = %[2]q + engine = "aurora-postgresql" + engine_version = "14.3" + master_password = "barbarbarbar" + master_username = "foo" + skip_final_snapshot = true + apply_immediately = true +} +`, rName, networkType)) +} + func testAccClusterConfig_deletionProtection(rName string, deletionProtection bool) string { return fmt.Sprintf(` resource "aws_rds_cluster" "test" { From 9605f61f60df8444f7fab237459d9e3a733591f5 Mon Sep 17 00:00:00 2001 From: Bruno Schaatsbergen Date: Sat, 3 Sep 2022 02:26:20 +0200 Subject: [PATCH 04/29] Update docs to reflect the new `network_type` arg --- website/docs/r/rds_cluster.html.markdown | 1 + website/docs/r/rds_cluster_instance.html.markdown | 1 + 2 files changed, 2 insertions(+) diff --git a/website/docs/r/rds_cluster.html.markdown b/website/docs/r/rds_cluster.html.markdown index 78d34fe5053..b335e583c56 100644 --- a/website/docs/r/rds_cluster.html.markdown +++ b/website/docs/r/rds_cluster.html.markdown @@ -186,6 +186,7 @@ The following arguments are supported: * `preferred_backup_window` - (Optional) The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.Time in UTC. Default: A 30-minute window selected at random from an 8-hour block of time per regionE.g., 04:00-09:00 * `preferred_maintenance_window` - (Optional) The weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 * `replication_source_identifier` - (Optional) ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica. If DB Cluster is part of a Global Cluster, use the [`lifecycle` configuration block `ignore_changes` argument](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to prevent Terraform from showing differences for this argument instead of configuring this value. +* `network_type` - (Optional) The network type of the DB instance. Valid values: `IPV4`, `DUAL`. * `restore_to_point_in_time` - (Optional) Nested attribute for [point in time restore](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_PIT.html). More details below. * `scaling_configuration` - (Optional) Nested attribute with scaling properties. Only valid when `engine_mode` is set to `serverless`. More details below. * `serverlessv2_scaling_configuration`- (Optional) Nested attribute with scaling properties for ServerlessV2. Only valid when `engine_mode` is set to `provisioned`. More details below. 
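For reference, a minimal Terraform sketch of the new `network_type` argument, modeled on the `testAccClusterConfig__networkType` acceptance-test configuration above. All resource names are placeholders, and the VPC/subnet wiring shown is only one way to provide the dual-stack (IPv4 + IPv6) subnets that `network_type = "DUAL"` needs.

```terraform
data "aws_availability_zones" "available" {
  state = "available"
}

# Dual-stack VPC: subnets need both IPv4 and IPv6 CIDRs before the cluster can use "DUAL".
resource "aws_vpc" "example" {
  cidr_block                       = "10.0.0.0/16"
  assign_generated_ipv6_cidr_block = true
}

resource "aws_subnet" "example" {
  count             = 2
  vpc_id            = aws_vpc.example.id
  availability_zone = data.aws_availability_zones.available.names[count.index]
  cidr_block        = cidrsubnet(aws_vpc.example.cidr_block, 8, count.index)
  ipv6_cidr_block   = cidrsubnet(aws_vpc.example.ipv6_cidr_block, 8, count.index)
}

resource "aws_db_subnet_group" "example" {
  name       = "example"
  subnet_ids = aws_subnet.example[*].id
}

resource "aws_rds_cluster" "example" {
  cluster_identifier   = "example"
  engine               = "aurora-postgresql"
  engine_version       = "14.3"
  db_subnet_group_name = aws_db_subnet_group.example.name
  network_type         = "DUAL" # valid values per the new schema: "IPV4" (default) or "DUAL"
  master_username      = "example"
  master_password      = "avoid-plaintext-passwords"
  skip_final_snapshot  = true
  apply_immediately    = true
}
```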
diff --git a/website/docs/r/rds_cluster_instance.html.markdown b/website/docs/r/rds_cluster_instance.html.markdown index 8a97b3ffc76..d00e7707935 100644 --- a/website/docs/r/rds_cluster_instance.html.markdown +++ b/website/docs/r/rds_cluster_instance.html.markdown @@ -82,6 +82,7 @@ what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances. * `performance_insights_enabled` - (Optional) Specifies whether Performance Insights is enabled or not. * `performance_insights_kms_key_id` - (Optional) ARN for the KMS key to encrypt Performance Insights data. When specifying `performance_insights_kms_key_id`, `performance_insights_enabled` needs to be set to true. * `performance_insights_retention_period` - (Optional) Amount of time in days to retain Performance Insights data. Valida values are `7`, `731` (2 years) or a multiple of `31`. When specifying `performance_insights_retention_period`, `performance_insights_enabled` needs to be set to true. Defaults to '7'. +* `network_type` - (Optional) The network type of the DB instance. Valid values: `IPV4`, `DUAL`. * `copy_tags_to_snapshot` – (Optional, boolean) Indicates whether to copy all of the user-defined tags from the DB instance to snapshots of the DB instance. Default `false`. * `ca_cert_identifier` - (Optional) The identifier of the CA certificate for the DB instance. * `tags` - (Optional) A map of tags to assign to the instance. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. From 2e37b1dcc1efd66570fbc5cc4f38e325d5f37515 Mon Sep 17 00:00:00 2001 From: Bruno Schaatsbergen Date: Sat, 3 Sep 2022 02:27:19 +0200 Subject: [PATCH 05/29] strip off the `1` --- internal/service/rds/cluster_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go index 8253576f442..ffc10b12263 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -784,7 +784,7 @@ func TestAccRDSCluster_networkType(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var dbCluster1 rds.DBCluster + var dbCluster rds.DBCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rds_cluster.test" @@ -797,14 +797,14 @@ func TestAccRDSCluster_networkType(t *testing.T) { { Config: testAccClusterConfig__networkType(rName, "IPV4"), Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(resourceName, &dbCluster1), + testAccCheckClusterExists(resourceName, &dbCluster), resource.TestCheckResourceAttr(resourceName, "network_type", "IPV4"), ), }, { Config: testAccClusterConfig__networkType(rName, "DUAL"), Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(resourceName, &dbCluster1), + testAccCheckClusterExists(resourceName, &dbCluster), resource.TestCheckResourceAttr(resourceName, "network_type", "DUAL"), ), }, From 1a1c70b3fac86aeab18c7dea5eedc6303dd28e32 Mon Sep 17 00:00:00 2001 From: Bruno Schaatsbergen Date: Sat, 3 Sep 2022 02:59:52 +0200 Subject: [PATCH 06/29] Create `TestAccRDSClusterInstance_networkType` --- internal/service/rds/cluster_instance_test.go | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/internal/service/rds/cluster_instance_test.go b/internal/service/rds/cluster_instance_test.go index 7f68ff54c43..2a44cc5b330 100644 --- 
a/internal/service/rds/cluster_instance_test.go +++ b/internal/service/rds/cluster_instance_test.go @@ -516,6 +516,39 @@ func TestAccRDSClusterInstance_monitoringInterval(t *testing.T) { }) } +func TestAccRDSClusterInstance_networkType(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var v rds.DBInstance + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, rds.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterInstanceDestroy, + Steps: []resource.TestStep{ + { + Config: testAccClusterInstanceConfig_networkType(rName, "IPV4"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterInstanceExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "network_type", "IPV4"), + ), + }, + { + Config: testAccClusterInstanceConfig_networkType(rName, "DUAL"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterInstanceExists(resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "network_type", "DUAL"), + ), + }, + }, + }) +} + func TestAccRDSClusterInstance_MonitoringRoleARN_enabledToDisabled(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -1548,6 +1581,45 @@ resource "aws_rds_cluster_instance" "test" { `, rName)) } +func testAccClusterInstanceConfig_networkType(rName string, networkType string) string { + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnetsIPv6(rName, 2), + fmt.Sprintf(` +resource "aws_db_subnet_group" "test" { + name = %[1]q + subnet_ids = aws_subnet.test[*].id +} + +resource "aws_rds_cluster" "test" { + cluster_identifier = %[1]q + db_subnet_group_name = aws_db_subnet_group.test.name + engine = "aurora-postgresql" + engine_version = "14.3" + network_type = %[2]q + master_password = "barbarbarbar" + master_username = "foo" + skip_final_snapshot = true + apply_immediately = true +} + +data "aws_rds_orderable_db_instance" "test" { + engine = aws_rds_cluster.test.engine + engine_version = aws_rds_cluster.test.engine_version + supports_performance_insights = true + preferred_instance_classes = ["db.t3.medium", "db.r5.large", "db.r4.large"] +} + +resource "aws_rds_cluster_instance" "test" { + cluster_identifier = aws_rds_cluster.test.id + engine = aws_rds_cluster.test.engine + identifier = %[1]q + instance_class = data.aws_rds_orderable_db_instance.test.instance_class + performance_insights_enabled = true + apply_immediately = true +} +`, rName, networkType)) +} + func testAccClusterInstanceConfig_performanceInsightsEnabledAuroraMySQL1(rName, engine string) string { return fmt.Sprintf(` resource "aws_rds_cluster" "test" { From 862b6b4e4febfefe2562e9fa277c34a191d66fb6 Mon Sep 17 00:00:00 2001 From: Bruno Schaatsbergen Date: Sun, 4 Sep 2022 18:19:25 +0200 Subject: [PATCH 07/29] Treat network type as a computed arg --- internal/service/rds/cluster_instance.go | 14 +--- internal/service/rds/cluster_instance_test.go | 72 ------------------- 2 files changed, 2 insertions(+), 84 deletions(-) diff --git a/internal/service/rds/cluster_instance.go b/internal/service/rds/cluster_instance.go index e37531e9c3c..2cda99cf4cc 100644 --- a/internal/service/rds/cluster_instance.go +++ b/internal/service/rds/cluster_instance.go @@ -143,10 +143,8 @@ func ResourceClusterInstance() *schema.Resource { Computed: true, }, 
"network_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(NetworkType_Values(), false), + Type: schema.TypeString, + Computed: true, }, "performance_insights_enabled": { Type: schema.TypeBool, @@ -273,10 +271,6 @@ func resourceClusterInstanceCreate(d *schema.ResourceData, meta interface{}) err input.MonitoringRoleArn = aws.String(v.(string)) } - if v, ok := d.GetOk("network_type"); ok { - input.NetworkType = aws.String(v.(string)) - } - if v, ok := d.GetOk("performance_insights_enabled"); ok { input.EnablePerformanceInsights = aws.Bool(v.(bool)) } @@ -509,10 +503,6 @@ func resourceClusterInstanceUpdate(d *schema.ResourceData, meta interface{}) err input.PubliclyAccessible = aws.Bool(d.Get("publicly_accessible").(bool)) } - if d.HasChange("network_type") { - input.NetworkType = aws.String(d.Get("network_type").(string)) - } - log.Printf("[DEBUG] Updating RDS Cluster Instance: %s", input) _, err := tfresource.RetryWhenAWSErrMessageContains(propagationTimeout, func() (interface{}, error) { diff --git a/internal/service/rds/cluster_instance_test.go b/internal/service/rds/cluster_instance_test.go index 2a44cc5b330..7f68ff54c43 100644 --- a/internal/service/rds/cluster_instance_test.go +++ b/internal/service/rds/cluster_instance_test.go @@ -516,39 +516,6 @@ func TestAccRDSClusterInstance_monitoringInterval(t *testing.T) { }) } -func TestAccRDSClusterInstance_networkType(t *testing.T) { - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var v rds.DBInstance - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_rds_cluster_instance.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, rds.EndpointsID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterInstanceDestroy, - Steps: []resource.TestStep{ - { - Config: testAccClusterInstanceConfig_networkType(rName, "IPV4"), - Check: resource.ComposeTestCheckFunc( - testAccCheckClusterInstanceExists(resourceName, &v), - resource.TestCheckResourceAttr(resourceName, "network_type", "IPV4"), - ), - }, - { - Config: testAccClusterInstanceConfig_networkType(rName, "DUAL"), - Check: resource.ComposeTestCheckFunc( - testAccCheckClusterInstanceExists(resourceName, &v), - resource.TestCheckResourceAttr(resourceName, "network_type", "DUAL"), - ), - }, - }, - }) -} - func TestAccRDSClusterInstance_MonitoringRoleARN_enabledToDisabled(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -1581,45 +1548,6 @@ resource "aws_rds_cluster_instance" "test" { `, rName)) } -func testAccClusterInstanceConfig_networkType(rName string, networkType string) string { - return acctest.ConfigCompose( - acctest.ConfigVPCWithSubnetsIPv6(rName, 2), - fmt.Sprintf(` -resource "aws_db_subnet_group" "test" { - name = %[1]q - subnet_ids = aws_subnet.test[*].id -} - -resource "aws_rds_cluster" "test" { - cluster_identifier = %[1]q - db_subnet_group_name = aws_db_subnet_group.test.name - engine = "aurora-postgresql" - engine_version = "14.3" - network_type = %[2]q - master_password = "barbarbarbar" - master_username = "foo" - skip_final_snapshot = true - apply_immediately = true -} - -data "aws_rds_orderable_db_instance" "test" { - engine = aws_rds_cluster.test.engine - engine_version = aws_rds_cluster.test.engine_version - supports_performance_insights = true - preferred_instance_classes = ["db.t3.medium", 
"db.r5.large", "db.r4.large"] -} - -resource "aws_rds_cluster_instance" "test" { - cluster_identifier = aws_rds_cluster.test.id - engine = aws_rds_cluster.test.engine - identifier = %[1]q - instance_class = data.aws_rds_orderable_db_instance.test.instance_class - performance_insights_enabled = true - apply_immediately = true -} -`, rName, networkType)) -} - func testAccClusterInstanceConfig_performanceInsightsEnabledAuroraMySQL1(rName, engine string) string { return fmt.Sprintf(` resource "aws_rds_cluster" "test" { From 070a0db54391061d840748d85145d60d0bde9778 Mon Sep 17 00:00:00 2001 From: Bruno Schaatsbergen Date: Sun, 4 Sep 2022 18:20:31 +0200 Subject: [PATCH 08/29] Move `network_type` from arg to attribute ref --- website/docs/r/rds_cluster_instance.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/rds_cluster_instance.html.markdown b/website/docs/r/rds_cluster_instance.html.markdown index d00e7707935..478412389e4 100644 --- a/website/docs/r/rds_cluster_instance.html.markdown +++ b/website/docs/r/rds_cluster_instance.html.markdown @@ -82,7 +82,6 @@ what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances. * `performance_insights_enabled` - (Optional) Specifies whether Performance Insights is enabled or not. * `performance_insights_kms_key_id` - (Optional) ARN for the KMS key to encrypt Performance Insights data. When specifying `performance_insights_kms_key_id`, `performance_insights_enabled` needs to be set to true. * `performance_insights_retention_period` - (Optional) Amount of time in days to retain Performance Insights data. Valida values are `7`, `731` (2 years) or a multiple of `31`. When specifying `performance_insights_retention_period`, `performance_insights_enabled` needs to be set to true. Defaults to '7'. -* `network_type` - (Optional) The network type of the DB instance. Valid values: `IPV4`, `DUAL`. * `copy_tags_to_snapshot` – (Optional, boolean) Indicates whether to copy all of the user-defined tags from the DB instance to snapshots of the DB instance. Default `false`. * `ca_cert_identifier` - (Optional) The identifier of the CA certificate for the DB instance. * `tags` - (Optional) A map of tags to assign to the instance. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -103,6 +102,7 @@ In addition to all arguments above, the following attributes are exported: * `port` - The database port * `storage_encrypted` - Specifies whether the DB cluster is encrypted. * `kms_key_id` - The ARN for the KMS encryption key if one is set to the cluster. +* `network_type` - The network type of the DB instance. * `dbi_resource_id` - The region-unique, immutable identifier for the DB instance. * `performance_insights_enabled` - Specifies whether Performance Insights is enabled or not. * `performance_insights_kms_key_id` - The ARN for the KMS encryption key used by Performance Insights. 
From 85cab00b24ef89015832363a81fa62937034df11 Mon Sep 17 00:00:00 2001 From: Bruno Schaatsbergen Date: Sun, 4 Sep 2022 18:21:20 +0200 Subject: [PATCH 09/29] Update 26489.txt --- .changelog/26489.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.changelog/26489.txt b/.changelog/26489.txt index e3e72aea05e..d92450de18b 100644 --- a/.changelog/26489.txt +++ b/.changelog/26489.txt @@ -1,7 +1,7 @@ ```release-note:enhancement -resource/aws_rds_cluster: Add `network_type` attribute +resource/aws_rds_cluster: Add `network_type` argument ``` ```release-note:enhancement -resource/aws_rds_cluster_instance: Add `network_type` attribute +resource/aws_rds_cluster_instance: Add `network_type` attribute reference ``` From b8aec46f08943bf2a08b18099d5f15146e31a276 Mon Sep 17 00:00:00 2001 From: Bruno Schaatsbergen Date: Sun, 4 Sep 2022 18:37:00 +0200 Subject: [PATCH 10/29] Remove dupe underscore --- internal/service/rds/cluster_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go index ffc10b12263..43b64ed473c 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -795,14 +795,14 @@ func TestAccRDSCluster_networkType(t *testing.T) { CheckDestroy: testAccCheckClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccClusterConfig__networkType(rName, "IPV4"), + Config: testAccClusterConfig_networkType(rName, "IPV4"), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(resourceName, &dbCluster), resource.TestCheckResourceAttr(resourceName, "network_type", "IPV4"), ), }, { - Config: testAccClusterConfig__networkType(rName, "DUAL"), + Config: testAccClusterConfig_networkType(rName, "DUAL"), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(resourceName, &dbCluster), resource.TestCheckResourceAttr(resourceName, "network_type", "DUAL"), @@ -3386,7 +3386,7 @@ resource "aws_rds_cluster" "alternate" { `, rName)) } -func testAccClusterConfig__networkType(rName string, networkType string) string { +func testAccClusterConfig_networkType(rName string, networkType string) string { return acctest.ConfigCompose( acctest.ConfigVPCWithSubnetsIPv6(rName, 2), fmt.Sprintf(` From 2371979c1bc99da3020d381c6ffe618f632b1597 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 6 Sep 2022 08:05:03 -0400 Subject: [PATCH 11/29] Tweak CHANGELOG entry. --- .changelog/26489.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/26489.txt b/.changelog/26489.txt index d92450de18b..55d7c37431e 100644 --- a/.changelog/26489.txt +++ b/.changelog/26489.txt @@ -3,5 +3,5 @@ resource/aws_rds_cluster: Add `network_type` argument ``` ```release-note:enhancement -resource/aws_rds_cluster_instance: Add `network_type` attribute reference +resource/aws_rds_cluster_instance: Add `network_type` attribute ``` From 43d8963d0b32e8ef5ec48f6e187cdac88ce0ccef Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 6 Sep 2022 08:10:49 -0400 Subject: [PATCH 12/29] r/aws_rds_cluster: Alphabetize attributes. 
--- internal/service/rds/cluster.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/service/rds/cluster.go b/internal/service/rds/cluster.go index ece33eb1bae..8d83b80d838 100644 --- a/internal/service/rds/cluster.go +++ b/internal/service/rds/cluster.go @@ -235,12 +235,6 @@ func ResourceCluster() *schema.Resource { ForceNew: true, ValidateFunc: verify.ValidARN, }, - "network_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(NetworkType_Values(), false), - }, "master_password": { Type: schema.TypeString, Optional: true, @@ -252,6 +246,12 @@ func ResourceCluster() *schema.Resource { Optional: true, ForceNew: true, }, + "network_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(NetworkType_Values(), false), + }, "port": { Type: schema.TypeInt, Optional: true, From bc8b51933df54d5be2ba8fc921d4c43526c32503 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 6 Sep 2022 08:41:11 -0400 Subject: [PATCH 13/29] r/aws_rds_cluster: Tidy up resource Delete. Acceptance test output: % make testacc TESTARGS='-run=TestAccRDSCluster_basic' PKG=rds ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/rds/... -v -count 1 -parallel 2 -run=TestAccRDSCluster_basic -timeout 180m === RUN TestAccRDSCluster_basic === PAUSE TestAccRDSCluster_basic === CONT TestAccRDSCluster_basic --- PASS: TestAccRDSCluster_basic (161.34s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/rds 165.297s --- internal/service/rds/cluster.go | 71 +++++++++++++--------------- internal/service/rds/cluster_test.go | 68 +++++++++++--------------- 2 files changed, 62 insertions(+), 77 deletions(-) diff --git a/internal/service/rds/cluster.go b/internal/service/rds/cluster.go index 8d83b80d838..7aa27357fd7 100644 --- a/internal/service/rds/cluster.go +++ b/internal/service/rds/cluster.go @@ -459,15 +459,6 @@ func ResourceCluster() *schema.Resource { } } -func resourceClusterImport( - d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched - // from any API call, so we need to default skip_final_snapshot to true so - // that final_snapshot_identifier is not required - d.Set("skip_final_snapshot", true) - return []*schema.ResourceData{d}, nil -} - func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).RDSConn defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig @@ -1403,73 +1394,79 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { func resourceClusterDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).RDSConn - log.Printf("[DEBUG] Destroying RDS Cluster (%s)", d.Id()) // Automatically remove from global cluster to bypass this error on deletion: // InvalidDBClusterStateFault: This cluster is a part of a global cluster, please remove it from globalcluster first if d.Get("global_cluster_identifier").(string) != "" { + globalClusterID := d.Get("global_cluster_identifier").(string) input := &rds.RemoveFromGlobalClusterInput{ DbClusterIdentifier: aws.String(d.Get("arn").(string)), - GlobalClusterIdentifier: aws.String(d.Get("global_cluster_identifier").(string)), + GlobalClusterIdentifier: aws.String(globalClusterID), } log.Printf("[DEBUG] Removing RDS Cluster from RDS Global 
Cluster: %s", input) _, err := conn.RemoveFromGlobalCluster(input) if err != nil && !tfawserr.ErrCodeEquals(err, rds.ErrCodeGlobalClusterNotFoundFault) && !tfawserr.ErrMessageContains(err, "InvalidParameterValue", "is not found in global cluster") { - return fmt.Errorf("error removing RDS Cluster (%s) from RDS Global Cluster: %s", d.Id(), err) + return fmt.Errorf("removing RDS Cluster (%s) from RDS Global Cluster (%s): %w", d.Id(), globalClusterID, err) } } - deleteOpts := rds.DeleteDBClusterInput{ + skipFinalSnapshot := d.Get("skip_final_snapshot").(bool) + input := &rds.DeleteDBClusterInput{ DBClusterIdentifier: aws.String(d.Id()), + SkipFinalSnapshot: aws.Bool(skipFinalSnapshot), } - skipFinalSnapshot := d.Get("skip_final_snapshot").(bool) - deleteOpts.SkipFinalSnapshot = aws.Bool(skipFinalSnapshot) - if !skipFinalSnapshot { - if name, present := d.GetOk("final_snapshot_identifier"); present { - deleteOpts.FinalDBSnapshotIdentifier = aws.String(name.(string)) + if v, ok := d.GetOk("final_snapshot_identifier"); ok { + input.FinalDBSnapshotIdentifier = aws.String(v.(string)) } else { - return fmt.Errorf("RDS Cluster FinalSnapshotIdentifier is required when a final snapshot is required") + return fmt.Errorf("RDS Cluster final_snapshot_identifier is required when skip_final_snapshot is false") } } - log.Printf("[DEBUG] RDS Cluster delete options: %s", deleteOpts) - - err := resource.Retry(clusterTimeoutDelete, func() *resource.RetryError { - _, err := conn.DeleteDBCluster(&deleteOpts) - if err != nil { + log.Printf("[DEBUG] Deleting RDS Cluster: %s", d.Id()) + _, err := tfresource.RetryWhen(clusterTimeoutDelete, + func() (interface{}, error) { + return conn.DeleteDBCluster(input) + }, + func(err error) (bool, error) { if tfawserr.ErrMessageContains(err, rds.ErrCodeInvalidDBClusterStateFault, "is not currently in the available state") { - return resource.RetryableError(err) + return true, err } + if tfawserr.ErrMessageContains(err, rds.ErrCodeInvalidDBClusterStateFault, "cluster is a part of a global cluster") { - return resource.RetryableError(err) - } - if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBClusterNotFoundFault) { - return nil + return true, err } - return resource.NonRetryableError(err) - } - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.DeleteDBCluster(&deleteOpts) + return false, err + }, + ) + + if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBClusterNotFoundFault) { + return nil } if err != nil { - return fmt.Errorf("error deleting RDS Cluster (%s): %s", d.Id(), err) + return fmt.Errorf("deleting RDS Cluster (%s): %w", d.Id(), err) } if err := WaitForClusterDeletion(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return fmt.Errorf("error waiting for RDS Cluster (%s) deletion: %s", d.Id(), err) + return fmt.Errorf("waiting for RDS Cluster (%s) delete: %w", d.Id(), err) } return nil } +func resourceClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched + // from any API call, so we need to default skip_final_snapshot to true so + // that final_snapshot_identifier is not required + d.Set("skip_final_snapshot", true) + return []*schema.ResourceData{d}, nil +} + func resourceClusterStateRefreshFunc(conn *rds.RDS, dbClusterIdentifier string) resource.StateRefreshFunc { return func() (interface{}, string, error) { resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ diff --git a/internal/service/rds/cluster_test.go 
b/internal/service/rds/cluster_test.go index 43b64ed473c..d7ac6701000 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -17,6 +17,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfrds "github.com/hashicorp/terraform-provider-aws/internal/service/rds" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func init() { @@ -35,7 +37,7 @@ func testAccErrorCheckSkip(t *testing.T) resource.ErrorCheckFunc { func TestAccRDSCluster_basic(t *testing.T) { var dbCluster rds.DBCluster - clusterName := sdkacctest.RandomWithPrefix("tf-aurora-cluster") + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rds_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -45,22 +47,23 @@ func TestAccRDSCluster_basic(t *testing.T) { CheckDestroy: testAccCheckClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccClusterConfig_basic(clusterName), - Check: resource.ComposeTestCheckFunc( + Config: testAccClusterConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckClusterExists(resourceName, &dbCluster), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "rds", fmt.Sprintf("cluster:%s", clusterName)), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "rds", fmt.Sprintf("cluster:%s", rName)), resource.TestCheckResourceAttr(resourceName, "backtrack_window", "0"), + resource.TestCheckResourceAttrSet(resourceName, "cluster_resource_id"), resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshot", "false"), - resource.TestCheckResourceAttr(resourceName, "storage_encrypted", "false"), resource.TestCheckResourceAttr(resourceName, "db_cluster_parameter_group_name", "default.aurora5.6"), - resource.TestCheckResourceAttrSet(resourceName, "reader_endpoint"), - resource.TestCheckResourceAttrSet(resourceName, "cluster_resource_id"), + resource.TestCheckResourceAttr(resourceName, "enabled_cloudwatch_logs_exports.#", "0"), resource.TestCheckResourceAttr(resourceName, "engine", "aurora"), resource.TestCheckResourceAttrSet(resourceName, "engine_version"), resource.TestCheckResourceAttr(resourceName, "global_cluster_identifier", ""), resource.TestCheckResourceAttrSet(resourceName, "hosted_zone_id"), - resource.TestCheckResourceAttr(resourceName, "enabled_cloudwatch_logs_exports.#", "0"), + resource.TestCheckResourceAttr(resourceName, "network_type", "IPV4"), + resource.TestCheckResourceAttrSet(resourceName, "reader_endpoint"), resource.TestCheckResourceAttr(resourceName, "scaling_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "storage_encrypted", "false"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), ), }, @@ -2139,25 +2142,17 @@ func testAccCheckClusterDestroyWithProvider(s *terraform.State, provider *schema continue } - // Try to find the Group - var err error - resp, err := conn.DescribeDBClusters( - &rds.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(rs.Primary.ID), - }) - - if err == nil { - if len(resp.DBClusters) != 0 && - *resp.DBClusters[0].DBClusterIdentifier == rs.Primary.ID { - return fmt.Errorf("DB Cluster %s still exists", rs.Primary.ID) - } - } + _, err := tfrds.FindDBClusterByID(conn, rs.Primary.ID) - if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBClusterNotFoundFault) { + if tfresource.NotFound(err) { continue } - return err + if err != nil { + return err + } + + 
return fmt.Errorf("RDS Cluster %s still exists", rs.Primary.ID) } return nil @@ -2222,27 +2217,20 @@ func testAccCheckClusterExistsWithProvider(n string, v *rds.DBCluster, providerF } if rs.Primary.ID == "" { - return fmt.Errorf("No DB Instance ID is set") + return fmt.Errorf("No RDS Cluster ID is set") } - provider := providerF() - conn := provider.Meta().(*conns.AWSClient).RDSConn - resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(rs.Primary.ID), - }) + conn := providerF().Meta().(*conns.AWSClient).RDSConn + + output, err := tfrds.FindDBClusterByID(conn, rs.Primary.ID) if err != nil { return err } - for _, c := range resp.DBClusters { - if *c.DBClusterIdentifier == rs.Primary.ID { - *v = *c - return nil - } - } + *v = *output - return fmt.Errorf("DB Cluster (%s) not found", rs.Primary.ID) + return nil } } @@ -2259,10 +2247,10 @@ func testAccCheckClusterRecreated(i, j *rds.DBCluster) resource.TestCheckFunc { func testAccClusterConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_rds_cluster" "test" { - cluster_identifier = %q - database_name = "mydb" - master_username = "foo" - master_password = "mustbeeightcharaters" + cluster_identifier = %[1]q + database_name = "test" + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" db_cluster_parameter_group_name = "default.aurora5.6" skip_final_snapshot = true } From c6f6ba5625664df2b0f729b0110b2e18cc6c25ab Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 6 Sep 2022 09:21:44 -0400 Subject: [PATCH 14/29] r/aws_rds_cluster: Tidy up resource Read. Acceptance test output: % make testacc TESTARGS='-run=TestAccRDSCluster_basic' PKG=rds ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/rds/... 
-v -count 1 -parallel 2 -run=TestAccRDSCluster_basic -timeout 180m === RUN TestAccRDSCluster_basic === PAUSE TestAccRDSCluster_basic === CONT TestAccRDSCluster_basic --- PASS: TestAccRDSCluster_basic (173.13s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/rds 177.144s --- internal/service/rds/cluster.go | 115 +++++++++++++------------------- internal/service/rds/find.go | 68 +++++++++++++++++++ internal/service/rds/flex.go | 38 ++++++++--- 3 files changed, 144 insertions(+), 77 deletions(-) diff --git a/internal/service/rds/cluster.go b/internal/service/rds/cluster.go index 7aa27357fd7..1f12c802011 100644 --- a/internal/service/rds/cluster.go +++ b/internal/service/rds/cluster.go @@ -1022,29 +1022,23 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { } if err != nil { - return fmt.Errorf("error reading RDS Cluster (%s): %w", d.Id(), err) + return fmt.Errorf("reading RDS Cluster (%s): %w", d.Id(), err) } - if err := d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)); err != nil { - return fmt.Errorf("error setting availability_zones: %s", err) - } - - d.Set("arn", dbc.DBClusterArn) + d.Set("allocated_storage", dbc.AllocatedStorage) + clusterARN := aws.StringValue(dbc.DBClusterArn) + d.Set("arn", clusterARN) + d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)) d.Set("backtrack_window", dbc.BacktrackWindow) d.Set("backup_retention_period", dbc.BackupRetentionPeriod) d.Set("cluster_identifier", dbc.DBClusterIdentifier) - d.Set("copy_tags_to_snapshot", dbc.CopyTagsToSnapshot) - - var cm []string - for _, m := range dbc.DBClusterMembers { - cm = append(cm, aws.StringValue(m.DBInstanceIdentifier)) - } - if err := d.Set("cluster_members", cm); err != nil { - return fmt.Errorf("error setting cluster_members: %s", err) + var clusterMembers []string + for _, v := range dbc.DBClusterMembers { + clusterMembers = append(clusterMembers, aws.StringValue(v.DBInstanceIdentifier)) } - + d.Set("cluster_members", clusterMembers) d.Set("cluster_resource_id", dbc.DbClusterResourceId) - + d.Set("copy_tags_to_snapshot", dbc.CopyTagsToSnapshot) // Only set the DatabaseName if it is not nil. There is a known API bug where // RDS accepts a DatabaseName but does not return it, causing a perpetual // diff. 
@@ -1052,97 +1046,84 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { if dbc.DatabaseName != nil { d.Set("database_name", dbc.DatabaseName) } - + d.Set("db_cluster_instance_class", dbc.DBClusterInstanceClass) d.Set("db_cluster_parameter_group_name", dbc.DBClusterParameterGroup) d.Set("db_subnet_group_name", dbc.DBSubnetGroup) d.Set("deletion_protection", dbc.DeletionProtection) - - if err := d.Set("enabled_cloudwatch_logs_exports", aws.StringValueSlice(dbc.EnabledCloudwatchLogsExports)); err != nil { - return fmt.Errorf("error setting enabled_cloudwatch_logs_exports: %s", err) - } - + d.Set("enabled_cloudwatch_logs_exports", aws.StringValueSlice(dbc.EnabledCloudwatchLogsExports)) + d.Set("enable_http_endpoint", dbc.HttpEndpointEnabled) d.Set("endpoint", dbc.Endpoint) - d.Set("db_cluster_instance_class", dbc.DBClusterInstanceClass) - d.Set("engine_mode", dbc.EngineMode) - d.Set("network_type", dbc.NetworkType) d.Set("engine", dbc.Engine) + d.Set("engine_mode", dbc.EngineMode) + clusterSetResourceDataEngineVersionFromCluster(d, dbc) d.Set("hosted_zone_id", dbc.HostedZoneId) d.Set("iam_database_authentication_enabled", dbc.IAMDatabaseAuthenticationEnabled) - - clusterSetResourceDataEngineVersionFromCluster(d, dbc) - - var roles []string - for _, r := range dbc.AssociatedRoles { - roles = append(roles, aws.StringValue(r.RoleArn)) - } - if err := d.Set("iam_roles", roles); err != nil { - return fmt.Errorf("error setting iam_roles: %s", err) + var iamRoleARNs []string + for _, v := range dbc.AssociatedRoles { + iamRoleARNs = append(iamRoleARNs, aws.StringValue(v.RoleArn)) } - + d.Set("iam_roles", iamRoleARNs) + d.Set("iops", dbc.Iops) d.Set("kms_key_id", dbc.KmsKeyId) d.Set("master_username", dbc.MasterUsername) + d.Set("network_type", dbc.NetworkType) d.Set("port", dbc.Port) d.Set("preferred_backup_window", dbc.PreferredBackupWindow) d.Set("preferred_maintenance_window", dbc.PreferredMaintenanceWindow) d.Set("reader_endpoint", dbc.ReaderEndpoint) d.Set("replication_source_identifier", dbc.ReplicationSourceIdentifier) - - if err := d.Set("scaling_configuration", flattenScalingConfigurationInfo(dbc.ScalingConfigurationInfo)); err != nil { - return fmt.Errorf("error setting scaling_configuration: %s", err) + if dbc.ScalingConfigurationInfo != nil { + if err := d.Set("scaling_configuration", []interface{}{flattenScalingConfigurationInfo(dbc.ScalingConfigurationInfo)}); err != nil { + return fmt.Errorf("setting scaling_configuration: %w", err) + } + } else { + d.Set("scaling_configuration", nil) } - - d.Set("allocated_storage", dbc.AllocatedStorage) - d.Set("storage_type", dbc.StorageType) - d.Set("iops", dbc.Iops) - d.Set("storage_encrypted", dbc.StorageEncrypted) - if dbc.ServerlessV2ScalingConfiguration != nil { if err := d.Set("serverlessv2_scaling_configuration", []interface{}{flattenServerlessV2ScalingConfigurationInfo(dbc.ServerlessV2ScalingConfiguration)}); err != nil { - return fmt.Errorf("error setting serverlessv2_scaling_configuration: %w", err) + return fmt.Errorf("setting serverlessv2_scaling_configuration: %w", err) } } else { d.Set("serverlessv2_scaling_configuration", nil) } - - d.Set("enable_http_endpoint", dbc.HttpEndpointEnabled) - - var vpcg []string - for _, g := range dbc.VpcSecurityGroups { - vpcg = append(vpcg, aws.StringValue(g.VpcSecurityGroupId)) - } - if err := d.Set("vpc_security_group_ids", vpcg); err != nil { - return fmt.Errorf("error setting vpc_security_group_ids: %s", err) + d.Set("storage_encrypted", dbc.StorageEncrypted) + 
d.Set("storage_type", dbc.StorageType) + var securityGroupIDs []string + for _, v := range dbc.VpcSecurityGroups { + securityGroupIDs = append(securityGroupIDs, aws.StringValue(v.VpcSecurityGroupId)) } + d.Set("vpc_security_group_ids", securityGroupIDs) + + tags, err := ListTags(conn, clusterARN) - tags, err := ListTags(conn, aws.StringValue(dbc.DBClusterArn)) if err != nil { - return fmt.Errorf("error listing tags for RDS Cluster (%s): %s", aws.StringValue(dbc.DBClusterArn), err) + return fmt.Errorf("listing tags for RDS Cluster (%s): %w", d.Id(), err) } + tags = tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig) //lintignore:AWSR002 if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %w", err) + return fmt.Errorf("setting tags: %w", err) } if err := d.Set("tags_all", tags.Map()); err != nil { - return fmt.Errorf("error setting tags_all: %w", err) + return fmt.Errorf("setting tags_all: %w", err) } // Fetch and save Global Cluster if engine mode global d.Set("global_cluster_identifier", "") - if aws.StringValue(dbc.EngineMode) == "global" || aws.StringValue(dbc.EngineMode) == "provisioned" { - globalCluster, err := DescribeGlobalClusterFromClusterARN(conn, aws.StringValue(dbc.DBClusterArn)) + if aws.StringValue(dbc.EngineMode) == EngineModeGlobal || aws.StringValue(dbc.EngineMode) == EngineModeProvisioned { + globalCluster, err := FindGlobalClusterByDBClusterARN(conn, aws.StringValue(dbc.DBClusterArn)) - // Ignore the following API error for regions/partitions that do not support RDS Global Clusters: - // InvalidParameterValue: Access Denied to API Version: APIGlobalDatabases - if err != nil && !tfawserr.ErrMessageContains(err, "InvalidParameterValue", "Access Denied to API Version: APIGlobalDatabases") { - return fmt.Errorf("error reading RDS Global Cluster information for DB Cluster (%s): %s", d.Id(), err) - } - - if globalCluster != nil { + if err == nil { d.Set("global_cluster_identifier", globalCluster.GlobalClusterIdentifier) + } else if tfresource.NotFound(err) || tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, "Access Denied to API Version: APIGlobalDatabases") { + // Ignore the following API error for regions/partitions that do not support RDS Global Clusters: + // InvalidParameterValue: Access Denied to API Version: APIGlobalDatabases + } else { + return fmt.Errorf("reading RDS Global Cluster for RDS Cluster (%s): %w", d.Id(), err) } } diff --git a/internal/service/rds/find.go b/internal/service/rds/find.go index 832b87dbf69..ef176ccad11 100644 --- a/internal/service/rds/find.go +++ b/internal/service/rds/find.go @@ -375,3 +375,71 @@ func findDBInstanceAutomatedBackups(conn *rds.RDS, input *rds.DescribeDBInstance return output, nil } + +func FindGlobalClusterByDBClusterARN(conn *rds.RDS, dbClusterARN string) (*rds.GlobalCluster, error) { + input := &rds.DescribeGlobalClustersInput{} + globalClusters, err := findGlobalClusters(conn, input) + + if err != nil { + return nil, err + } + + for _, globalCluster := range globalClusters { + for _, v := range globalCluster.GlobalClusterMembers { + if aws.StringValue(v.DBClusterArn) == dbClusterARN { + return globalCluster, nil + } + } + } + + return nil, &resource.NotFoundError{LastRequest: dbClusterARN} +} + +func findGlobalCluster(conn *rds.RDS, input *rds.DescribeGlobalClustersInput) (*rds.GlobalCluster, error) { + output, err := findGlobalClusters(conn, input) + + if err != nil { + return nil, err + } + + if len(output) == 0 || output[0] == nil { 
+ return nil, tfresource.NewEmptyResultError(input) + } + + if count := len(output); count > 1 { + return nil, tfresource.NewTooManyResultsError(count, input) + } + + return output[0], nil +} + +func findGlobalClusters(conn *rds.RDS, input *rds.DescribeGlobalClustersInput) ([]*rds.GlobalCluster, error) { + var output []*rds.GlobalCluster + + err := conn.DescribeGlobalClustersPages(input, func(page *rds.DescribeGlobalClustersOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.GlobalClusters { + if v != nil { + output = append(output, v) + } + } + + return !lastPage + }) + + if tfawserr.ErrCodeEquals(err, rds.ErrCodeGlobalClusterNotFoundFault) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + return output, nil +} diff --git a/internal/service/rds/flex.go b/internal/service/rds/flex.go index 376509354e4..363fd4cd25d 100644 --- a/internal/service/rds/flex.go +++ b/internal/service/rds/flex.go @@ -30,20 +30,38 @@ func ExpandClusterScalingConfiguration(l []interface{}) *rds.ScalingConfiguratio return scalingConfiguration } -func flattenScalingConfigurationInfo(scalingConfigurationInfo *rds.ScalingConfigurationInfo) []interface{} { - if scalingConfigurationInfo == nil { - return []interface{}{} +func flattenScalingConfigurationInfo(apiObject *rds.ScalingConfigurationInfo) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.AutoPause; v != nil { + tfMap["auto_pause"] = aws.BoolValue(v) + } + + if v := apiObject.MaxCapacity; v != nil { + tfMap["max_capacity"] = aws.Int64Value(v) } - m := map[string]interface{}{ - "auto_pause": aws.BoolValue(scalingConfigurationInfo.AutoPause), - "max_capacity": aws.Int64Value(scalingConfigurationInfo.MaxCapacity), - "min_capacity": aws.Int64Value(scalingConfigurationInfo.MinCapacity), - "seconds_until_auto_pause": aws.Int64Value(scalingConfigurationInfo.SecondsUntilAutoPause), - "timeout_action": aws.StringValue(scalingConfigurationInfo.TimeoutAction), + if v := apiObject.MaxCapacity; v != nil { + tfMap["max_capacity"] = aws.Int64Value(v) } - return []interface{}{m} + if v := apiObject.MinCapacity; v != nil { + tfMap["min_capacity"] = aws.Int64Value(v) + } + + if v := apiObject.SecondsUntilAutoPause; v != nil { + tfMap["seconds_until_auto_pause"] = aws.Int64Value(v) + } + + if v := apiObject.TimeoutAction; v != nil { + tfMap["timeout_action"] = aws.StringValue(v) + } + + return tfMap } func expandServerlessV2ScalingConfiguration(tfMap map[string]interface{}) *rds.ServerlessV2ScalingConfiguration { From b9189c64168cd115386c9e37a2b3ba33b66102e1 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 6 Sep 2022 09:42:55 -0400 Subject: [PATCH 15/29] r/aws_rds_cluster: Add 'TestAccRDSCluster_disappears' (#13826). Acceptance test output: % make testacc TESTARGS='-run=TestAccRDSCluster_disappears' PKG=rds ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/rds/... 
-v -count 1 -parallel 2 -run=TestAccRDSCluster_disappears -timeout 180m === RUN TestAccRDSCluster_disappears === PAUSE TestAccRDSCluster_disappears === CONT TestAccRDSCluster_disappears --- PASS: TestAccRDSCluster_disappears (106.05s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/rds 110.200s --- internal/service/rds/cluster_test.go | 103 ++++++++++++++++----------- internal/service/rds/find.go | 21 ++++++ 2 files changed, 84 insertions(+), 40 deletions(-) diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go index d7ac6701000..a9ddb42e51e 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -85,6 +85,69 @@ func TestAccRDSCluster_basic(t *testing.T) { }) } +func TestAccRDSCluster_disappears(t *testing.T) { + var dbCluster rds.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, rds.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &dbCluster), + acctest.CheckResourceDisappears(acctest.Provider, tfrds.ResourceCluster(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccRDSCluster_tags(t *testing.T) { + var dbCluster1, dbCluster2, dbCluster3 rds.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, rds.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_tags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &dbCluster1), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + Config: testAccClusterConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &dbCluster2), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccClusterConfig_tags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &dbCluster3), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + func TestAccRDSCluster_allowMajorVersionUpgrade(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -620,46 +683,6 @@ func TestAccRDSCluster_missingUserNameCausesError(t *testing.T) { }) } -func TestAccRDSCluster_tags(t *testing.T) { - var dbCluster1, dbCluster2, dbCluster3 rds.DBCluster - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_rds_cluster.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { 
acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, rds.EndpointsID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccClusterConfig_tags1(rName, "key1", "value1"), - Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(resourceName, &dbCluster1), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), - ), - }, - { - Config: testAccClusterConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), - Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(resourceName, &dbCluster2), - resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), - resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), - resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), - ), - }, - { - Config: testAccClusterConfig_tags1(rName, "key2", "value2"), - Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(resourceName, &dbCluster3), - resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), - ), - }, - }, - }) -} - func TestAccRDSCluster_EnabledCloudWatchLogsExports_mySQL(t *testing.T) { var dbCluster1, dbCluster2, dbCluster3 rds.DBCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) diff --git a/internal/service/rds/find.go b/internal/service/rds/find.go index ef176ccad11..316dc4ed471 100644 --- a/internal/service/rds/find.go +++ b/internal/service/rds/find.go @@ -395,6 +395,27 @@ func FindGlobalClusterByDBClusterARN(conn *rds.RDS, dbClusterARN string) (*rds.G return nil, &resource.NotFoundError{LastRequest: dbClusterARN} } +func FindGlobalClusterByID(conn *rds.RDS, id string) (*rds.GlobalCluster, error) { + input := &rds.DescribeGlobalClustersInput{ + GlobalClusterIdentifier: aws.String(id), + } + + output, err := findGlobalCluster(conn, input) + + if err != nil { + return nil, err + } + + // Eventual consistency check. + if aws.StringValue(output.GlobalClusterIdentifier) != id { + return nil, &resource.NotFoundError{ + LastRequest: input, + } + } + + return output, nil +} + func findGlobalCluster(conn *rds.RDS, input *rds.DescribeGlobalClustersInput) (*rds.GlobalCluster, error) { output, err := findGlobalClusters(conn, input) From 6caf6a541c10888e32672421cb760d0f6a42306b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 6 Sep 2022 10:06:29 -0400 Subject: [PATCH 16/29] r/aws_rds_cluster: 'WaitForClusterDeletion' -> 'waitDBClusterDeleted'. Acceptance test output: % make testacc TESTARGS='-run=TestAccRDSCluster_basic' PKG=rds ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/rds/... 
-v -count 1 -parallel 2 -run=TestAccRDSCluster_basic -timeout 180m === RUN TestAccRDSCluster_basic === PAUSE TestAccRDSCluster_basic === CONT TestAccRDSCluster_basic --- PASS: TestAccRDSCluster_basic (169.79s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/rds 176.258s --- internal/service/rds/cluster.go | 24 +----------------------- internal/service/rds/consts.go | 15 +++++++++++++++ internal/service/rds/status.go | 16 ++++++++++++++++ internal/service/rds/wait.go | 25 +++++++++++++++++++++++++ 4 files changed, 57 insertions(+), 23 deletions(-) diff --git a/internal/service/rds/cluster.go b/internal/service/rds/cluster.go index 1f12c802011..af41e9c5c7b 100644 --- a/internal/service/rds/cluster.go +++ b/internal/service/rds/cluster.go @@ -1433,7 +1433,7 @@ func resourceClusterDelete(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("deleting RDS Cluster (%s): %w", d.Id(), err) } - if err := WaitForClusterDeletion(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + if _, err := waitDBClusterDeleted(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return fmt.Errorf("waiting for RDS Cluster (%s) delete: %w", d.Id(), err) } @@ -1510,13 +1510,6 @@ var resourceClusterCreatePendingStates = []string{ "rebooting", } -var resourceClusterDeletePendingStates = []string{ - "available", - "deleting", - "backing-up", - "modifying", -} - var resourceClusterUpdatePendingStates = []string{ "backing-up", "configuring-iam-database-auth", @@ -1540,21 +1533,6 @@ func waitForClusterUpdate(conn *rds.RDS, id string, timeout time.Duration) error return err } -func WaitForClusterDeletion(conn *rds.RDS, id string, timeout time.Duration) error { - stateConf := &resource.StateChangeConf{ - Pending: resourceClusterDeletePendingStates, - Target: []string{"destroyed"}, - Refresh: resourceClusterStateRefreshFunc(conn, id), - Timeout: timeout, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - _, err := stateConf.WaitForState() - - return err -} - func clusterSetResourceDataEngineVersionFromCluster(d *schema.ResourceData, c *rds.DBCluster) { oldVersion := d.Get("engine_version").(string) newVersion := aws.StringValue(c.EngineVersion) diff --git a/internal/service/rds/consts.go b/internal/service/rds/consts.go index 64e5b852fb8..4670d9a142c 100644 --- a/internal/service/rds/consts.go +++ b/internal/service/rds/consts.go @@ -8,6 +8,21 @@ const ( ClusterRoleStatusPending = "PENDING" ) +const ( + ClusterStatusAvailable = "available" + ClusterStatusBackingUp = "backing-up" + ClusterStatusConfiguringIAMDatabaseAuth = "configuring-iam-database-auth" + ClusterStatusCreating = "creating" + ClusterStatusDeleting = "deleting" + ClusterStatusMigrating = "migrating" + ClusterStatusModifying = "modifying" + ClusterStatusPreparingDataMigration = "preparing-data-migration" + ClusterStatusRebooting = "rebooting" + ClusterStatusRenaming = "renaming" + ClusterStatusResettingMasterCredentials = "resetting-master-credentials" + ClusterStatusUpgrading = "upgrading" +) + const ( storageTypeStandard = "standard" storageTypeGP2 = "gp2" diff --git a/internal/service/rds/status.go b/internal/service/rds/status.go index c5945c4be2e..898f919dbb9 100644 --- a/internal/service/rds/status.go +++ b/internal/service/rds/status.go @@ -50,6 +50,22 @@ func statusDBProxyEndpoint(conn *rds.RDS, id string) resource.StateRefreshFunc { } } +func statusDBCluster(conn *rds.RDS, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := 
FindDBClusterByID(conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.Status), nil + } +} + func statusDBClusterRole(conn *rds.RDS, dbClusterID, roleARN string) resource.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindDBClusterRoleByDBClusterIDAndRoleARN(conn, dbClusterID, roleARN) diff --git a/internal/service/rds/wait.go b/internal/service/rds/wait.go index ebc492b8114..72f300b5de8 100644 --- a/internal/service/rds/wait.go +++ b/internal/service/rds/wait.go @@ -116,6 +116,31 @@ func waitDBProxyEndpointDeleted(conn *rds.RDS, id string, timeout time.Duration) return nil, err } +func waitDBClusterDeleted(conn *rds.RDS, id string, timeout time.Duration) (*rds.DBCluster, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + ClusterStatusAvailable, + ClusterStatusBackingUp, + ClusterStatusDeleting, + ClusterStatusModifying, + }, + Target: []string{}, + Refresh: statusDBCluster(conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + ContinuousTargetOccurence: 3, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*rds.DBCluster); ok { + return output, err + } + + return nil, err +} + func waitDBClusterRoleAssociationCreated(conn *rds.RDS, dbClusterID, roleARN string) (*rds.DBClusterRole, error) { stateConf := &resource.StateChangeConf{ Pending: []string{ClusterRoleStatusPending}, From 386649a4f1c5176f8b67a1649848e2f10b3e0f21 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 6 Sep 2022 13:35:14 -0400 Subject: [PATCH 17/29] r/aws_rds_cluster: Tidy up resource Create. Acceptance test output: % make testacc TESTARGS='-run=TestAccRDSCluster_basic\|TestAccRDSCluster_s3Restore\|TestAccRDSCluster_pointInTimeRestore\|TestAccRDSCluster_updateIAMRoles\|TestAccRDSCluster_snapshotIdentifier' PKG=rds ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/rds/... -v -count 1 -parallel 2 -run=TestAccRDSCluster_basic\|TestAccRDSCluster_s3Restore\|TestAccRDSCluster_pointInTimeRestore\|TestAccRDSCluster_updateIAMRoles\|TestAccRDSCluster_snapshotIdentifier -timeout 180m === RUN TestAccRDSCluster_basic === PAUSE TestAccRDSCluster_basic === RUN TestAccRDSCluster_s3Restore === PAUSE TestAccRDSCluster_s3Restore === RUN TestAccRDSCluster_pointInTimeRestore === PAUSE TestAccRDSCluster_pointInTimeRestore === RUN TestAccRDSCluster_updateIAMRoles === PAUSE TestAccRDSCluster_updateIAMRoles === RUN TestAccRDSCluster_snapshotIdentifier === PAUSE TestAccRDSCluster_snapshotIdentifier === CONT TestAccRDSCluster_basic === CONT TestAccRDSCluster_updateIAMRoles --- PASS: TestAccRDSCluster_basic (161.33s) === CONT TestAccRDSCluster_pointInTimeRestore --- PASS: TestAccRDSCluster_updateIAMRoles (168.23s) === CONT TestAccRDSCluster_snapshotIdentifier --- PASS: TestAccRDSCluster_snapshotIdentifier (358.47s) === CONT TestAccRDSCluster_s3Restore --- PASS: TestAccRDSCluster_pointInTimeRestore (371.90s) === CONT TestAccRDSCluster_s3Restore cluster_test.go:546: Step 1/1 error: Error running apply: exit status 1 Error: waiting for RDS Cluster (tf-test-20220906170825745800000001) create: unexpected state 'migration-failed', wanted target 'available'. 
last error: %!s() with aws_rds_cluster.test, on terraform_plugin_test.tf line 74, in resource "aws_rds_cluster" "test": 74: resource "aws_rds_cluster" "test" { --- FAIL: TestAccRDSCluster_s3Restore (1558.92s) FAIL FAIL github.com/hashicorp/terraform-provider-aws/internal/service/rds 2089.773s FAIL make: *** [testacc] Error 1 --- internal/service/rds/cluster.go | 615 ++++++++++++-------------- internal/service/rds/flex.go | 31 +- internal/service/rds/flex_test.go | 51 --- internal/service/rds/instance_test.go | 8 +- internal/service/rds/wait.go | 64 ++- 5 files changed, 371 insertions(+), 398 deletions(-) diff --git a/internal/service/rds/cluster.go b/internal/service/rds/cluster.go index af41e9c5c7b..453e17c99ca 100644 --- a/internal/service/rds/cluster.go +++ b/internal/service/rds/cluster.go @@ -483,106 +483,103 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { identifier = resource.PrefixedUniqueId("tf-") } - if _, ok := d.GetOk("snapshot_identifier"); ok { - opts := rds.RestoreDBClusterFromSnapshotInput{ - CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)), - DBClusterIdentifier: aws.String(identifier), - DeletionProtection: aws.Bool(d.Get("deletion_protection").(bool)), - Engine: aws.String(d.Get("engine").(string)), - EngineMode: aws.String(d.Get("engine_mode").(string)), - ScalingConfiguration: ExpandClusterScalingConfiguration(d.Get("scaling_configuration").([]interface{})), - SnapshotIdentifier: aws.String(d.Get("snapshot_identifier").(string)), - Tags: Tags(tags.IgnoreAWS()), + if v, ok := d.GetOk("snapshot_identifier"); ok { + input := &rds.RestoreDBClusterFromSnapshotInput{ + CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)), + DBClusterIdentifier: aws.String(identifier), + DeletionProtection: aws.Bool(d.Get("deletion_protection").(bool)), + Engine: aws.String(d.Get("engine").(string)), + EngineMode: aws.String(d.Get("engine_mode").(string)), + SnapshotIdentifier: aws.String(v.(string)), + Tags: Tags(tags.IgnoreAWS()), } - if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - opts.AvailabilityZones = flex.ExpandStringSet(attr) + if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 { + input.AvailabilityZones = flex.ExpandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("backtrack_window"); ok { - opts.BacktrackWindow = aws.Int64(int64(v.(int))) + input.BacktrackWindow = aws.Int64(int64(v.(int))) } - if attr, ok := d.GetOk("backup_retention_period"); ok { - modifyDbClusterInput.BackupRetentionPeriod = aws.Int64(int64(attr.(int))) + if v, ok := d.GetOk("backup_retention_period"); ok { + modifyDbClusterInput.BackupRetentionPeriod = aws.Int64(int64(v.(int))) requiresModifyDbCluster = true } - if attr, ok := d.GetOk("database_name"); ok { - opts.DatabaseName = aws.String(attr.(string)) + if v, ok := d.GetOk("database_name"); ok { + input.DatabaseName = aws.String(v.(string)) } - if attr, ok := d.GetOk("db_cluster_parameter_group_name"); ok { - opts.DBClusterParameterGroupName = aws.String(attr.(string)) + if v, ok := d.GetOk("db_cluster_parameter_group_name"); ok { + input.DBClusterParameterGroupName = aws.String(v.(string)) } - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - opts.DBSubnetGroupName = aws.String(attr.(string)) + if v, ok := d.GetOk("db_subnet_group_name"); ok { + input.DBSubnetGroupName = aws.String(v.(string)) } - if attr, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && attr.(*schema.Set).Len() > 0 { - opts.EnableCloudwatchLogsExports = 
flex.ExpandStringSet(attr.(*schema.Set)) + if v, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && v.(*schema.Set).Len() > 0 { + input.EnableCloudwatchLogsExports = flex.ExpandStringSet(v.(*schema.Set)) } - if attr, ok := d.GetOk("engine_version"); ok { - opts.EngineVersion = aws.String(attr.(string)) + if v, ok := d.GetOk("engine_version"); ok { + input.EngineVersion = aws.String(v.(string)) } - if attr, ok := d.GetOk("kms_key_id"); ok { - opts.KmsKeyId = aws.String(attr.(string)) + if v, ok := d.GetOk("kms_key_id"); ok { + input.KmsKeyId = aws.String(v.(string)) } - if attr, ok := d.GetOk("master_password"); ok { - modifyDbClusterInput.MasterUserPassword = aws.String(attr.(string)) + if v, ok := d.GetOk("master_password"); ok { + modifyDbClusterInput.MasterUserPassword = aws.String(v.(string)) requiresModifyDbCluster = true } - if attr, ok := d.GetOk("network_type"); ok { - opts.NetworkType = aws.String(attr.(string)) + if v, ok := d.GetOk("network_type"); ok { + input.NetworkType = aws.String(v.(string)) } - if attr, ok := d.GetOk("option_group_name"); ok { - opts.OptionGroupName = aws.String(attr.(string)) + if v, ok := d.GetOk("option_group_name"); ok { + input.OptionGroupName = aws.String(v.(string)) } - if attr, ok := d.GetOk("port"); ok { - opts.Port = aws.Int64(int64(attr.(int))) + if v, ok := d.GetOk("port"); ok { + input.Port = aws.Int64(int64(v.(int))) } - if attr, ok := d.GetOk("preferred_backup_window"); ok { - modifyDbClusterInput.PreferredBackupWindow = aws.String(attr.(string)) + if v, ok := d.GetOk("preferred_backup_window"); ok { + modifyDbClusterInput.PreferredBackupWindow = aws.String(v.(string)) requiresModifyDbCluster = true } - if attr, ok := d.GetOk("preferred_maintenance_window"); ok { - modifyDbClusterInput.PreferredMaintenanceWindow = aws.String(attr.(string)) + if v, ok := d.GetOk("preferred_maintenance_window"); ok { + modifyDbClusterInput.PreferredMaintenanceWindow = aws.String(v.(string)) requiresModifyDbCluster = true } + if v, ok := d.GetOk("scaling_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.ScalingConfiguration = expandScalingConfiguration(v.([]interface{})[0].(map[string]interface{})) + } + if v, ok := d.GetOk("serverlessv2_scaling_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { modifyDbClusterInput.ServerlessV2ScalingConfiguration = expandServerlessV2ScalingConfiguration(v.([]interface{})[0].(map[string]interface{})) + requiresModifyDbCluster = true } - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - opts.VpcSecurityGroupIds = flex.ExpandStringSet(attr) + if v, ok := d.GetOk("vpc_security_group_ids"); ok && v.(*schema.Set).Len() > 0 { + input.VpcSecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) } - log.Printf("[DEBUG] RDS Cluster restore from snapshot configuration: %s", opts) - err := resource.Retry(propagationTimeout, func() *resource.RetryError { - _, err := conn.RestoreDBClusterFromSnapshot(&opts) - if err != nil { - if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "IAM role ARN value is invalid or does not include the required permissions") { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.RestoreDBClusterFromSnapshot(&opts) - } + log.Printf("[DEBUG] Creating RDS Cluster: %s", input) + _, err := tfresource.RetryWhenAWSErrMessageContains(propagationTimeout, + func() (interface{}, error) { + return 
conn.RestoreDBClusterFromSnapshot(input) + }, + errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") + if err != nil { - return fmt.Errorf("Error creating RDS Cluster: %s", err) + return fmt.Errorf("creating RDS Cluster (restore from snapshot) (%s): %w", identifier, err) } } else if v, ok := d.GetOk("s3_import"); ok { if _, ok := d.GetOk("master_password"); !ok { @@ -591,399 +588,367 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { if _, ok := d.GetOk("master_username"); !ok { return fmt.Errorf(`provider.aws: aws_db_instance: %s: "master_username": required field is not set`, d.Get("name").(string)) } - s3_bucket := v.([]interface{})[0].(map[string]interface{}) - createOpts := &rds.RestoreDBClusterFromS3Input{ + + tfMap := v.([]interface{})[0].(map[string]interface{}) + input := &rds.RestoreDBClusterFromS3Input{ CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)), DBClusterIdentifier: aws.String(identifier), DeletionProtection: aws.Bool(d.Get("deletion_protection").(bool)), Engine: aws.String(d.Get("engine").(string)), MasterUsername: aws.String(d.Get("master_username").(string)), MasterUserPassword: aws.String(d.Get("master_password").(string)), - S3BucketName: aws.String(s3_bucket["bucket_name"].(string)), - S3IngestionRoleArn: aws.String(s3_bucket["ingestion_role"].(string)), - S3Prefix: aws.String(s3_bucket["bucket_prefix"].(string)), - SourceEngine: aws.String(s3_bucket["source_engine"].(string)), - SourceEngineVersion: aws.String(s3_bucket["source_engine_version"].(string)), + S3BucketName: aws.String(tfMap["bucket_name"].(string)), + S3IngestionRoleArn: aws.String(tfMap["ingestion_role"].(string)), + S3Prefix: aws.String(tfMap["bucket_prefix"].(string)), + SourceEngine: aws.String(tfMap["source_engine"].(string)), + SourceEngineVersion: aws.String(tfMap["source_engine_version"].(string)), Tags: Tags(tags.IgnoreAWS()), } - if v, ok := d.GetOk("backtrack_window"); ok { - createOpts.BacktrackWindow = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 { + input.AvailabilityZones = flex.ExpandStringSet(v.(*schema.Set)) } - if v := d.Get("database_name"); v.(string) != "" { - createOpts.DatabaseName = aws.String(v.(string)) + if v, ok := d.GetOk("backtrack_window"); ok { + input.BacktrackWindow = aws.Int64(int64(v.(int))) } - if attr, ok := d.GetOk("port"); ok { - createOpts.Port = aws.Int64(int64(attr.(int))) + if v, ok := d.GetOk("backup_retention_period"); ok { + input.BackupRetentionPeriod = aws.Int64(int64(v.(int))) } - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - createOpts.DBSubnetGroupName = aws.String(attr.(string)) + if v := d.Get("database_name"); v.(string) != "" { + input.DatabaseName = aws.String(v.(string)) } - if attr, ok := d.GetOk("db_cluster_parameter_group_name"); ok { - createOpts.DBClusterParameterGroupName = aws.String(attr.(string)) + if v, ok := d.GetOk("db_cluster_parameter_group_name"); ok { + input.DBClusterParameterGroupName = aws.String(v.(string)) } - if attr, ok := d.GetOk("engine_version"); ok { - createOpts.EngineVersion = aws.String(attr.(string)) + if v, ok := d.GetOk("db_subnet_group_name"); ok { + input.DBSubnetGroupName = aws.String(v.(string)) } - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - createOpts.VpcSecurityGroupIds = flex.ExpandStringSet(attr) + if v, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && v.(*schema.Set).Len() > 0 { + 
input.EnableCloudwatchLogsExports = flex.ExpandStringSet(v.(*schema.Set)) } - if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - createOpts.AvailabilityZones = flex.ExpandStringSet(attr) + if v, ok := d.GetOk("engine_version"); ok { + input.EngineVersion = aws.String(v.(string)) } - if v, ok := d.GetOk("backup_retention_period"); ok { - createOpts.BackupRetentionPeriod = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("iam_database_authentication_enabled"); ok { + input.EnableIAMDatabaseAuthentication = aws.Bool(v.(bool)) } - if v, ok := d.GetOk("preferred_backup_window"); ok { - createOpts.PreferredBackupWindow = aws.String(v.(string)) + if v, ok := d.GetOk("kms_key_id"); ok { + input.KmsKeyId = aws.String(v.(string)) } - if v, ok := d.GetOk("preferred_maintenance_window"); ok { - createOpts.PreferredMaintenanceWindow = aws.String(v.(string)) + if v, ok := d.GetOk("network_type"); ok { + input.NetworkType = aws.String(v.(string)) } - if attr, ok := d.GetOk("kms_key_id"); ok { - createOpts.KmsKeyId = aws.String(attr.(string)) + if v, ok := d.GetOk("port"); ok { + input.Port = aws.Int64(int64(v.(int))) } - if attr, ok := d.GetOk("network_type"); ok { - createOpts.NetworkType = aws.String(attr.(string)) + if v, ok := d.GetOk("preferred_backup_window"); ok { + input.PreferredBackupWindow = aws.String(v.(string)) } - if attr, ok := d.GetOk("iam_database_authentication_enabled"); ok { - createOpts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool)) + if v, ok := d.GetOk("preferred_maintenance_window"); ok { + input.PreferredMaintenanceWindow = aws.String(v.(string)) } - if attr, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && attr.(*schema.Set).Len() > 0 { - createOpts.EnableCloudwatchLogsExports = flex.ExpandStringSet(attr.(*schema.Set)) + if v, ok := d.GetOkExists("storage_encrypted"); ok { + input.StorageEncrypted = aws.Bool(v.(bool)) } - if attr, ok := d.GetOkExists("storage_encrypted"); ok { - createOpts.StorageEncrypted = aws.Bool(attr.(bool)) + if v, ok := d.GetOk("vpc_security_group_ids"); ok && v.(*schema.Set).Len() > 0 { + input.VpcSecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) } - log.Printf("[DEBUG] RDS Cluster restore options: %s", createOpts) - // Retry for IAM/S3 eventual consistency - var resp *rds.RestoreDBClusterFromS3Output - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - var err error - resp, err = conn.RestoreDBClusterFromS3(createOpts) - if err != nil { + log.Printf("[DEBUG] Creating RDS Cluster: %s", input) + _, err := tfresource.RetryWhen(propagationTimeout, + func() (interface{}, error) { + return conn.RestoreDBClusterFromS3(input) + }, + func(err error) (bool, error) { // InvalidParameterValue: Files from the specified Amazon S3 bucket cannot be downloaded. // Make sure that you have created an AWS Identity and Access Management (IAM) role that lets Amazon RDS access Amazon S3 for you. 
- if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "Files from the specified Amazon S3 bucket cannot be downloaded") { - return resource.RetryableError(err) + if tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, "Files from the specified Amazon S3 bucket cannot be downloaded") { + return true, err } - if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "S3_SNAPSHOT_INGESTION") { - return resource.RetryableError(err) + + if tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, "S3_SNAPSHOT_INGESTION") { + return true, err } - if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "S3 bucket cannot be found") { - return resource.RetryableError(err) + + if tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, "S3 bucket cannot be found") { + return true, err } - return resource.NonRetryableError(err) - } - log.Printf("[DEBUG]: RDS Cluster create response: %s", resp) - return nil - }) - if tfresource.TimedOut(err) { - resp, err = conn.RestoreDBClusterFromS3(createOpts) - } + + return false, err + }, + ) if err != nil { - log.Printf("[ERROR] Error creating RDS Cluster: %s", err) - return err + return fmt.Errorf("creating RDS Cluster (restore from S3) (%s): %w", identifier, err) } - } else if v, ok := d.GetOk("restore_to_point_in_time"); ok { - pointInTime := v.([]interface{})[0].(map[string]interface{}) - createOpts := &rds.RestoreDBClusterToPointInTimeInput{ + tfMap := v.([]interface{})[0].(map[string]interface{}) + input := &rds.RestoreDBClusterToPointInTimeInput{ DBClusterIdentifier: aws.String(identifier), DeletionProtection: aws.Bool(d.Get("deletion_protection").(bool)), - SourceDBClusterIdentifier: aws.String(pointInTime["source_cluster_identifier"].(string)), + SourceDBClusterIdentifier: aws.String(tfMap["source_cluster_identifier"].(string)), Tags: Tags(tags.IgnoreAWS()), } - if v, ok := pointInTime["restore_to_time"].(string); ok && v != "" { - restoreToTime, _ := time.Parse(time.RFC3339, v) - createOpts.RestoreToTime = aws.Time(restoreToTime) + if v, ok := tfMap["restore_to_time"].(string); ok && v != "" { + v, _ := time.Parse(time.RFC3339, v) + input.RestoreToTime = aws.Time(v) } - if v, ok := pointInTime["use_latest_restorable_time"].(bool); ok && v { - createOpts.UseLatestRestorableTime = aws.Bool(v) + if v, ok := tfMap["use_latest_restorable_time"].(bool); ok && v { + input.UseLatestRestorableTime = aws.Bool(v) } - if createOpts.RestoreToTime == nil && createOpts.UseLatestRestorableTime == nil { + if input.RestoreToTime == nil && input.UseLatestRestorableTime == nil { return fmt.Errorf(`provider.aws: aws_rds_cluster: %s: Either "restore_to_time" or "use_latest_restorable_time" must be set`, d.Get("database_name").(string)) } - if attr, ok := pointInTime["restore_type"].(string); ok { - createOpts.RestoreType = aws.String(attr) + if v, ok := d.GetOk("backtrack_window"); ok { + input.BacktrackWindow = aws.Int64(int64(v.(int))) } - if v, ok := d.GetOk("backtrack_window"); ok { - createOpts.BacktrackWindow = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("backup_retention_period"); ok { + modifyDbClusterInput.BackupRetentionPeriod = aws.Int64(int64(v.(int))) + requiresModifyDbCluster = true } - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - createOpts.DBSubnetGroupName = aws.String(attr.(string)) + if v, ok := d.GetOk("db_cluster_parameter_group_name"); ok { + input.DBClusterParameterGroupName = aws.String(v.(string)) } - if attr, ok := d.GetOk("port"); ok { - createOpts.Port = aws.Int64(int64(attr.(int))) + if v, ok 
:= d.GetOk("db_subnet_group_name"); ok { + input.DBSubnetGroupName = aws.String(v.(string)) } - if attr, ok := d.GetOk("option_group_name"); ok { - createOpts.OptionGroupName = aws.String(attr.(string)) + if v, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && v.(*schema.Set).Len() > 0 { + input.EnableCloudwatchLogsExports = flex.ExpandStringSet(v.(*schema.Set)) } - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - createOpts.VpcSecurityGroupIds = flex.ExpandStringSet(attr) + if v, ok := d.GetOk("iam_database_authentication_enabled"); ok { + input.EnableIAMDatabaseAuthentication = aws.Bool(v.(bool)) } - if attr, ok := d.GetOk("kms_key_id"); ok { - createOpts.KmsKeyId = aws.String(attr.(string)) + if v, ok := d.GetOk("kms_key_id"); ok { + input.KmsKeyId = aws.String(v.(string)) } - if attr, ok := d.GetOk("network_type"); ok { - createOpts.NetworkType = aws.String(attr.(string)) + if v, ok := d.GetOk("master_password"); ok { + modifyDbClusterInput.MasterUserPassword = aws.String(v.(string)) + requiresModifyDbCluster = true } - if attr, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && attr.(*schema.Set).Len() > 0 { - createOpts.EnableCloudwatchLogsExports = flex.ExpandStringSet(attr.(*schema.Set)) + if v, ok := d.GetOk("network_type"); ok { + input.NetworkType = aws.String(v.(string)) } - if attr, ok := d.GetOk("iam_database_authentication_enabled"); ok { - createOpts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool)) + if v, ok := d.GetOk("option_group_name"); ok { + input.OptionGroupName = aws.String(v.(string)) } - if attr, ok := d.GetOk("db_cluster_parameter_group_name"); ok { - createOpts.DBClusterParameterGroupName = aws.String(attr.(string)) + if v, ok := d.GetOk("port"); ok { + input.Port = aws.Int64(int64(v.(int))) } - requireUpdateAttrs := []string{ - "master_password", - "backup_retention_period", - "preferred_backup_window", - "preferred_maintenance_window", - "scaling_configuration", + if v, ok := d.GetOk("preferred_backup_window"); ok { + modifyDbClusterInput.PreferredBackupWindow = aws.String(v.(string)) + requiresModifyDbCluster = true } - for _, attr := range requireUpdateAttrs { - if val, ok := d.GetOk(attr); ok { - requiresModifyDbCluster = true - switch attr { - case "master_password": - modifyDbClusterInput.MasterUserPassword = aws.String(val.(string)) - case "backup_retention_period": - modifyDbClusterInput.BackupRetentionPeriod = aws.Int64(int64(val.(int))) - case "preferred_backup_window": - modifyDbClusterInput.PreferredBackupWindow = aws.String(val.(string)) - case "preferred_maintenance_window": - modifyDbClusterInput.PreferredMaintenanceWindow = aws.String(val.(string)) - case "scaling_configuration": - modifyDbClusterInput.ScalingConfiguration = ExpandClusterScalingConfiguration(d.Get("scaling_configuration").([]interface{})) - case "serverlessv2_scaling_configuration": - if len(val.([]interface{})) > 0 && val.([]interface{})[0] != nil { - modifyDbClusterInput.ServerlessV2ScalingConfiguration = expandServerlessV2ScalingConfiguration(v.([]interface{})[0].(map[string]interface{})) - } - } - } + if v, ok := d.GetOk("preferred_maintenance_window"); ok { + modifyDbClusterInput.PreferredMaintenanceWindow = aws.String(v.(string)) + requiresModifyDbCluster = true } - log.Printf("[DEBUG] RDS Cluster restore options: %s", createOpts) + if v, ok := tfMap["restore_type"].(string); ok { + input.RestoreType = aws.String(v) + } - resp, err := conn.RestoreDBClusterToPointInTime(createOpts) - if err != nil { - log.Printf("[ERROR] Error 
restoring RDS Cluster: %s", err) - return err + if v, ok := d.GetOk("scaling_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + modifyDbClusterInput.ScalingConfiguration = expandScalingConfiguration(v.([]interface{})[0].(map[string]interface{})) + requiresModifyDbCluster = true } - log.Printf("[DEBUG]: RDS Cluster restore response: %s", resp) - } else { + if v, ok := d.GetOk("serverlessv2_scaling_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + modifyDbClusterInput.ServerlessV2ScalingConfiguration = expandServerlessV2ScalingConfiguration(v.([]interface{})[0].(map[string]interface{})) + requiresModifyDbCluster = true + } - createOpts := &rds.CreateDBClusterInput{ - CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)), - DBClusterIdentifier: aws.String(identifier), - DeletionProtection: aws.Bool(d.Get("deletion_protection").(bool)), - Engine: aws.String(d.Get("engine").(string)), - EngineMode: aws.String(d.Get("engine_mode").(string)), - ScalingConfiguration: ExpandClusterScalingConfiguration(d.Get("scaling_configuration").([]interface{})), - Tags: Tags(tags.IgnoreAWS()), + if v, ok := d.GetOk("vpc_security_group_ids"); ok && v.(*schema.Set).Len() > 0 { + input.VpcSecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) } - // Note: Username and password credentials are required and valid - // unless the cluster is a read-replica. This also applies to clusters - // within a global cluster. Providing a password and/or username for - // a replica will result in an InvalidParameterValue error. - if v, ok := d.GetOk("master_password"); ok { - createOpts.MasterUserPassword = aws.String(v.(string)) + log.Printf("[DEBUG] Creating RDS Cluster: %s", input) + _, err := conn.RestoreDBClusterToPointInTime(input) + + if err != nil { + return fmt.Errorf("creating RDS Cluster (restore to point-in-time) (%s): %w", identifier, err) + } + } else { + input := &rds.CreateDBClusterInput{ + CopyTagsToSnapshot: aws.Bool(d.Get("copy_tags_to_snapshot").(bool)), + DBClusterIdentifier: aws.String(identifier), + DeletionProtection: aws.Bool(d.Get("deletion_protection").(bool)), + Engine: aws.String(d.Get("engine").(string)), + EngineMode: aws.String(d.Get("engine_mode").(string)), + Tags: Tags(tags.IgnoreAWS()), } - if v, ok := d.GetOk("master_username"); ok { - createOpts.MasterUsername = aws.String(v.(string)) + if v, ok := d.GetOkExists("allocated_storage"); ok { + input.AllocatedStorage = aws.Int64(int64(v.(int))) } - if v, ok := d.GetOk("enable_http_endpoint"); ok { - createOpts.EnableHttpEndpoint = aws.Bool(v.(bool)) + if v, ok := d.GetOk("availability_zones"); ok && v.(*schema.Set).Len() > 0 { + input.AvailabilityZones = flex.ExpandStringSet(v.(*schema.Set)) } if v, ok := d.GetOk("backtrack_window"); ok { - createOpts.BacktrackWindow = aws.Int64(int64(v.(int))) + input.BacktrackWindow = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("backup_retention_period"); ok { + input.BackupRetentionPeriod = aws.Int64(int64(v.(int))) } if v := d.Get("database_name"); v.(string) != "" { - createOpts.DatabaseName = aws.String(v.(string)) + input.DatabaseName = aws.String(v.(string)) } - if attr, ok := d.GetOk("port"); ok { - createOpts.Port = aws.Int64(int64(attr.(int))) + if v, ok := d.GetOk("db_cluster_instance_class"); ok { + input.DBClusterInstanceClass = aws.String(v.(string)) } - if attr, ok := d.GetOk("db_subnet_group_name"); ok { - createOpts.DBSubnetGroupName = aws.String(attr.(string)) + if v, ok := 
d.GetOk("db_cluster_parameter_group_name"); ok { + input.DBClusterParameterGroupName = aws.String(v.(string)) } - if attr, ok := d.GetOk("db_cluster_parameter_group_name"); ok { - createOpts.DBClusterParameterGroupName = aws.String(attr.(string)) + if v, ok := d.GetOk("db_subnet_group_name"); ok { + input.DBSubnetGroupName = aws.String(v.(string)) } - if attr, ok := d.GetOk("db_cluster_instance_class"); ok { - createOpts.DBClusterInstanceClass = aws.String(attr.(string)) + if v, ok := d.GetOk("enable_global_write_forwarding"); ok { + input.EnableGlobalWriteForwarding = aws.Bool(v.(bool)) } - if attr, ok := d.GetOk("engine_version"); ok { - createOpts.EngineVersion = aws.String(attr.(string)) + if v, ok := d.GetOk("enable_http_endpoint"); ok { + input.EnableHttpEndpoint = aws.Bool(v.(bool)) } - if attr, ok := d.GetOk("global_cluster_identifier"); ok { - createOpts.GlobalClusterIdentifier = aws.String(attr.(string)) + if v, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && v.(*schema.Set).Len() > 0 { + input.EnableCloudwatchLogsExports = flex.ExpandStringSet(v.(*schema.Set)) } - if attr, ok := d.GetOk("enable_global_write_forwarding"); ok { - createOpts.EnableGlobalWriteForwarding = aws.Bool(attr.(bool)) + if v, ok := d.GetOk("engine_version"); ok { + input.EngineVersion = aws.String(v.(string)) } - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - createOpts.VpcSecurityGroupIds = flex.ExpandStringSet(attr) + if v, ok := d.GetOk("global_cluster_identifier"); ok { + input.GlobalClusterIdentifier = aws.String(v.(string)) } - if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { - createOpts.AvailabilityZones = flex.ExpandStringSet(attr) + if v, ok := d.GetOk("iam_database_authentication_enabled"); ok { + input.EnableIAMDatabaseAuthentication = aws.Bool(v.(bool)) } - if v, ok := d.GetOk("backup_retention_period"); ok { - createOpts.BackupRetentionPeriod = aws.Int64(int64(v.(int))) + if v, ok := d.GetOkExists("iops"); ok { + input.Iops = aws.Int64(int64(v.(int))) } - if v, ok := d.GetOk("preferred_backup_window"); ok { - createOpts.PreferredBackupWindow = aws.String(v.(string)) + if v, ok := d.GetOk("kms_key_id"); ok { + input.KmsKeyId = aws.String(v.(string)) } - if v, ok := d.GetOk("preferred_maintenance_window"); ok { - createOpts.PreferredMaintenanceWindow = aws.String(v.(string)) + // Note: Username and password credentials are required and valid + // unless the cluster is a read-replica. This also applies to clusters + // within a global cluster. Providing a password and/or username for + // a replica will result in an InvalidParameterValue error. 
+ if v, ok := d.GetOk("master_password"); ok { + input.MasterUserPassword = aws.String(v.(string)) } - if attr, ok := d.GetOk("network_type"); ok { - createOpts.NetworkType = aws.String(attr.(string)) + if v, ok := d.GetOk("master_username"); ok { + input.MasterUsername = aws.String(v.(string)) } - if attr, ok := d.GetOk("kms_key_id"); ok { - createOpts.KmsKeyId = aws.String(attr.(string)) + if v, ok := d.GetOk("network_type"); ok { + input.NetworkType = aws.String(v.(string)) } - if attr, ok := d.GetOk("source_region"); ok { - createOpts.SourceRegion = aws.String(attr.(string)) + if v, ok := d.GetOk("port"); ok { + input.Port = aws.Int64(int64(v.(int))) } - if attr, ok := d.GetOk("iam_database_authentication_enabled"); ok { - createOpts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool)) + if v, ok := d.GetOk("preferred_backup_window"); ok { + input.PreferredBackupWindow = aws.String(v.(string)) } - if attr, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && attr.(*schema.Set).Len() > 0 { - createOpts.EnableCloudwatchLogsExports = flex.ExpandStringSet(attr.(*schema.Set)) + if v, ok := d.GetOk("preferred_maintenance_window"); ok { + input.PreferredMaintenanceWindow = aws.String(v.(string)) } - if attr, ok := d.GetOk("replication_source_identifier"); ok && createOpts.GlobalClusterIdentifier == nil { - createOpts.ReplicationSourceIdentifier = aws.String(attr.(string)) + if v, ok := d.GetOk("replication_source_identifier"); ok && input.GlobalClusterIdentifier == nil { + input.ReplicationSourceIdentifier = aws.String(v.(string)) } - if attr, ok := d.GetOkExists("allocated_storage"); ok { - createOpts.AllocatedStorage = aws.Int64(int64(attr.(int))) + if v, ok := d.GetOk("scaling_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.ScalingConfiguration = expandScalingConfiguration(v.([]interface{})[0].(map[string]interface{})) } - if attr, ok := d.GetOkExists("storage_type"); ok { - createOpts.StorageType = aws.String(attr.(string)) + if v, ok := d.GetOk("serverlessv2_scaling_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.ServerlessV2ScalingConfiguration = expandServerlessV2ScalingConfiguration(v.([]interface{})[0].(map[string]interface{})) } - if attr, ok := d.GetOkExists("iops"); ok { - createOpts.Iops = aws.Int64(int64(attr.(int))) + if v, ok := d.GetOk("source_region"); ok { + input.SourceRegion = aws.String(v.(string)) } - if v, ok := d.GetOk("serverlessv2_scaling_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - createOpts.ServerlessV2ScalingConfiguration = expandServerlessV2ScalingConfiguration(v.([]interface{})[0].(map[string]interface{})) + if v, ok := d.GetOkExists("storage_encrypted"); ok { + input.StorageEncrypted = aws.Bool(v.(bool)) } - if attr, ok := d.GetOkExists("storage_encrypted"); ok { - createOpts.StorageEncrypted = aws.Bool(attr.(bool)) + if v, ok := d.GetOkExists("storage_type"); ok { + input.StorageType = aws.String(v.(string)) } - log.Printf("[DEBUG] RDS Cluster create options: %s", createOpts) - var resp *rds.CreateDBClusterOutput - err := resource.Retry(propagationTimeout, func() *resource.RetryError { - var err error - resp, err = conn.CreateDBCluster(createOpts) - if err != nil { - if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "IAM role ARN value is invalid or does not include the required permissions") { - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } - return nil - }) - if 
tfresource.TimedOut(err) { - resp, err = conn.CreateDBCluster(createOpts) + if v, ok := d.GetOk("vpc_security_group_ids"); ok && v.(*schema.Set).Len() > 0 { + input.VpcSecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) } + + log.Printf("[DEBUG] Creating RDS Cluster: %s", input) + _, err := tfresource.RetryWhenAWSErrMessageContains(propagationTimeout, + func() (interface{}, error) { + return conn.CreateDBCluster(input) + }, + errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") + if err != nil { - return fmt.Errorf("error creating RDS cluster: %s", err) + return fmt.Errorf("creating RDS Cluster (%s): %w", identifier, err) } - - log.Printf("[DEBUG]: RDS Cluster create response: %s", resp) } d.SetId(identifier) - log.Printf("[INFO] RDS Cluster ID: %s", d.Id()) - - log.Println("[INFO] Waiting for RDS Cluster to be available") - - stateConf := &resource.StateChangeConf{ - Pending: resourceClusterCreatePendingStates, - Target: []string{"available"}, - Refresh: resourceClusterStateRefreshFunc(conn, d.Id()), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - } - - // Wait, catching any errors - _, err := stateConf.WaitForState() - if err != nil { - return fmt.Errorf("Error waiting for RDS Cluster state to be \"available\": %s", err) + if _, err := waitDBClusterCreated(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return fmt.Errorf("waiting for RDS Cluster (%s) create: %w", d.Id(), err) } - if v, ok := d.GetOk("iam_roles"); ok { - for _, role := range v.(*schema.Set).List() { - err := setIAMRoleToCluster(d.Id(), role.(string), conn) - if err != nil { + if v, ok := d.GetOk("iam_roles"); ok && v.(*schema.Set).Len() > 0 { + for _, v := range v.(*schema.Set).List() { + if err := addIAMRoleToCluster(conn, d.Id(), v.(string)); err != nil { return err } } @@ -992,16 +957,15 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error { if requiresModifyDbCluster { modifyDbClusterInput.DBClusterIdentifier = aws.String(d.Id()) - log.Printf("[INFO] RDS Cluster (%s) configuration requires ModifyDBCluster: %s", d.Id(), modifyDbClusterInput) + log.Printf("[INFO] Modifying RDS Cluster: %s", modifyDbClusterInput) _, err := conn.ModifyDBCluster(modifyDbClusterInput) + if err != nil { - return fmt.Errorf("error modifying RDS Cluster (%s): %s", d.Id(), err) + return fmt.Errorf("updating RDS Cluster (%s): %w", d.Id(), err) } - log.Printf("[INFO] Waiting for RDS Cluster (%s) to be available", d.Id()) - err = waitForClusterUpdate(conn, d.Id(), d.Timeout(schema.TimeoutCreate)) - if err != nil { - return fmt.Errorf("error waiting for RDS Cluster (%s) to be available: %s", d.Id(), err) + if _, err := waitDBClusterUpdated(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return fmt.Errorf("waiting for RDS Cluster (%s) update: %w", d.Id(), err) } } @@ -1253,7 +1217,9 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { } if d.HasChange("scaling_configuration") { - req.ScalingConfiguration = ExpandClusterScalingConfiguration(d.Get("scaling_configuration").([]interface{})) + if v, ok := d.GetOk("scaling_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + req.ScalingConfiguration = expandScalingConfiguration(v.([]interface{})[0].(map[string]interface{})) + } requestUpdate = true } @@ -1341,22 +1307,17 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { if nraw == nil { nraw = 
new(schema.Set) } - os := oraw.(*schema.Set) ns := nraw.(*schema.Set) - removeRoles := os.Difference(ns) - enableRoles := ns.Difference(os) - for _, role := range enableRoles.List() { - err := setIAMRoleToCluster(d.Id(), role.(string), conn) - if err != nil { + for _, v := range ns.Difference(os).List() { + if err := addIAMRoleToCluster(conn, d.Id(), v.(string)); err != nil { return err } } - for _, role := range removeRoles.List() { - err := removeIAMRoleFromCluster(d.Id(), role.(string), conn) - if err != nil { + for _, v := range os.Difference(ns).List() { + if err := removeIAMRoleFromCluster(conn, d.Id(), v.(string)); err != nil { return err } } @@ -1366,7 +1327,7 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { o, n := d.GetChange("tags_all") if err := UpdateTags(conn, d.Get("arn").(string), o, n); err != nil { - return fmt.Errorf("error updating tags: %s", err) + return fmt.Errorf("updating RDS Cluster (%s) tags: %w", d.Get("arn").(string), err) } } @@ -1482,32 +1443,34 @@ func resourceClusterStateRefreshFunc(conn *rds.RDS, dbClusterIdentifier string) } } -func setIAMRoleToCluster(clusterIdentifier string, roleArn string, conn *rds.RDS) error { - params := &rds.AddRoleToDBClusterInput{ - DBClusterIdentifier: aws.String(clusterIdentifier), - RoleArn: aws.String(roleArn), +func addIAMRoleToCluster(conn *rds.RDS, clusterID, roleARN string) error { + input := &rds.AddRoleToDBClusterInput{ + DBClusterIdentifier: aws.String(clusterID), + RoleArn: aws.String(roleARN), } - _, err := conn.AddRoleToDBCluster(params) - return err -} -func removeIAMRoleFromCluster(clusterIdentifier string, roleArn string, conn *rds.RDS) error { - params := &rds.RemoveRoleFromDBClusterInput{ - DBClusterIdentifier: aws.String(clusterIdentifier), - RoleArn: aws.String(roleArn), + _, err := conn.AddRoleToDBCluster(input) + + if err != nil { + return fmt.Errorf("adding IAM Role (%s) to RDS Cluster (%s): %w", roleARN, clusterID, err) } - _, err := conn.RemoveRoleFromDBCluster(params) - return err + + return nil } -var resourceClusterCreatePendingStates = []string{ - "creating", - "backing-up", - "modifying", - "preparing-data-migration", - "migrating", - "resetting-master-credentials", - "rebooting", +func removeIAMRoleFromCluster(conn *rds.RDS, clusterID, roleARN string) error { + input := &rds.RemoveRoleFromDBClusterInput{ + DBClusterIdentifier: aws.String(clusterID), + RoleArn: aws.String(roleARN), + } + + _, err := conn.RemoveRoleFromDBCluster(input) + + if err != nil { + return fmt.Errorf("removing IAM Role (%s) from RDS Cluster (%s): %w", roleARN, clusterID, err) + } + + return err } var resourceClusterUpdatePendingStates = []string{ diff --git a/internal/service/rds/flex.go b/internal/service/rds/flex.go index 363fd4cd25d..80481db4997 100644 --- a/internal/service/rds/flex.go +++ b/internal/service/rds/flex.go @@ -9,25 +9,34 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/flex" ) -func ExpandClusterScalingConfiguration(l []interface{}) *rds.ScalingConfiguration { - if len(l) == 0 || l[0] == nil { +func expandScalingConfiguration(tfMap map[string]interface{}) *rds.ScalingConfiguration { + if tfMap == nil { return nil } - m := l[0].(map[string]interface{}) + apiObject := &rds.ScalingConfiguration{} + + if v, ok := tfMap["auto_pause"].(bool); ok { + apiObject.AutoPause = aws.Bool(v) + } + + if v, ok := tfMap["max_capacity"].(int); ok { + apiObject.MaxCapacity = aws.Int64(int64(v)) + } + + if v, ok := tfMap["min_capacity"].(int); ok { + apiObject.MinCapacity = 
aws.Int64(int64(v)) + } - scalingConfiguration := &rds.ScalingConfiguration{ - AutoPause: aws.Bool(m["auto_pause"].(bool)), - MaxCapacity: aws.Int64(int64(m["max_capacity"].(int))), - MinCapacity: aws.Int64(int64(m["min_capacity"].(int))), - SecondsUntilAutoPause: aws.Int64(int64(m["seconds_until_auto_pause"].(int))), + if v, ok := tfMap["seconds_until_auto_pause"].(int); ok { + apiObject.SecondsUntilAutoPause = aws.Int64(int64(v)) } - if vTimeoutAction, ok := m["timeout_action"].(string); ok && vTimeoutAction != "" { - scalingConfiguration.TimeoutAction = aws.String(vTimeoutAction) + if v, ok := tfMap["timeout_action"].(string); ok && v != "" { + apiObject.TimeoutAction = aws.String(v) } - return scalingConfiguration + return apiObject } func flattenScalingConfigurationInfo(apiObject *rds.ScalingConfigurationInfo) map[string]interface{} { diff --git a/internal/service/rds/flex_test.go b/internal/service/rds/flex_test.go index dc787da503e..249831d2eb1 100644 --- a/internal/service/rds/flex_test.go +++ b/internal/service/rds/flex_test.go @@ -76,54 +76,3 @@ func TestFlattenParameters(t *testing.T) { } } } - -// TestExpandRdsClusterScalingConfiguration_serverless removed in v3.0.0 -// as all engine_modes are treated equal when expanding scaling_configuration -// and an override of min_capacity is no longer needed -// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/11698 -func TestExpandClusterScalingConfiguration_basic(t *testing.T) { - type testCase struct { - EngineMode string - Input []interface{} - ExpectNil bool - } - cases := []testCase{} - - // RDS Cluster Scaling Configuration is only valid for serverless, but we're relying on AWS errors. - // If Terraform adds whole-resource validation, we can do our own validation at plan time. - for _, engineMode := range []string{"global", "multimaster", "parallelquery", "provisioned", "serverless"} { - cases = append(cases, []testCase{ - { - EngineMode: engineMode, - Input: []interface{}{ - map[string]interface{}{ - "auto_pause": false, - "max_capacity": 32, - "min_capacity": 4, - "seconds_until_auto_pause": 600, - "timeout_action": "ForceApplyCapacityChange", - }, - }, - ExpectNil: false, - }, - { - EngineMode: engineMode, - Input: []interface{}{}, - ExpectNil: true, - }, { - EngineMode: engineMode, - Input: []interface{}{ - nil, - }, - ExpectNil: true, - }, - }...) 
- } - - for _, tc := range cases { - output := ExpandClusterScalingConfiguration(tc.Input) - if tc.ExpectNil != (output == nil) { - t.Errorf("EngineMode %q: Expected nil: %t, Got: %v", tc.EngineMode, tc.ExpectNil, output) - } - } -} diff --git a/internal/service/rds/instance_test.go b/internal/service/rds/instance_test.go index 73ff82962f4..8a83e474557 100644 --- a/internal/service/rds/instance_test.go +++ b/internal/service/rds/instance_test.go @@ -176,7 +176,7 @@ func TestAccRDSInstance_disappears(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccInstanceConfig_basic(rName), - Check: resource.ComposeAggregateTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), acctest.CheckResourceDisappears(acctest.Provider, tfrds.ResourceInstance(), resourceName), ), @@ -203,7 +203,7 @@ func TestAccRDSInstance_tags(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccInstanceConfig_tags1(rName, "key1", "value1"), - Check: resource.ComposeAggregateTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), @@ -223,7 +223,7 @@ func TestAccRDSInstance_tags(t *testing.T) { }, { Config: testAccInstanceConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), - Check: resource.ComposeAggregateTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), @@ -232,7 +232,7 @@ func TestAccRDSInstance_tags(t *testing.T) { }, { Config: testAccInstanceConfig_tags1(rName, "key2", "value2"), - Check: resource.ComposeAggregateTestCheckFunc( + Check: resource.ComposeTestCheckFunc( testAccCheckInstanceExists(resourceName, &v), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), diff --git a/internal/service/rds/wait.go b/internal/service/rds/wait.go index 72f300b5de8..487dba64ff2 100644 --- a/internal/service/rds/wait.go +++ b/internal/service/rds/wait.go @@ -116,6 +116,33 @@ func waitDBProxyEndpointDeleted(conn *rds.RDS, id string, timeout time.Duration) return nil, err } +func waitDBClusterCreated(conn *rds.RDS, id string, timeout time.Duration) (*rds.DBCluster, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + ClusterStatusBackingUp, + ClusterStatusCreating, + ClusterStatusMigrating, + ClusterStatusModifying, + ClusterStatusPreparingDataMigration, + ClusterStatusRebooting, + ClusterStatusResettingMasterCredentials, + }, + Target: []string{ClusterStatusAvailable}, + Refresh: statusDBCluster(conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*rds.DBCluster); ok { + return output, err + } + + return nil, err +} + func waitDBClusterDeleted(conn *rds.RDS, id string, timeout time.Duration) (*rds.DBCluster, error) { stateConf := &resource.StateChangeConf{ Pending: []string{ @@ -124,12 +151,37 @@ func waitDBClusterDeleted(conn *rds.RDS, id string, timeout time.Duration) (*rds ClusterStatusDeleting, ClusterStatusModifying, }, - Target: []string{}, - Refresh: statusDBCluster(conn, id), - Timeout: timeout, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, - ContinuousTargetOccurence: 3, 
+ Target: []string{}, + Refresh: statusDBCluster(conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*rds.DBCluster); ok { + return output, err + } + + return nil, err +} + +func waitDBClusterUpdated(conn *rds.RDS, id string, timeout time.Duration) (*rds.DBCluster, error) { + stateConf := &resource.StateChangeConf{ + Pending: []string{ + ClusterStatusBackingUp, + ClusterStatusConfiguringIAMDatabaseAuth, + ClusterStatusModifying, + ClusterStatusRenaming, + ClusterStatusResettingMasterCredentials, + ClusterStatusUpgrading, + }, + Target: []string{ClusterStatusAvailable}, + Refresh: statusDBCluster(conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, } outputRaw, err := stateConf.WaitForState() From eb220bd0b19151ba26ea22d38c6b63616da4a27c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 6 Sep 2022 14:36:55 -0400 Subject: [PATCH 18/29] r/aws_rds_cluster: Tidy up resource Update. --- internal/service/rds/cluster.go | 306 +++++++++---------------- internal/service/rds/global_cluster.go | 57 +++++ 2 files changed, 169 insertions(+), 194 deletions(-) diff --git a/internal/service/rds/cluster.go b/internal/service/rds/cluster.go index 453e17c99ca..4e32040f971 100644 --- a/internal/service/rds/cluster.go +++ b/internal/service/rds/cluster.go @@ -1096,180 +1096,155 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error { func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).RDSConn - requestUpdate := false - req := &rds.ModifyDBClusterInput{ - ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), - DBClusterIdentifier: aws.String(d.Id()), - } - - if v, ok := d.GetOk("allow_major_version_upgrade"); ok { - req.AllowMajorVersionUpgrade = aws.Bool(v.(bool)) - } - - if d.HasChange("backtrack_window") { - req.BacktrackWindow = aws.Int64(int64(d.Get("backtrack_window").(int))) - requestUpdate = true - } + if d.HasChangesExcept( + "allow_major_version_upgrade", + "final_snapshot_identifier", + "global_cluster_identifier", + "iam_roles", + "replication_source_identifier", + "skip_final_snapshot", + "tags", "tags_all") { + input := &rds.ModifyDBClusterInput{ + ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), + DBClusterIdentifier: aws.String(d.Id()), + } - if d.HasChange("copy_tags_to_snapshot") { - req.CopyTagsToSnapshot = aws.Bool(d.Get("copy_tags_to_snapshot").(bool)) - requestUpdate = true - } + if d.HasChange("allocated_storage") { + input.AllocatedStorage = aws.Int64(int64(d.Get("allocated_storage").(int))) + } - if d.HasChange("db_instance_parameter_group_name") { - req.DBInstanceParameterGroupName = aws.String(d.Get("db_instance_parameter_group_name").(string)) - requestUpdate = true - } + if v, ok := d.GetOk("allow_major_version_upgrade"); ok { + input.AllowMajorVersionUpgrade = aws.Bool(v.(bool)) + } - if d.HasChange("master_password") { - req.MasterUserPassword = aws.String(d.Get("master_password").(string)) - requestUpdate = true - } + if d.HasChange("backtrack_window") { + input.BacktrackWindow = aws.Int64(int64(d.Get("backtrack_window").(int))) + } - if d.HasChange("db_cluster_instance_class") { - req.EngineVersion = aws.String(d.Get("db_cluster_instance_class").(string)) - requestUpdate = true - } + if d.HasChange("backup_retention_period") { + input.BackupRetentionPeriod = aws.Int64(int64(d.Get("backup_retention_period").(int))) + } - if 
d.HasChange("engine_version") { - req.EngineVersion = aws.String(d.Get("engine_version").(string)) - requestUpdate = true - } + if d.HasChange("copy_tags_to_snapshot") { + input.CopyTagsToSnapshot = aws.Bool(d.Get("copy_tags_to_snapshot").(bool)) + } - if d.HasChange("vpc_security_group_ids") { - if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 { - req.VpcSecurityGroupIds = flex.ExpandStringSet(attr) - } else { - req.VpcSecurityGroupIds = []*string{} + if d.HasChange("db_cluster_instance_class") { + input.EngineVersion = aws.String(d.Get("db_cluster_instance_class").(string)) } - requestUpdate = true - } - if d.HasChange("port") { - req.Port = aws.Int64(int64(d.Get("port").(int))) - requestUpdate = true - } + if d.HasChange("db_cluster_parameter_group_name") { + input.DBClusterParameterGroupName = aws.String(d.Get("db_cluster_parameter_group_name").(string)) + } - if d.HasChange("storage_type") { - req.StorageType = aws.String(d.Get("storage_type").(string)) - requestUpdate = true - } + if d.HasChange("db_instance_parameter_group_name") { + input.DBInstanceParameterGroupName = aws.String(d.Get("db_instance_parameter_group_name").(string)) + } - if d.HasChange("allocated_storage") { - req.AllocatedStorage = aws.Int64(int64(d.Get("allocated_storage").(int))) - requestUpdate = true - } + if d.HasChange("deletion_protection") { + input.DeletionProtection = aws.Bool(d.Get("deletion_protection").(bool)) + } - if d.HasChange("iops") { - req.Iops = aws.Int64(int64(d.Get("iops").(int))) - requestUpdate = true - } + if d.HasChange("enable_global_write_forwarding") { + input.EnableGlobalWriteForwarding = aws.Bool(d.Get("enable_global_write_forwarding").(bool)) + } - if d.HasChange("preferred_backup_window") { - req.PreferredBackupWindow = aws.String(d.Get("preferred_backup_window").(string)) - requestUpdate = true - } + if d.HasChange("enable_http_endpoint") { + input.EnableHttpEndpoint = aws.Bool(d.Get("enable_http_endpoint").(bool)) + } - if d.HasChange("preferred_maintenance_window") { - req.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string)) - requestUpdate = true - } + if d.HasChange("enabled_cloudwatch_logs_exports") { + oraw, nraw := d.GetChange("enabled_cloudwatch_logs_exports") + o := oraw.(*schema.Set) + n := nraw.(*schema.Set) - if d.HasChange("backup_retention_period") { - req.BackupRetentionPeriod = aws.Int64(int64(d.Get("backup_retention_period").(int))) - requestUpdate = true - } + input.CloudwatchLogsExportConfiguration = &rds.CloudwatchLogsExportConfiguration{ + DisableLogTypes: flex.ExpandStringSet(o.Difference(n)), + EnableLogTypes: flex.ExpandStringSet(n.Difference(o)), + } + } - if d.HasChange("db_cluster_parameter_group_name") { - req.DBClusterParameterGroupName = aws.String(d.Get("db_cluster_parameter_group_name").(string)) - requestUpdate = true - } + if d.HasChange("engine_version") { + input.EngineVersion = aws.String(d.Get("engine_version").(string)) + } - if d.HasChange("deletion_protection") { - req.DeletionProtection = aws.Bool(d.Get("deletion_protection").(bool)) - requestUpdate = true - } + if d.HasChange("iam_database_authentication_enabled") { + input.EnableIAMDatabaseAuthentication = aws.Bool(d.Get("iam_database_authentication_enabled").(bool)) + } - if d.HasChange("iam_database_authentication_enabled") { - req.EnableIAMDatabaseAuthentication = aws.Bool(d.Get("iam_database_authentication_enabled").(bool)) - requestUpdate = true - } + if d.HasChange("iops") { + input.Iops = 
aws.Int64(int64(d.Get("iops").(int))) + } - if d.HasChange("network_type") { - req.NetworkType = aws.String(d.Get("network_type").(string)) - requestUpdate = true - } + if d.HasChange("master_password") { + input.MasterUserPassword = aws.String(d.Get("master_password").(string)) + } - if d.HasChange("enabled_cloudwatch_logs_exports") { - oraw, nraw := d.GetChange("enabled_cloudwatch_logs_exports") - o := oraw.(*schema.Set) - n := nraw.(*schema.Set) + if d.HasChange("network_type") { + input.NetworkType = aws.String(d.Get("network_type").(string)) + } - enable := n.Difference(o) - disable := o.Difference(n) + if d.HasChange("port") { + input.Port = aws.Int64(int64(d.Get("port").(int))) + } - req.CloudwatchLogsExportConfiguration = &rds.CloudwatchLogsExportConfiguration{ - EnableLogTypes: flex.ExpandStringSet(enable), - DisableLogTypes: flex.ExpandStringSet(disable), + if d.HasChange("preferred_backup_window") { + input.PreferredBackupWindow = aws.String(d.Get("preferred_backup_window").(string)) } - requestUpdate = true - } - if d.HasChange("scaling_configuration") { - if v, ok := d.GetOk("scaling_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - req.ScalingConfiguration = expandScalingConfiguration(v.([]interface{})[0].(map[string]interface{})) + if d.HasChange("preferred_maintenance_window") { + input.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string)) } - requestUpdate = true - } - if d.HasChange("serverlessv2_scaling_configuration") { - if v, ok := d.GetOk("serverlessv2_scaling_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - req.ServerlessV2ScalingConfiguration = expandServerlessV2ScalingConfiguration(v.([]interface{})[0].(map[string]interface{})) - requestUpdate = true + if d.HasChange("scaling_configuration") { + if v, ok := d.GetOk("scaling_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.ScalingConfiguration = expandScalingConfiguration(v.([]interface{})[0].(map[string]interface{})) + } } - } - if d.HasChange("enable_http_endpoint") { - req.EnableHttpEndpoint = aws.Bool(d.Get("enable_http_endpoint").(bool)) - requestUpdate = true - } + if d.HasChange("serverlessv2_scaling_configuration") { + if v, ok := d.GetOk("serverlessv2_scaling_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.ServerlessV2ScalingConfiguration = expandServerlessV2ScalingConfiguration(v.([]interface{})[0].(map[string]interface{})) + } + } - if d.HasChange("enable_global_write_forwarding") { - req.EnableGlobalWriteForwarding = aws.Bool(d.Get("enable_global_write_forwarding").(bool)) - requestUpdate = true - } + if d.HasChange("storage_type") { + input.StorageType = aws.String(d.Get("storage_type").(string)) + } - if requestUpdate { - err := resource.Retry(5*time.Minute, func() *resource.RetryError { - _, err := conn.ModifyDBCluster(req) - if err != nil { - if tfawserr.ErrMessageContains(err, "InvalidParameterValue", "IAM role ARN value is invalid or does not include the required permissions") { - return resource.RetryableError(err) - } + if d.HasChange("vpc_security_group_ids") { + if v, ok := d.GetOk("vpc_security_group_ids"); ok && v.(*schema.Set).Len() > 0 { + input.VpcSecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + } else { + input.VpcSecurityGroupIds = aws.StringSlice(nil) + } + } - if tfawserr.ErrMessageContains(err, rds.ErrCodeInvalidDBClusterStateFault, "Cannot modify engine version without a primary 
instance in DB cluster") { - return resource.NonRetryableError(err) + log.Printf("[DEBUG] Modifying RDS Cluster: %s", input) + _, err := tfresource.RetryWhen(5*time.Minute, + func() (interface{}, error) { + return conn.ModifyDBCluster(input) + }, + func(err error) (bool, error) { + if tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, "IAM role ARN value is invalid or does not include the required permissions") { + return true, err } if tfawserr.ErrCodeEquals(err, rds.ErrCodeInvalidDBClusterStateFault) { - return resource.RetryableError(err) + return true, err } - return resource.NonRetryableError(err) - } - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.ModifyDBCluster(req) - } + + return false, err + }, + ) + if err != nil { - return fmt.Errorf("Failed to modify RDS Cluster (%s): %s", d.Id(), err) + return fmt.Errorf("updating RDS Cluster (%s): %w", d.Id(), err) } - log.Printf("[INFO] Waiting for RDS Cluster (%s) to be available", d.Id()) - err = waitForClusterUpdate(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return fmt.Errorf("error waiting for RDS Cluster (%s) to be available: %s", d.Id(), err) + if _, err := waitDBClusterUpdated(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return fmt.Errorf("waiting for RDS Cluster (%s) update: %w", d.Id(), err) } } @@ -1295,7 +1270,7 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error { _, err := conn.RemoveFromGlobalCluster(input) if err != nil && !tfawserr.ErrCodeEquals(err, rds.ErrCodeGlobalClusterNotFoundFault) && !tfawserr.ErrMessageContains(err, "InvalidParameterValue", "is not found in global cluster") { - return fmt.Errorf("error removing RDS Cluster (%s) from RDS Global Cluster: %s", d.Id(), err) + return fmt.Errorf("removing RDS Cluster (%s) from RDS Global Cluster: %w", d.Id(), err) } } @@ -1409,40 +1384,6 @@ func resourceClusterImport(d *schema.ResourceData, meta interface{}) ([]*schema. 
return []*schema.ResourceData{d}, nil } -func resourceClusterStateRefreshFunc(conn *rds.RDS, dbClusterIdentifier string) resource.StateRefreshFunc { - return func() (interface{}, string, error) { - resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(dbClusterIdentifier), - }) - - if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBClusterNotFoundFault) { - return 42, "destroyed", nil - } - - if err != nil { - return nil, "", err - } - - var dbc *rds.DBCluster - - for _, c := range resp.DBClusters { - if aws.StringValue(c.DBClusterIdentifier) == dbClusterIdentifier { - dbc = c - } - } - - if dbc == nil { - return 42, "destroyed", nil - } - - if dbc.Status != nil { - log.Printf("[DEBUG] DB Cluster status (%s): %s", dbClusterIdentifier, *dbc.Status) - } - - return dbc, aws.StringValue(dbc.Status), nil - } -} - func addIAMRoleToCluster(conn *rds.RDS, clusterID, roleARN string) error { input := &rds.AddRoleToDBClusterInput{ DBClusterIdentifier: aws.String(clusterID), @@ -1473,29 +1414,6 @@ func removeIAMRoleFromCluster(conn *rds.RDS, clusterID, roleARN string) error { return err } -var resourceClusterUpdatePendingStates = []string{ - "backing-up", - "configuring-iam-database-auth", - "modifying", - "renaming", - "resetting-master-credentials", - "upgrading", -} - -func waitForClusterUpdate(conn *rds.RDS, id string, timeout time.Duration) error { - stateConf := &resource.StateChangeConf{ - Pending: resourceClusterUpdatePendingStates, - Target: []string{"available"}, - Refresh: resourceClusterStateRefreshFunc(conn, id), - Timeout: timeout, - MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting - } - - _, err := stateConf.WaitForState() - return err -} - func clusterSetResourceDataEngineVersionFromCluster(d *schema.ResourceData, c *rds.DBCluster) { oldVersion := d.Get("engine_version").(string) newVersion := aws.StringValue(c.EngineVersion) diff --git a/internal/service/rds/global_cluster.go b/internal/service/rds/global_cluster.go index a2d98929268..f32552da893 100644 --- a/internal/service/rds/global_cluster.go +++ b/internal/service/rds/global_cluster.go @@ -739,3 +739,60 @@ func globalClusterUpgradeEngineVersion(d *schema.ResourceData, meta interface{}, return nil } + +var resourceClusterUpdatePendingStates = []string{ + "backing-up", + "configuring-iam-database-auth", + "modifying", + "renaming", + "resetting-master-credentials", + "upgrading", +} + +func waitForClusterUpdate(conn *rds.RDS, id string, timeout time.Duration) error { + stateConf := &resource.StateChangeConf{ + Pending: resourceClusterUpdatePendingStates, + Target: []string{"available"}, + Refresh: resourceClusterStateRefreshFunc(conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + _, err := stateConf.WaitForState() + return err +} + +func resourceClusterStateRefreshFunc(conn *rds.RDS, dbClusterIdentifier string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{ + DBClusterIdentifier: aws.String(dbClusterIdentifier), + }) + + if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBClusterNotFoundFault) { + return 42, "destroyed", nil + } + + if err != nil { + return nil, "", err + } + + var dbc *rds.DBCluster + + for _, c := range resp.DBClusters { + if aws.StringValue(c.DBClusterIdentifier) == dbClusterIdentifier { + dbc = c + } + } + + if dbc == nil { + return 42, "destroyed", nil + } + + if 
dbc.Status != nil { + log.Printf("[DEBUG] DB Cluster status (%s): %s", dbClusterIdentifier, *dbc.Status) + } + + return dbc, aws.StringValue(dbc.Status), nil + } +} From 2c9a94f305116bc8d74e136632c0c09ea66e0bbb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 6 Sep 2022 14:58:40 -0400 Subject: [PATCH 19/29] r/aws_rds_cluster: Tidy up 'TestAccRDSCluster_takeFinalSnapshot'. Acceptance test output: % make testacc TESTARGS='-run=TestAccRDSCluster_takeFinalSnapshot' PKG=rds ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/rds/... -v -count 1 -parallel 2 -run=TestAccRDSCluster_takeFinalSnapshot -timeout 180m === RUN TestAccRDSCluster_takeFinalSnapshot === PAUSE TestAccRDSCluster_takeFinalSnapshot === CONT TestAccRDSCluster_takeFinalSnapshot --- PASS: TestAccRDSCluster_takeFinalSnapshot (179.72s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/rds 184.293s --- internal/service/rds/cluster_test.go | 102 +++++++++++--------------- internal/service/rds/find.go | 30 ++++++++ internal/service/rds/instance_test.go | 6 +- 3 files changed, 77 insertions(+), 61 deletions(-) diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go index a9ddb42e51e..492ab2bcc97 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -3,14 +3,12 @@ package rds_test import ( "errors" "fmt" - "log" "regexp" "strings" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/rds" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -646,17 +644,17 @@ func TestAccRDSCluster_generatedName(t *testing.T) { func TestAccRDSCluster_takeFinalSnapshot(t *testing.T) { var v rds.DBCluster - rInt := sdkacctest.RandInt() + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rds_cluster.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, ErrorCheck: acctest.ErrorCheck(t, rds.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterSnapshot(rInt), + CheckDestroy: testAccCheckClusterDestroyWithFinalSnapshot, Steps: []resource.TestStep{ { - Config: testAccClusterConfig_finalSnapshot(rInt), + Config: testAccClusterConfig_finalSnapshot(rName), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(resourceName, &v), ), @@ -2181,51 +2179,43 @@ func testAccCheckClusterDestroyWithProvider(s *terraform.State, provider *schema return nil } -func testAccCheckClusterSnapshot(rInt int) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_rds_cluster" { - continue - } - - // Try and delete the snapshot before we check for the cluster not found - snapshot_identifier := fmt.Sprintf("tf-acctest-rdscluster-snapshot-%d", rInt) - - awsClient := acctest.Provider.Meta().(*conns.AWSClient) - conn := awsClient.RDSConn - - log.Printf("[INFO] Deleting the Snapshot %s", snapshot_identifier) - _, snapDeleteErr := conn.DeleteDBClusterSnapshot( - &rds.DeleteDBClusterSnapshotInput{ - DBClusterSnapshotIdentifier: aws.String(snapshot_identifier), - }) - if snapDeleteErr != nil { - return snapDeleteErr - } - - // Try to find the Group - var err error - resp, err := 
conn.DescribeDBClusters( - &rds.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(rs.Primary.ID), - }) - - if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBClusterNotFoundFault) { - continue - } - - if err == nil { - if len(resp.DBClusters) != 0 && - *resp.DBClusters[0].DBClusterIdentifier == rs.Primary.ID { - return fmt.Errorf("DB Cluster %s still exists", rs.Primary.ID) - } - } +func testAccCheckClusterDestroyWithFinalSnapshot(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSConn + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_rds_cluster" { + continue + } + + finalSnapshotID := rs.Primary.Attributes["final_snapshot_identifier"] + _, err := tfrds.FindDBClusterSnapshotByID(conn, finalSnapshotID) + + if err != nil { return err } - return nil + _, err = conn.DeleteDBClusterSnapshot(&rds.DeleteDBClusterSnapshotInput{ + DBClusterSnapshotIdentifier: aws.String(finalSnapshotID), + }) + + if err != nil { + return err + } + + _, err = tfrds.FindDBClusterByID(conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("RDS Cluster %s still exists", rs.Primary.ID) } + + return nil } func testAccCheckClusterExists(n string, v *rds.DBCluster) resource.TestCheckFunc { @@ -2288,8 +2278,8 @@ resource "aws_rds_cluster" "test" { cluster_identifier = %[2]q engine = %[3]q engine_version = %[4]q - master_password = "mustbeeightcharaters" - master_username = "test" + master_password = "avoid-plaintext-passwords" + master_username = "tfacctest" skip_final_snapshot = true } @@ -2664,21 +2654,17 @@ resource "aws_rds_cluster" "test" { ` } -func testAccClusterConfig_finalSnapshot(n int) string { +func testAccClusterConfig_finalSnapshot(rName string) string { return fmt.Sprintf(` resource "aws_rds_cluster" "test" { - cluster_identifier = "tf-aurora-cluster-%[1]d" - database_name = "mydb" - master_username = "foo" - master_password = "mustbeeightcharaters" + cluster_identifier = %[1]q + database_name = "test" + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" db_cluster_parameter_group_name = "default.aurora5.6" - final_snapshot_identifier = "tf-acctest-rdscluster-snapshot-%[1]d" - - tags = { - Environment = "production" - } + final_snapshot_identifier = %[1]q } -`, n) +`, rName) } func testAccClusterConfig_withoutUserNameAndPassword(n int) string { diff --git a/internal/service/rds/find.go b/internal/service/rds/find.go index 316dc4ed471..1c7b735a4a8 100644 --- a/internal/service/rds/find.go +++ b/internal/service/rds/find.go @@ -157,6 +157,36 @@ func FindDBClusterWithActivityStream(conn *rds.RDS, dbClusterArn string) (*rds.D return dbCluster, nil } +func FindDBClusterSnapshotByID(conn *rds.RDS, id string) (*rds.DBClusterSnapshot, error) { + input := &rds.DescribeDBClusterSnapshotsInput{ + DBClusterSnapshotIdentifier: aws.String(id), + } + + output, err := conn.DescribeDBClusterSnapshots(input) + + if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBClusterSnapshotNotFoundFault) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if output == nil || len(output.DBClusterSnapshots) == 0 || output.DBClusterSnapshots[0] == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + dbClusterSnapshot := output.DBClusterSnapshots[0] + + // Eventual consistency check. 
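+	// Guard against the API returning a snapshot other than the one requested;
+	// treat a mismatched identifier as not found so callers can retry.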
+ if aws.StringValue(dbClusterSnapshot.DBClusterSnapshotIdentifier) != id { + return nil, &resource.NotFoundError{ + LastRequest: input, + } + } + + return dbClusterSnapshot, nil +} + func FindDBInstanceByID(conn *rds.RDS, id string) (*rds.DBInstance, error) { input := &rds.DescribeDBInstancesInput{ DBInstanceIdentifier: aws.String(id), diff --git a/internal/service/rds/instance_test.go b/internal/service/rds/instance_test.go index 8a83e474557..afa9e357b18 100644 --- a/internal/service/rds/instance_test.go +++ b/internal/service/rds/instance_test.go @@ -4435,7 +4435,7 @@ func testAccCheckInstanceDestroyWithFinalSnapshot(s *terraform.State) error { return err } - return fmt.Errorf("DB Instance %s still exists", rs.Primary.ID) + return fmt.Errorf("RDS DB Instance %s still exists", rs.Primary.ID) } return nil @@ -4460,7 +4460,7 @@ func testAccCheckInstanceDestroyWithoutFinalSnapshot(s *terraform.State) error { return err } } else { - return fmt.Errorf("DB Snapshot %s exists", finalSnapshotID) + return fmt.Errorf("RDS DB Snapshot %s exists", finalSnapshotID) } _, err = tfrds.FindDBInstanceByID(conn, rs.Primary.ID) @@ -4473,7 +4473,7 @@ func testAccCheckInstanceDestroyWithoutFinalSnapshot(s *terraform.State) error { return err } - return fmt.Errorf("DB Instance %s still exists", rs.Primary.ID) + return fmt.Errorf("RDS DB Instance %s still exists", rs.Primary.ID) } return nil From fd09c5a69e8698d152bd8367f4bcc0e92d88cc7b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 6 Sep 2022 15:11:33 -0400 Subject: [PATCH 20/29] r/aws_rds_cluster: Tidy up 'TestAccRDSCluster_identifierGenerated' and 'TestAccRDSCluster_identifierPrefix'. Acceptance test output: % make testacc TESTARGS='-run=TestAccRDSCluster_identifierGenerated\|TestAccRDSCluster_identifierPrefix' PKG=rds ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/rds/... 
-v -count 1 -parallel 2 -run=TestAccRDSCluster_identifierGenerated\|TestAccRDSCluster_identifierPrefix -timeout 180m === RUN TestAccRDSCluster_identifierGenerated === PAUSE TestAccRDSCluster_identifierGenerated === RUN TestAccRDSCluster_identifierPrefix === PAUSE TestAccRDSCluster_identifierPrefix === CONT TestAccRDSCluster_identifierGenerated === CONT TestAccRDSCluster_identifierPrefix --- PASS: TestAccRDSCluster_identifierPrefix (151.61s) --- PASS: TestAccRDSCluster_identifierGenerated (151.63s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/rds 155.759s --- internal/service/rds/cluster_test.go | 190 +++++++++++++-------------- 1 file changed, 94 insertions(+), 96 deletions(-) diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go index 492ab2bcc97..c643245a684 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -146,6 +146,48 @@ func TestAccRDSCluster_tags(t *testing.T) { }) } +func TestAccRDSCluster_identifierGenerated(t *testing.T) { + var v rds.DBCluster + resourceName := "aws_rds_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, rds.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_identifierGenerated(), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &v), + resource.TestMatchResourceAttr(resourceName, "cluster_identifier", regexp.MustCompile("^tf-")), + ), + }, + }, + }) +} + +func TestAccRDSCluster_identifierPrefix(t *testing.T) { + var v rds.DBCluster + resourceName := "aws_rds_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, rds.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy, + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_identifierPrefix("tf-test-"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(resourceName, &v), + resource.TestMatchResourceAttr(resourceName, "cluster_identifier", regexp.MustCompile("^tf-test-")), + ), + }, + }, + }) +} + func TestAccRDSCluster_allowMajorVersionUpgrade(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -487,28 +529,6 @@ func TestAccRDSCluster_backtrackWindow(t *testing.T) { }) } -func TestAccRDSCluster_clusterIdentifierPrefix(t *testing.T) { - var v rds.DBCluster - resourceName := "aws_rds_cluster.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, rds.EndpointsID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccClusterConfig_clusterIDPrefix("tf-test-"), - Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(resourceName, &v), - resource.TestMatchResourceAttr( - resourceName, "cluster_identifier", regexp.MustCompile("^tf-test-")), - ), - }, - }, - }) -} - func TestAccRDSCluster_dbSubnetGroupName(t *testing.T) { var dbCluster rds.DBCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -620,28 +640,6 @@ func TestAccRDSCluster_PointInTimeRestore_enabledCloudWatchLogsExports(t *testin }) } -func TestAccRDSCluster_generatedName(t 
*testing.T) { - var v rds.DBCluster - resourceName := "aws_rds_cluster.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, rds.EndpointsID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccClusterConfig_generatedName(), - Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(resourceName, &v), - resource.TestMatchResourceAttr( - resourceName, "cluster_identifier", regexp.MustCompile("^tf-")), - ), - }, - }, - }) -} - func TestAccRDSCluster_takeFinalSnapshot(t *testing.T) { var v rds.DBCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -2270,6 +2268,58 @@ resource "aws_rds_cluster" "test" { `, rName) } +func testAccClusterConfig_tags1(rName, tagKey1, tagValue1 string) string { + return fmt.Sprintf(` +resource "aws_rds_cluster" "test" { + cluster_identifier = %[1]q + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + skip_final_snapshot = true + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1) +} + +func testAccClusterConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return fmt.Sprintf(` +resource "aws_rds_cluster" "test" { + cluster_identifier = %[1]q + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + skip_final_snapshot = true + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} + +func testAccClusterConfig_identifierGenerated() string { + return ` +resource "aws_rds_cluster" "test" { + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + skip_final_snapshot = true +} +` +} + +func testAccClusterConfig_identifierPrefix(identifierPrefix string) string { + return fmt.Sprintf(` +resource "aws_rds_cluster" "test" { + cluster_identifier_prefix = %[1]q + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + skip_final_snapshot = true +} +`, identifierPrefix) +} + func testAccClusterConfig_allowMajorVersionUpgrade(rName string, allowMajorVersionUpgrade bool, engine string, engineVersion string) string { return fmt.Sprintf(` resource "aws_rds_cluster" "test" { @@ -2487,17 +2537,6 @@ resource "aws_rds_cluster" "test" { `, backtrackWindow) } -func testAccClusterConfig_clusterIDPrefix(clusterIdentifierPrefix string) string { - return fmt.Sprintf(` -resource "aws_rds_cluster" "test" { - cluster_identifier_prefix = %q - master_username = "root" - master_password = "password" - skip_final_snapshot = true -} -`, clusterIdentifierPrefix) -} - func testAccClusterConfig_subnetGroupName(rName string) string { return fmt.Sprintf(` data "aws_availability_zones" "available" { @@ -2644,16 +2683,6 @@ resource "aws_rds_cluster" "test" { `, bucketName, bucketPrefix, uniqueId) } -func testAccClusterConfig_generatedName() string { - return ` -resource "aws_rds_cluster" "test" { - master_username = "root" - master_password = "password" - skip_final_snapshot = true -} -` -} - func testAccClusterConfig_finalSnapshot(rName string) string { return fmt.Sprintf(` resource "aws_rds_cluster" "test" { @@ -2770,37 +2799,6 @@ resource "aws_rds_cluster" "restored_pit" { `, parentId, childId, enabledCloudwatchLogExports)) } -func testAccClusterConfig_tags1(rName, tagKey1, tagValue1 string) string { - return fmt.Sprintf(` -resource "aws_rds_cluster" "test" { - cluster_identifier = %q - master_username = 
"foo" - master_password = "mustbeeightcharaters" - skip_final_snapshot = true - - tags = { - %q = %q - } -} -`, rName, tagKey1, tagValue1) -} - -func testAccClusterConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return fmt.Sprintf(` -resource "aws_rds_cluster" "test" { - cluster_identifier = %q - master_username = "foo" - master_password = "mustbeeightcharaters" - skip_final_snapshot = true - - tags = { - %q = %q - %q = %q - } -} -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) -} - func testAccClusterConfig_enabledCloudWatchLogsExports1(rName, enabledCloudwatchLogExports1 string) string { return fmt.Sprintf(` resource "aws_rds_cluster" "test" { From ad4bd8f1b3cf52f69a95306b94d537eeee27dd64 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 6 Sep 2022 15:38:15 -0400 Subject: [PATCH 21/29] r/aws_rds_cluster: Tidy up 'TestAccRDSCluster_availabilityZones', 'TestAccRDSCluster_dbSubnetGroupName' and 'TestAccRDSCluster_pointInTimeRestore'. Acceptance test output: % make testacc TESTARGS='-run=TestAccRDSCluster_availabilityZones\|TestAccRDSCluster_dbSubnetGroupName\|TestAccRDSCluster_pointInTimeRestore' PKG=rds ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/rds/... -v -count 1 -parallel 2 -run=TestAccRDSCluster_availabilityZones\|TestAccRDSCluster_dbSubnetGroupName\|TestAccRDSCluster_pointInTimeRestore -timeout 180m === RUN TestAccRDSCluster_availabilityZones === PAUSE TestAccRDSCluster_availabilityZones === RUN TestAccRDSCluster_dbSubnetGroupName === PAUSE TestAccRDSCluster_dbSubnetGroupName === RUN TestAccRDSCluster_pointInTimeRestore === PAUSE TestAccRDSCluster_pointInTimeRestore === CONT TestAccRDSCluster_availabilityZones === CONT TestAccRDSCluster_pointInTimeRestore --- PASS: TestAccRDSCluster_availabilityZones (142.59s) === CONT TestAccRDSCluster_dbSubnetGroupName --- PASS: TestAccRDSCluster_dbSubnetGroupName (163.82s) --- PASS: TestAccRDSCluster_pointInTimeRestore (355.39s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/rds 359.505s --- internal/service/rds/cluster_test.go | 134 ++++++++------------------- 1 file changed, 37 insertions(+), 97 deletions(-) diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go index c643245a684..c75dc70602d 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -583,11 +583,10 @@ func TestAccRDSCluster_pointInTimeRestore(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var v rds.DBCluster - var c rds.DBCluster - - parentId := sdkacctest.RandomWithPrefix("tf-acc-point-in-time-restore-seed-test") - restoredId := sdkacctest.RandomWithPrefix("tf-acc-point-in-time-restored-test") + var sourceDBCluster, dbCluster rds.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + sourceResourceName := "aws_rds_cluster.test" + resourceName := "aws_rds_cluster.restore" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -596,12 +595,11 @@ func TestAccRDSCluster_pointInTimeRestore(t *testing.T) { CheckDestroy: testAccCheckClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccClusterConfig_pointInTimeRestoreSource(parentId, restoredId), + Config: testAccClusterConfig_pointInTimeRestoreSource(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists("aws_rds_cluster.test", &v), - testAccCheckClusterExists("aws_rds_cluster.restored_pit", &c), - 
resource.TestCheckResourceAttr("aws_rds_cluster.restored_pit", "cluster_identifier", restoredId), - resource.TestCheckResourceAttrPair("aws_rds_cluster.restored_pit", "engine", "aws_rds_cluster.test", "engine"), + testAccCheckClusterExists(sourceResourceName, &sourceDBCluster), + testAccCheckClusterExists(resourceName, &dbCluster), + resource.TestCheckResourceAttrPair(resourceName, "engine", sourceResourceName, "engine"), ), }, }, @@ -2435,25 +2433,16 @@ resource "aws_rds_cluster_instance" "test" { } func testAccClusterConfig_availabilityZones(rName string) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - + return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` resource "aws_rds_cluster" "test" { apply_immediately = true availability_zones = [data.aws_availability_zones.available.names[0], data.aws_availability_zones.available.names[1], data.aws_availability_zones.available.names[2]] - cluster_identifier = %q - master_password = "mustbeeightcharaters" - master_username = "test" + cluster_identifier = %[1]q + master_password = "avoid-plaintext-passwords" + master_username = "tfacctest" skip_final_snapshot = true } -`, rName) +`, rName)) } func testAccClusterConfig_storageType(rName string) string { @@ -2538,57 +2527,20 @@ resource "aws_rds_cluster" "test" { } func testAccClusterConfig_subnetGroupName(rName string) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 3), fmt.Sprintf(` +resource "aws_db_subnet_group" "test" { + name = %[1]q + subnet_ids = aws_subnet.test[*].id } resource "aws_rds_cluster" "test" { cluster_identifier = %[1]q - master_username = "root" - master_password = "password" + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" db_subnet_group_name = aws_db_subnet_group.test.name skip_final_snapshot = true } - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags = { - Name = "terraform-testacc-rds-cluster-name-prefix" - } -} - -resource "aws_subnet" "a" { - vpc_id = aws_vpc.test.id - cidr_block = "10.0.0.0/24" - availability_zone = data.aws_availability_zones.available.names[0] - - tags = { - Name = "tf-acc-rds-cluster-name-prefix-a" - } -} - -resource "aws_subnet" "b" { - vpc_id = aws_vpc.test.id - cidr_block = "10.0.1.0/24" - availability_zone = data.aws_availability_zones.available.names[1] - - tags = { - Name = "tf-acc-rds-cluster-name-prefix-b" - } -} - -resource "aws_db_subnet_group" "test" { - name = %[1]q - subnet_ids = [aws_subnet.a.id, aws_subnet.b.id] -} -`, rName) +`, rName)) } func testAccClusterConfig_s3Restore(bucketName string, bucketPrefix string, uniqueId string) string { @@ -2706,50 +2658,38 @@ resource "aws_rds_cluster" "default" { `, n) } -func testAccClusterConfig_pointInTimeRestoreSource(parentId, childId string) string { - return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` +func testAccClusterConfig_baseForPITR(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 3), fmt.Sprintf(` +resource "aws_db_subnet_group" "test" { + name = %[1]q + subnet_ids = aws_subnet.test[*].id +} + resource "aws_rds_cluster" "test" { - cluster_identifier = "%[1]s" - master_username = "root" - master_password = "password" + 
cluster_identifier = %[1]q + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" db_subnet_group_name = aws_db_subnet_group.test.name skip_final_snapshot = true engine = "aurora-mysql" } - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - tags = { - Name = "%[1]s-vpc" - } -} - -resource "aws_subnet" "subnets" { - count = length(data.aws_availability_zones.available.names) - vpc_id = aws_vpc.test.id - cidr_block = "10.0.${count.index}.0/24" - availability_zone = data.aws_availability_zones.available.names[count.index] - tags = { - Name = "%[1]s-subnet-${count.index}" - } -} - -resource "aws_db_subnet_group" "test" { - name = "%[1]s-db-subnet-group" - subnet_ids = aws_subnet.subnets[*].id +`, rName)) } -resource "aws_rds_cluster" "restored_pit" { - cluster_identifier = "%s" +func testAccClusterConfig_pointInTimeRestoreSource(rName string) string { + return acctest.ConfigCompose(testAccClusterConfig_baseForPITR(rName), fmt.Sprintf(` +resource "aws_rds_cluster" "restore" { + cluster_identifier = "%[1]s-restore" skip_final_snapshot = true engine = aws_rds_cluster.test.engine + restore_to_point_in_time { source_cluster_identifier = aws_rds_cluster.test.cluster_identifier restore_type = "full-copy" use_latest_restorable_time = true } } -`, parentId, childId)) +`, rName)) } func testAccClusterConfig_pointInTimeRestoreSource_enabled_cloudWatch_logs_exports(parentId, childId, enabledCloudwatchLogExports string) string { From 8ade83b4ac4b9efb4efbfcb0e33009ecdf83250d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 6 Sep 2022 16:03:31 -0400 Subject: [PATCH 22/29] r/aws_rds_cluster: Tidy up 'TestAccRDSCluster_PointInTimeRestore_enabledCloudWatchLogsExports', 'TestAccRDSCluster_networkType' and 'TestAccRDSCluster_EngineMode_multiMaster'. Acceptance test output: % make testacc TESTARGS='-run=TestAccRDSCluster_PointInTimeRestore_enabledCloudWatchLogsExports\|TestAccRDSCluster_networkType\|TestAccRDSCluster_EngineMode_multiMaster' PKG=rds ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/rds/... 
-v -count 1 -parallel 2 -run=TestAccRDSCluster_PointInTimeRestore_enabledCloudWatchLogsExports\|TestAccRDSCluster_networkType\|TestAccRDSCluster_EngineMode_multiMaster -timeout 180m === RUN TestAccRDSCluster_PointInTimeRestore_enabledCloudWatchLogsExports === PAUSE TestAccRDSCluster_PointInTimeRestore_enabledCloudWatchLogsExports === RUN TestAccRDSCluster_networkType === PAUSE TestAccRDSCluster_networkType === RUN TestAccRDSCluster_EngineMode_multiMaster === PAUSE TestAccRDSCluster_EngineMode_multiMaster === CONT TestAccRDSCluster_PointInTimeRestore_enabledCloudWatchLogsExports === CONT TestAccRDSCluster_EngineMode_multiMaster --- PASS: TestAccRDSCluster_EngineMode_multiMaster (179.05s) === CONT TestAccRDSCluster_networkType --- PASS: TestAccRDSCluster_PointInTimeRestore_enabledCloudWatchLogsExports (331.96s) --- PASS: TestAccRDSCluster_networkType (275.56s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/rds 458.717s --- internal/service/rds/cluster_test.go | 121 +++++---------------------- 1 file changed, 23 insertions(+), 98 deletions(-) diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go index c75dc70602d..aefc58a1a5a 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -611,11 +611,10 @@ func TestAccRDSCluster_PointInTimeRestore_enabledCloudWatchLogsExports(t *testin t.Skip("skipping long-running test in short mode") } - var v rds.DBCluster - var c rds.DBCluster - - parentId := sdkacctest.RandomWithPrefix("tf-acc-point-in-time-restore-seed-test") - restoredId := sdkacctest.RandomWithPrefix("tf-acc-point-in-time-restored-test") + var sourceDBCluster, dbCluster rds.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + sourceResourceName := "aws_rds_cluster.test" + resourceName := "aws_rds_cluster.restore" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -624,14 +623,12 @@ func TestAccRDSCluster_PointInTimeRestore_enabledCloudWatchLogsExports(t *testin CheckDestroy: testAccCheckClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccClusterConfig_pointInTimeRestoreSource_enabled_cloudWatch_logs_exports(parentId, restoredId, "audit"), + Config: testAccClusterConfig_pointInTimeRestoreSource_enabledCloudWatchLogsExports(rName, "audit"), Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists("aws_rds_cluster.test", &v), - testAccCheckClusterExists("aws_rds_cluster.restored_pit", &c), - resource.TestCheckResourceAttr("aws_rds_cluster.restored_pit", "cluster_identifier", restoredId), - resource.TestCheckResourceAttrPair("aws_rds_cluster.restored_pit", "engine", "aws_rds_cluster.test", "engine"), - resource.TestCheckResourceAttr("aws_rds_cluster.restored_pit", "enabled_cloudwatch_logs_exports.#", "1"), - resource.TestCheckTypeSetElemAttr("aws_rds_cluster.restored_pit", "enabled_cloudwatch_logs_exports.*", "audit"), + testAccCheckClusterExists(sourceResourceName, &sourceDBCluster), + testAccCheckClusterExists(resourceName, &dbCluster), + resource.TestCheckResourceAttr(resourceName, "enabled_cloudwatch_logs_exports.#", "1"), + resource.TestCheckTypeSetElemAttr(resourceName, "enabled_cloudwatch_logs_exports.*", "audit"), ), }, }, @@ -2692,51 +2689,20 @@ resource "aws_rds_cluster" "restore" { `, rName)) } -func testAccClusterConfig_pointInTimeRestoreSource_enabled_cloudWatch_logs_exports(parentId, childId, enabledCloudwatchLogExports string) string { - return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), 
fmt.Sprintf(` -resource "aws_rds_cluster" "test" { - cluster_identifier = "%[1]s" - master_username = "root" - master_password = "password" - db_subnet_group_name = aws_db_subnet_group.test.name - skip_final_snapshot = true - engine = "aurora-mysql" -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - tags = { - Name = "%[1]s-vpc" - } -} - -resource "aws_subnet" "subnets" { - count = length(data.aws_availability_zones.available.names) - vpc_id = aws_vpc.test.id - cidr_block = "10.0.${count.index}.0/24" - availability_zone = data.aws_availability_zones.available.names[count.index] - tags = { - Name = "%[1]s-subnet-${count.index}" - } -} - -resource "aws_db_subnet_group" "test" { - name = "%[1]s-db-subnet-group" - subnet_ids = aws_subnet.subnets[*].id -} - -resource "aws_rds_cluster" "restored_pit" { - cluster_identifier = "%s" +func testAccClusterConfig_pointInTimeRestoreSource_enabledCloudWatchLogsExports(rName, enabledCloudwatchLogExports string) string { + return acctest.ConfigCompose(testAccClusterConfig_baseForPITR(rName), fmt.Sprintf(` +resource "aws_rds_cluster" "restore" { + cluster_identifier = "%[1]s-restore" skip_final_snapshot = true engine = aws_rds_cluster.test.engine - enabled_cloudwatch_logs_exports = [%q] + enabled_cloudwatch_logs_exports = [%[2]q] restore_to_point_in_time { source_cluster_identifier = aws_rds_cluster.test.cluster_identifier restore_type = "full-copy" use_latest_restorable_time = true } } -`, parentId, childId, enabledCloudwatchLogExports)) +`, rName, enabledCloudwatchLogExports)) } func testAccClusterConfig_enabledCloudWatchLogsExports1(rName, enabledCloudwatchLogExports1 string) string { @@ -3322,9 +3288,7 @@ resource "aws_rds_cluster" "alternate" { } func testAccClusterConfig_networkType(rName string, networkType string) string { - return acctest.ConfigCompose( - acctest.ConfigVPCWithSubnetsIPv6(rName, 2), - fmt.Sprintf(` + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnetsIPv6(rName, 2), fmt.Sprintf(` resource "aws_db_subnet_group" "test" { name = %[1]q subnet_ids = aws_subnet.test[*].id @@ -3336,8 +3300,8 @@ resource "aws_rds_cluster" "test" { network_type = %[2]q engine = "aurora-postgresql" engine_version = "14.3" - master_password = "barbarbarbar" - master_username = "foo" + master_password = "avoid-plaintext-passwords" + master_username = "tfacctest" skip_final_snapshot = true apply_immediately = true } @@ -3386,59 +3350,20 @@ resource "aws_rds_cluster" "test" { } func testAccClusterConfig_EngineMode_multimaster(rName string) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags = { - Name = "tf-acc-test-rds-cluster-multimaster" - } -} - -resource "aws_subnet" "test1" { - vpc_id = aws_vpc.test.id - cidr_block = "10.0.0.0/24" - availability_zone = data.aws_availability_zones.available.names[0] - - tags = { - Name = "tf-acc-test-rds-cluster-multimaster" - } -} - -resource "aws_subnet" "test2" { - vpc_id = aws_vpc.test.id - cidr_block = "10.0.1.0/24" - availability_zone = data.aws_availability_zones.available.names[1] - - tags = { - Name = "tf-acc-test-rds-cluster-multimaster" - } -} - + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 3), fmt.Sprintf(` resource "aws_db_subnet_group" "test" { name = %[1]q - subnet_ids = [aws_subnet.test1.id, aws_subnet.test2.id] + subnet_ids = aws_subnet.test[*].id } - -# multimaster requires 
db_subnet_group_name resource "aws_rds_cluster" "test" { cluster_identifier = %[1]q db_subnet_group_name = aws_db_subnet_group.test.name engine_mode = "multimaster" - master_password = "barbarbarbar" - master_username = "foo" + master_password = "avoid-plaintext-passwords" + master_username = "tfacctest" skip_final_snapshot = true } -`, rName) +`, rName)) } func testAccClusterConfig_GlobalClusterID_EngineMode_global(rName string) string { From e39d54a22d536aad261aad50054b453fd29b535e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 6 Sep 2022 16:52:14 -0400 Subject: [PATCH 23/29] r/aws_rds_cluster: Tidy up 'TestAccRDSCluster_ReplicationSourceIdentifier_kmsKeyID'. Acceptance test output: % make testacc TESTARGS='-run=TestAccRDSCluster_ReplicationSourceIdentifier_kmsKeyID' PKG=rds ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/rds/... -v -count 1 -parallel 2 -run=TestAccRDSCluster_ReplicationSourceIdentifier_kmsKeyID -timeout 180m === RUN TestAccRDSCluster_ReplicationSourceIdentifier_kmsKeyID === PAUSE TestAccRDSCluster_ReplicationSourceIdentifier_kmsKeyID === CONT TestAccRDSCluster_ReplicationSourceIdentifier_kmsKeyID --- PASS: TestAccRDSCluster_ReplicationSourceIdentifier_kmsKeyID (1574.73s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/rds 1578.477s --- internal/service/rds/cluster_test.go | 53 +++++++++++++++------------- 1 file changed, 28 insertions(+), 25 deletions(-) diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go index aefc58a1a5a..5658b5d0de9 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -3173,10 +3173,8 @@ resource "aws_rds_cluster" "test" { } func testAccClusterConfig_replicationSourceIDKMSKeyID(rName string) string { - return acctest.ConfigCompose( - acctest.ConfigMultipleRegionProvider(2), - fmt.Sprintf(` -data "aws_availability_zones" "alternate" { + return acctest.ConfigCompose(acctest.ConfigMultipleRegionProvider(2), fmt.Sprintf(` +data "aws_availability_zones" "available" { provider = "awsalternate" state = "available" @@ -3206,9 +3204,9 @@ resource "aws_rds_cluster_parameter_group" "test" { resource "aws_rds_cluster" "test" { cluster_identifier = "%[1]s-primary" db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.test.name - database_name = "mydb" - master_username = "foo" - master_password = "mustbeeightcharaters" + database_name = "test" + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" storage_encrypted = true skip_final_snapshot = true } @@ -3219,8 +3217,9 @@ resource "aws_rds_cluster_instance" "test" { instance_class = "db.t2.small" } -resource "aws_kms_key" "alternate" { - provider = "awsalternate" +resource "aws_kms_key" "test" { + provider = "awsalternate" + description = %[1]q policy = < Date: Tue, 6 Sep 2022 17:19:44 -0400 Subject: [PATCH 24/29] r/aws_rds_cluster_instance: Test 'network_type' in 'TestAccRDSClusterInstance_basic'. Acceptance test output: % make testacc TESTARGS='-run=TestAccRDSClusterInstance_basic' PKG=rds ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/rds/... 
-v -count 1 -parallel 2 -run=TestAccRDSClusterInstance_basic -timeout 180m === RUN TestAccRDSClusterInstance_basic === PAUSE TestAccRDSClusterInstance_basic === CONT TestAccRDSClusterInstance_basic --- PASS: TestAccRDSClusterInstance_basic (794.60s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/rds 798.711s --- internal/service/rds/cluster_instance.go | 2 +- internal/service/rds/cluster_instance_test.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/service/rds/cluster_instance.go b/internal/service/rds/cluster_instance.go index 2cda99cf4cc..67ed7a0ea87 100644 --- a/internal/service/rds/cluster_instance.go +++ b/internal/service/rds/cluster_instance.go @@ -406,6 +406,7 @@ func resourceClusterInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("kms_key_id", db.KmsKeyId) d.Set("monitoring_interval", db.MonitoringInterval) d.Set("monitoring_role_arn", db.MonitoringRoleArn) + d.Set("network_type", db.NetworkType) d.Set("performance_insights_enabled", db.PerformanceInsightsEnabled) d.Set("performance_insights_kms_key_id", db.PerformanceInsightsKMSKeyId) d.Set("performance_insights_retention_period", db.PerformanceInsightsRetentionPeriod) @@ -413,7 +414,6 @@ func resourceClusterInstanceRead(d *schema.ResourceData, meta interface{}) error d.Set("preferred_maintenance_window", db.PreferredMaintenanceWindow) d.Set("promotion_tier", db.PromotionTier) d.Set("publicly_accessible", db.PubliclyAccessible) - d.Set("network_type", db.NetworkType) d.Set("storage_encrypted", db.StorageEncrypted) clusterSetResourceDataEngineVersionFromClusterInstance(d, db) diff --git a/internal/service/rds/cluster_instance_test.go b/internal/service/rds/cluster_instance_test.go index 7f68ff54c43..8e146e0155a 100644 --- a/internal/service/rds/cluster_instance_test.go +++ b/internal/service/rds/cluster_instance_test.go @@ -45,6 +45,7 @@ func TestAccRDSClusterInstance_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "dbi_resource_id"), resource.TestCheckResourceAttr(resourceName, "engine", "aurora"), resource.TestCheckResourceAttrSet(resourceName, "engine_version"), + resource.TestCheckResourceAttr(resourceName, "network_type", "IPV4"), resource.TestCheckResourceAttrSet(resourceName, "preferred_backup_window"), resource.TestCheckResourceAttrSet(resourceName, "preferred_maintenance_window"), ), From b23e084bf95c53e4402c4e089b05e7663f87e540 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 7 Sep 2022 07:56:08 -0400 Subject: [PATCH 25/29] Remove 'TestAccRDSCluster_s3Restore'. 
--- internal/service/rds/cluster_test.go | 120 --------------------------- 1 file changed, 120 deletions(-) diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go index 5658b5d0de9..db4b08d350f 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -550,34 +550,6 @@ func TestAccRDSCluster_dbSubnetGroupName(t *testing.T) { }) } -func TestAccRDSCluster_s3Restore(t *testing.T) { - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - - var v rds.DBCluster - resourceName := "aws_rds_cluster.test" - bucket := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - uniqueId := sdkacctest.RandomWithPrefix("tf-acc-s3-import-test") - bucketPrefix := sdkacctest.RandString(5) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, rds.EndpointsID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckClusterDestroy, - Steps: []resource.TestStep{ - { - Config: testAccClusterConfig_s3Restore(bucket, bucketPrefix, uniqueId), - Check: resource.ComposeTestCheckFunc( - testAccCheckClusterExists(resourceName, &v), - resource.TestCheckResourceAttr(resourceName, "engine", "aurora"), - ), - }, - }, - }) -} - func TestAccRDSCluster_pointInTimeRestore(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -2540,98 +2512,6 @@ resource "aws_rds_cluster" "test" { `, rName)) } -func testAccClusterConfig_s3Restore(bucketName string, bucketPrefix string, uniqueId string) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - -resource "aws_s3_bucket" "xtrabackup" { - bucket = %[1]q -} - -resource "aws_s3_object" "xtrabackup_db" { - bucket = aws_s3_bucket.xtrabackup.id - key = "%[2]s/mysql-5-6-xtrabackup.tar.gz" - source = "./testdata/mysql-5-6-xtrabackup.tar.gz" - etag = filemd5("./testdata/mysql-5-6-xtrabackup.tar.gz") -} - -resource "aws_iam_role" "rds_s3_access_role" { - name = "%[3]s-role" - - assume_role_policy = < Date: Wed, 7 Sep 2022 09:46:31 -0400 Subject: [PATCH 26/29] r/aws_rds_global_cluster: Prevent spurious 'engine_version' drift. 
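Aurora global clusters can report an engine version such as "5.6.global_10a" for a
configured value of "5.6.10a", which previously surfaced as a spurious plan diff. A
standalone sketch of the comparison applied below in global_cluster.go (the helper
name is illustrative and not part of this change):

package sketch

import "strings"

// engineVersionCompatible reports whether the version returned by the RDS API is a
// prefix-compatible variant of the configured version, e.g. "5.6.global_10a" for a
// configured "5.6.10a". When it is, the configured value is kept in state and the
// returned value is exposed via the new engine_version_actual attribute.
func engineVersionCompatible(configured, actual string) bool {
	o, n := strings.Split(configured, "."), strings.Split(actual, ".")
	return len(o) == 3 && len(o) == len(n) &&
		o[0] == n[0] && o[1] == n[1] && strings.HasSuffix(n[2], o[2])
}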
--- internal/service/rds/cluster_test.go | 63 +++++++++++---------- internal/service/rds/engine_version_test.go | 3 +- internal/service/rds/global_cluster.go | 23 +++++++- 3 files changed, 54 insertions(+), 35 deletions(-) diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go index db4b08d350f..2b3ababe960 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -1100,7 +1100,7 @@ func TestAccRDSCluster_engineVersion(t *testing.T) { } var dbCluster rds.DBCluster - rInt := sdkacctest.RandInt() + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rds_cluster.test" dataSourceName := "data.aws_rds_engine_version.test" @@ -1111,7 +1111,7 @@ func TestAccRDSCluster_engineVersion(t *testing.T) { CheckDestroy: testAccCheckClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccClusterConfig_engineVersion(false, rInt), + Config: testAccClusterConfig_engineVersion(rName, false), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(resourceName, &dbCluster), resource.TestCheckResourceAttr(resourceName, "engine", "aurora-postgresql"), @@ -1119,7 +1119,7 @@ func TestAccRDSCluster_engineVersion(t *testing.T) { ), }, { - Config: testAccClusterConfig_engineVersion(true, rInt), + Config: testAccClusterConfig_engineVersion(rName, true), ExpectError: regexp.MustCompile(`Cannot modify engine version without a healthy primary instance in DB cluster`), }, }, @@ -1132,7 +1132,7 @@ func TestAccRDSCluster_engineVersionWithPrimaryInstance(t *testing.T) { } var dbCluster rds.DBCluster - rInt := sdkacctest.RandInt() + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rds_cluster.test" dataSourceName := "data.aws_rds_engine_version.test" dataSourceNameUpgrade := "data.aws_rds_engine_version.upgrade" @@ -1144,7 +1144,7 @@ func TestAccRDSCluster_engineVersionWithPrimaryInstance(t *testing.T) { CheckDestroy: testAccCheckClusterDestroy, Steps: []resource.TestStep{ { - Config: testAccClusterConfig_engineVersionPrimaryInstance(false, rInt), + Config: testAccClusterConfig_engineVersionPrimaryInstance(rName, false), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(resourceName, &dbCluster), resource.TestCheckResourceAttrPair(resourceName, "engine", dataSourceName, "engine"), @@ -1152,7 +1152,7 @@ func TestAccRDSCluster_engineVersionWithPrimaryInstance(t *testing.T) { ), }, { - Config: testAccClusterConfig_engineVersionPrimaryInstance(true, rInt), + Config: testAccClusterConfig_engineVersionPrimaryInstance(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(resourceName, &dbCluster), resource.TestCheckResourceAttrPair(resourceName, "engine", dataSourceNameUpgrade, "engine"), @@ -2718,7 +2718,7 @@ resource "aws_rds_cluster" "test" { `, n) } -func testAccClusterConfig_engineVersion(upgrade bool, rInt int) string { +func testAccClusterConfig_engineVersion(rName string, upgrade bool) string { return fmt.Sprintf(` data "aws_rds_engine_version" "test" { engine = "aurora-postgresql" @@ -2731,29 +2731,29 @@ data "aws_rds_engine_version" "upgrade" { } locals { - parameter_group_name = %[1]t ? data.aws_rds_engine_version.upgrade.parameter_group_family : data.aws_rds_engine_version.test.parameter_group_family - engine_version = %[1]t ? data.aws_rds_engine_version.upgrade.version : data.aws_rds_engine_version.test.version + parameter_group_name = %[2]t ? 
data.aws_rds_engine_version.upgrade.parameter_group_family : data.aws_rds_engine_version.test.parameter_group_family + engine_version = %[2]t ? data.aws_rds_engine_version.upgrade.version : data.aws_rds_engine_version.test.version } resource "aws_rds_cluster" "test" { - cluster_identifier = "tf-acc-test-%[2]d" - database_name = "mydb" + cluster_identifier = %[1]q + database_name = "test" db_cluster_parameter_group_name = "default.${local.parameter_group_name}" engine = data.aws_rds_engine_version.test.engine engine_version = local.engine_version - master_password = "mustbeeightcharaters" - master_username = "foo" + master_password = "avoid-plaintext-passwords" + master_username = "tfacctest" skip_final_snapshot = true apply_immediately = true } -`, upgrade, rInt) +`, rName, upgrade) } -func testAccClusterConfig_engineVersionPrimaryInstance(upgrade bool, rInt int) string { +func testAccClusterConfig_engineVersionPrimaryInstance(rName string, upgrade bool) string { return fmt.Sprintf(` data "aws_rds_engine_version" "test" { engine = "aurora-postgresql" - preferred_versions = ["10.7", "10.13", "11.6"] + preferred_versions = ["10.17", "11.13", "12.8"] } data "aws_rds_engine_version" "upgrade" { @@ -2762,18 +2762,18 @@ data "aws_rds_engine_version" "upgrade" { } locals { - parameter_group_name = %[1]t ? data.aws_rds_engine_version.upgrade.parameter_group_family : data.aws_rds_engine_version.test.parameter_group_family - engine_version = %[1]t ? data.aws_rds_engine_version.upgrade.version : data.aws_rds_engine_version.test.version + parameter_group_name = %[2]t ? data.aws_rds_engine_version.upgrade.parameter_group_family : data.aws_rds_engine_version.test.parameter_group_family + engine_version = %[2]t ? data.aws_rds_engine_version.upgrade.version : data.aws_rds_engine_version.test.version } resource "aws_rds_cluster" "test" { - cluster_identifier = "tf-acc-test-%[2]d" - database_name = "mydb" + cluster_identifier = %[1]q + database_name = "test" db_cluster_parameter_group_name = "default.${local.parameter_group_name}" engine = data.aws_rds_engine_version.test.engine engine_version = local.engine_version - master_password = "mustbeeightcharaters" - master_username = "foo" + master_password = "avoid-plaintext-passwords" + master_username = "tfacctest" skip_final_snapshot = true apply_immediately = true } @@ -2785,12 +2785,12 @@ data "aws_rds_orderable_db_instance" "test" { } resource "aws_rds_cluster_instance" "test" { - identifier = "tf-acc-test-%[2]d" + identifier = %[1]q cluster_identifier = aws_rds_cluster.test.cluster_identifier engine = aws_rds_cluster.test.engine instance_class = data.aws_rds_orderable_db_instance.test.instance_class } -`, upgrade, rInt) +`, rName, upgrade) } func testAccClusterConfig_port(rName string, port int) string { @@ -3222,11 +3222,11 @@ resource "aws_rds_cluster" "test" { func testAccClusterConfig_EngineMode_global(rName string) string { return fmt.Sprintf(` resource "aws_rds_cluster" "test" { - cluster_identifier = %q + cluster_identifier = %[1]q engine_mode = "global" engine_version = "5.6.10a" # version compatible with engine_mode = "global" - master_password = "barbarbarbar" - master_username = "foo" + master_password = "avoid-plaintext-passwords" + master_username = "tfacctest" skip_final_snapshot = true } `, rName) @@ -3238,6 +3238,7 @@ resource "aws_db_subnet_group" "test" { name = %[1]q subnet_ids = aws_subnet.test[*].id } + resource "aws_rds_cluster" "test" { cluster_identifier = %[1]q db_subnet_group_name = aws_db_subnet_group.test.name @@ -3262,8 
+3263,8 @@ resource "aws_rds_cluster" "test" { global_cluster_identifier = aws_rds_global_cluster.test.id engine_mode = "global" engine_version = aws_rds_global_cluster.test.engine_version - master_password = "barbarbarbar" - master_username = "foo" + master_password = "avoid-plaintext-passwords" + master_username = "tfacctest" skip_final_snapshot = true } `, rName) @@ -3283,8 +3284,8 @@ resource "aws_rds_cluster" "test" { global_cluster_identifier = %[2]s.id engine_mode = "global" engine_version = %[2]s.engine_version - master_password = "barbarbarbar" - master_username = "foo" + master_password = "avoid-plaintext-passwords" + master_username = "tfacctest" skip_final_snapshot = true } `, rName, globalClusterIdentifierResourceName) diff --git a/internal/service/rds/engine_version_test.go b/internal/service/rds/engine_version_test.go index f9613539224..92e485cf84e 100644 --- a/internal/service/rds/engine_version_test.go +++ b/internal/service/rds/engine_version_test.go @@ -17,7 +17,7 @@ func TestCompareActualEngineVersion(t *testing.T) { "point version upgrade": { configuredVersion: "8.0", actualVersion: "8.0.27", - expectedEngineVersion: "", + expectedEngineVersion: "8.0", expectedEngineVersionActual: "8.0.27", }, "minor version upgrade": { @@ -63,6 +63,7 @@ func TestCompareActualEngineVersion(t *testing.T) { t.Run(name, func(t *testing.T) { r := ResourceCluster() d := r.Data(nil) + d.Set("engine_version", test.configuredVersion) compareActualEngineVersion(d, test.configuredVersion, test.actualVersion) if want, got := test.expectedEngineVersion, d.Get("engine_version"); got != want { diff --git a/internal/service/rds/global_cluster.go b/internal/service/rds/global_cluster.go index f32552da893..6131b3b92b8 100644 --- a/internal/service/rds/global_cluster.go +++ b/internal/service/rds/global_cluster.go @@ -72,6 +72,10 @@ func ResourceGlobalCluster() *schema.Resource { Optional: true, Computed: true, }, + "engine_version_actual": { + Type: schema.TypeString, + Computed: true, + }, "force_destroy": { Type: schema.TypeBool, Optional: true, @@ -202,16 +206,29 @@ func resourceGlobalClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("database_name", globalCluster.DatabaseName) d.Set("deletion_protection", globalCluster.DeletionProtection) d.Set("engine", globalCluster.Engine) - d.Set("engine_version", globalCluster.EngineVersion) d.Set("global_cluster_identifier", globalCluster.GlobalClusterIdentifier) - if err := d.Set("global_cluster_members", flattenGlobalClusterMembers(globalCluster.GlobalClusterMembers)); err != nil { return fmt.Errorf("error setting global_cluster_members: %w", err) } - d.Set("global_cluster_resource_id", globalCluster.GlobalClusterResourceId) d.Set("storage_encrypted", globalCluster.StorageEncrypted) + oldEngineVersion := d.Get("engine_version").(string) + newEngineVersion := aws.StringValue(globalCluster.EngineVersion) + + // For example a configured engine_version of "5.6.10a" and a returned engine_version of "5.6.global_10a". 
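+	// When the returned version is a prefix-compatible variant of the configured one,
+	// keep the configured value in state (exposing the returned value via
+	// engine_version_actual) so plans do not show a spurious engine_version diff.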
+ if oldParts, newParts := strings.Split(oldEngineVersion, "."), strings.Split(newEngineVersion, "."); len(oldParts) == 3 && + len(oldParts) == len(newParts) && + oldParts[0] == newParts[0] && + oldParts[1] == newParts[1] && + strings.HasSuffix(newParts[2], oldParts[2]) { + d.Set("engine_version", oldEngineVersion) + d.Set("engine_version_actual", newEngineVersion) + } else { + d.Set("engine_version", newEngineVersion) + d.Set("engine_version_actual", newEngineVersion) + } + return nil } From e35f649e1f2b4ce7015175d1a26d91d943056937 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 7 Sep 2022 10:14:58 -0400 Subject: [PATCH 27/29] d/aws_rds_cluster: Add 'network_type' attribute. Acceptance test output: % make testacc TESTARGS='-run=TestAccRDSClusterDataSource_' PKG=rds ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/rds/... -v -count 1 -parallel 2 -run=TestAccRDSClusterDataSource_ -timeout 180m === RUN TestAccRDSClusterDataSource_basic === PAUSE TestAccRDSClusterDataSource_basic === CONT TestAccRDSClusterDataSource_basic --- PASS: TestAccRDSClusterDataSource_basic (135.58s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/rds 140.333s --- .changelog/26489.txt | 4 + internal/service/rds/cluster_data_source.go | 152 +++++------------- .../service/rds/cluster_data_source_test.go | 49 ++---- website/docs/r/rds_cluster.html.markdown | 2 +- 4 files changed, 57 insertions(+), 150 deletions(-) diff --git a/.changelog/26489.txt b/.changelog/26489.txt index 55d7c37431e..0ed7c85b777 100644 --- a/.changelog/26489.txt +++ b/.changelog/26489.txt @@ -5,3 +5,7 @@ resource/aws_rds_cluster: Add `network_type` argument ```release-note:enhancement resource/aws_rds_cluster_instance: Add `network_type` attribute ``` + +```release-note:enhancement +data-source/aws_rds_cluster: Add `network_type` attribute +``` \ No newline at end of file diff --git a/internal/service/rds/cluster_data_source.go b/internal/service/rds/cluster_data_source.go index 749ab39f142..6730c0d3352 100644 --- a/internal/service/rds/cluster_data_source.go +++ b/internal/service/rds/cluster_data_source.go @@ -2,10 +2,8 @@ package rds import ( "fmt" - "log" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/rds" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -19,146 +17,119 @@ func DataSourceCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, - - "cluster_identifier": { - Type: schema.TypeString, - Required: true, - }, - "availability_zones": { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, Computed: true, - Set: schema.HashString, }, - "backtrack_window": { Type: schema.TypeInt, Computed: true, }, - "backup_retention_period": { Type: schema.TypeInt, Computed: true, }, - + "cluster_identifier": { + Type: schema.TypeString, + Required: true, + }, "cluster_members": { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, Computed: true, - Set: schema.HashString, }, - "cluster_resource_id": { Type: schema.TypeString, Computed: true, }, - "database_name": { Type: schema.TypeString, Computed: true, }, - - "db_subnet_group_name": { + "db_cluster_parameter_group_name": { Type: schema.TypeString, Computed: true, }, - - "db_cluster_parameter_group_name": { + "db_subnet_group_name": { Type: schema.TypeString, Computed: true, }, - 
"enabled_cloudwatch_logs_exports": { Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "endpoint": { Type: schema.TypeString, Computed: true, }, - "engine": { Type: schema.TypeString, Computed: true, }, - "engine_version": { Type: schema.TypeString, Computed: true, }, - "final_snapshot_identifier": { Type: schema.TypeString, Computed: true, }, - + "hosted_zone_id": { + Type: schema.TypeString, + Computed: true, + }, "iam_database_authentication_enabled": { Type: schema.TypeBool, Computed: true, }, - "iam_roles": { Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, }, - "kms_key_id": { Type: schema.TypeString, Computed: true, }, - "master_username": { Type: schema.TypeString, Computed: true, }, - - "preferred_backup_window": { - Type: schema.TypeString, - Computed: true, - }, - - "preferred_maintenance_window": { + "network_type": { Type: schema.TypeString, Computed: true, }, - "port": { Type: schema.TypeInt, Computed: true, }, - - "reader_endpoint": { + "preferred_backup_window": { Type: schema.TypeString, Computed: true, }, - - "hosted_zone_id": { + "preferred_maintenance_window": { + Type: schema.TypeString, + Computed: true, + }, + "reader_endpoint": { Type: schema.TypeString, Computed: true, }, - "replication_source_identifier": { Type: schema.TypeString, Computed: true, }, - "storage_encrypted": { Type: schema.TypeBool, Computed: true, }, - "tags": tftags.TagsSchemaComputed(), - "vpc_security_group_ids": { Type: schema.TypeSet, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, }, }, } @@ -168,56 +139,27 @@ func dataSourceClusterRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*conns.AWSClient).RDSConn ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - dbClusterIdentifier := d.Get("cluster_identifier").(string) - - params := &rds.DescribeDBClustersInput{ - DBClusterIdentifier: aws.String(dbClusterIdentifier), - } - log.Printf("[DEBUG] Reading RDS Cluster: %s", params) - resp, err := conn.DescribeDBClusters(params) + dbClusterID := d.Get("cluster_identifier").(string) + dbc, err := FindDBClusterByID(conn, dbClusterID) if err != nil { - return fmt.Errorf("Error retrieving RDS cluster: %w", err) - } - - if resp == nil { - return fmt.Errorf("Error retrieving RDS cluster: empty response for: %s", params) - } - - var dbc *rds.DBCluster - for _, c := range resp.DBClusters { - if aws.StringValue(c.DBClusterIdentifier) == dbClusterIdentifier { - dbc = c - break - } - } - - if dbc == nil { - return fmt.Errorf("Error retrieving RDS cluster: cluster not found in response for: %s", params) + return fmt.Errorf("reading RDS Cluster (%s): %w", dbClusterID, err) } d.SetId(aws.StringValue(dbc.DBClusterIdentifier)) - if err := d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)); err != nil { - return fmt.Errorf("error setting availability_zones: %w", err) - } - - arn := dbc.DBClusterArn - d.Set("arn", arn) + clusterARN := aws.StringValue(dbc.DBClusterArn) + d.Set("arn", clusterARN) + d.Set("availability_zones", aws.StringValueSlice(dbc.AvailabilityZones)) d.Set("backtrack_window", dbc.BacktrackWindow) d.Set("backup_retention_period", dbc.BackupRetentionPeriod) d.Set("cluster_identifier", dbc.DBClusterIdentifier) - - var cm []string - for _, m := range dbc.DBClusterMembers { - cm = append(cm, aws.StringValue(m.DBInstanceIdentifier)) + var clusterMembers []string + for _, v := range dbc.DBClusterMembers { + 
clusterMembers = append(clusterMembers, aws.StringValue(v.DBInstanceIdentifier)) } - if err := d.Set("cluster_members", cm); err != nil { - return fmt.Errorf("error setting cluster_members: %w", err) - } - + d.Set("cluster_members", clusterMembers) d.Set("cluster_resource_id", dbc.DbClusterResourceId) - // Only set the DatabaseName if it is not nil. There is a known API bug where // RDS accepts a DatabaseName but does not return it, causing a perpetual // diff. @@ -225,54 +167,42 @@ func dataSourceClusterRead(d *schema.ResourceData, meta interface{}) error { if dbc.DatabaseName != nil { d.Set("database_name", dbc.DatabaseName) } - d.Set("db_cluster_parameter_group_name", dbc.DBClusterParameterGroup) d.Set("db_subnet_group_name", dbc.DBSubnetGroup) - - if err := d.Set("enabled_cloudwatch_logs_exports", aws.StringValueSlice(dbc.EnabledCloudwatchLogsExports)); err != nil { - return fmt.Errorf("error setting enabled_cloudwatch_logs_exports: %w", err) - } - + d.Set("enabled_cloudwatch_logs_exports", aws.StringValueSlice(dbc.EnabledCloudwatchLogsExports)) d.Set("endpoint", dbc.Endpoint) - d.Set("engine_version", dbc.EngineVersion) d.Set("engine", dbc.Engine) + d.Set("engine_version", dbc.EngineVersion) d.Set("hosted_zone_id", dbc.HostedZoneId) d.Set("iam_database_authentication_enabled", dbc.IAMDatabaseAuthenticationEnabled) - - var roles []string - for _, r := range dbc.AssociatedRoles { - roles = append(roles, aws.StringValue(r.RoleArn)) + var iamRoleARNs []string + for _, v := range dbc.AssociatedRoles { + iamRoleARNs = append(iamRoleARNs, aws.StringValue(v.RoleArn)) } - if err := d.Set("iam_roles", roles); err != nil { - return fmt.Errorf("error setting iam_roles: %w", err) - } - + d.Set("iam_roles", iamRoleARNs) d.Set("kms_key_id", dbc.KmsKeyId) d.Set("master_username", dbc.MasterUsername) + d.Set("network_type", dbc.NetworkType) d.Set("port", dbc.Port) d.Set("preferred_backup_window", dbc.PreferredBackupWindow) d.Set("preferred_maintenance_window", dbc.PreferredMaintenanceWindow) d.Set("reader_endpoint", dbc.ReaderEndpoint) d.Set("replication_source_identifier", dbc.ReplicationSourceIdentifier) - d.Set("storage_encrypted", dbc.StorageEncrypted) - - var vpcg []string - for _, g := range dbc.VpcSecurityGroups { - vpcg = append(vpcg, aws.StringValue(g.VpcSecurityGroupId)) - } - if err := d.Set("vpc_security_group_ids", vpcg); err != nil { - return fmt.Errorf("error setting vpc_security_group_ids: %w", err) + var securityGroupIDs []string + for _, v := range dbc.VpcSecurityGroups { + securityGroupIDs = append(securityGroupIDs, aws.StringValue(v.VpcSecurityGroupId)) } + d.Set("vpc_security_group_ids", securityGroupIDs) - tags, err := ListTags(conn, *arn) + tags, err := ListTags(conn, clusterARN) if err != nil { - return fmt.Errorf("error listing tags for RDS Cluster (%s): %w", *arn, err) + return fmt.Errorf("listing tags for RDS Cluster (%s): %w", d.Id(), err) } if err := d.Set("tags", tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return fmt.Errorf("error setting tags: %w", err) + return fmt.Errorf("setting tags: %w", err) } return nil diff --git a/internal/service/rds/cluster_data_source_test.go b/internal/service/rds/cluster_data_source_test.go index 675b892a0c6..55a57c18357 100644 --- a/internal/service/rds/cluster_data_source_test.go +++ b/internal/service/rds/cluster_data_source_test.go @@ -31,8 +31,9 @@ func TestAccRDSClusterDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "db_subnet_group_name", resourceName, 
"db_subnet_group_name"), resource.TestCheckResourceAttrPair(dataSourceName, "hosted_zone_id", resourceName, "hosted_zone_id"), resource.TestCheckResourceAttrPair(dataSourceName, "master_username", resourceName, "master_username"), + resource.TestCheckResourceAttrPair(dataSourceName, "network_type", resourceName, "network_type"), resource.TestCheckResourceAttrPair(dataSourceName, "tags.%", resourceName, "tags.%"), - resource.TestCheckResourceAttrPair(dataSourceName, "tags.Environment", resourceName, "tags.Environment"), + resource.TestCheckResourceAttrPair(dataSourceName, "tags.Name", resourceName, "tags.Name"), ), }, }, @@ -40,54 +41,26 @@ func TestAccRDSClusterDataSource_basic(t *testing.T) { } func testAccClusterDataSourceConfig_basic(rName string) string { - return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 2), fmt.Sprintf(` +resource "aws_db_subnet_group" "test" { + name = %[1]q + subnet_ids = aws_subnet.test[*].id +} + resource "aws_rds_cluster" "test" { cluster_identifier = %[1]q - database_name = "mydb" + database_name = "test" db_cluster_parameter_group_name = "default.aurora5.6" db_subnet_group_name = aws_db_subnet_group.test.name - master_password = "mustbeeightcharacters" - master_username = "foo" + master_password = "avoid-plaintext-passwords" + master_username = "tfacctest" skip_final_snapshot = true - tags = { - Environment = "test" - } -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - tags = { Name = %[1]q } } -resource "aws_subnet" "a" { - vpc_id = aws_vpc.test.id - cidr_block = "10.0.0.0/24" - availability_zone = data.aws_availability_zones.available.names[0] - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "b" { - vpc_id = aws_vpc.test.id - cidr_block = "10.0.1.0/24" - availability_zone = data.aws_availability_zones.available.names[1] - - tags = { - Name = %[1]q - } -} - -resource "aws_db_subnet_group" "test" { - name = %[1]q - subnet_ids = [aws_subnet.a.id, aws_subnet.b.id] -} - data "aws_rds_cluster" "test" { cluster_identifier = aws_rds_cluster.test.cluster_identifier } diff --git a/website/docs/r/rds_cluster.html.markdown b/website/docs/r/rds_cluster.html.markdown index b335e583c56..8c10d5d4a4e 100644 --- a/website/docs/r/rds_cluster.html.markdown +++ b/website/docs/r/rds_cluster.html.markdown @@ -186,7 +186,7 @@ The following arguments are supported: * `preferred_backup_window` - (Optional) The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.Time in UTC. Default: A 30-minute window selected at random from an 8-hour block of time per regionE.g., 04:00-09:00 * `preferred_maintenance_window` - (Optional) The weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 * `replication_source_identifier` - (Optional) ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica. If DB Cluster is part of a Global Cluster, use the [`lifecycle` configuration block `ignore_changes` argument](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to prevent Terraform from showing differences for this argument instead of configuring this value. -* `network_type` - (Optional) The network type of the DB instance. Valid values: `IPV4`, `DUAL`. +* `network_type` - (Optional) The network type of the cluster. Valid values: `IPV4`, `DUAL`. 
 * `restore_to_point_in_time` - (Optional) Nested attribute for [point in time restore](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_PIT.html). More details below.
 * `scaling_configuration` - (Optional) Nested attribute with scaling properties. Only valid when `engine_mode` is set to `serverless`. More details below.
 * `serverlessv2_scaling_configuration`- (Optional) Nested attribute with scaling properties for ServerlessV2. Only valid when `engine_mode` is set to `provisioned`. More details below.

From 94efb6aaf79fae582fbe9b600d963b2dce82a8e5 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Wed, 7 Sep 2022 10:33:19 -0400
Subject: [PATCH 28/29] Fix golangci-lint 'unparam'.

---
 internal/service/rds/wait.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/service/rds/wait.go b/internal/service/rds/wait.go
index 487dba64ff2..f2aea302a6c 100644
--- a/internal/service/rds/wait.go
+++ b/internal/service/rds/wait.go
@@ -167,7 +167,7 @@ func waitDBClusterDeleted(conn *rds.RDS, id string, timeout time.Duration) (*rds
 	return nil, err
 }
 
-func waitDBClusterUpdated(conn *rds.RDS, id string, timeout time.Duration) (*rds.DBCluster, error) {
+func waitDBClusterUpdated(conn *rds.RDS, id string, timeout time.Duration) (*rds.DBCluster, error) { //nolint:unparam
 	stateConf := &resource.StateChangeConf{
 		Pending: []string{
 			ClusterStatusBackingUp,

From 083fbdcea6b55c45b1ee88a37837bc997db39f82 Mon Sep 17 00:00:00 2001
From: Kit Ewbank
Date: Wed, 7 Sep 2022 14:13:21 -0400
Subject: [PATCH 29/29] Fix terrafmt error.

---
 internal/service/rds/cluster_test.go | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go
index 2b3ababe960..5eb4908631a 100644
--- a/internal/service/rds/cluster_test.go
+++ b/internal/service/rds/cluster_test.go
@@ -2572,10 +2572,11 @@ resource "aws_rds_cluster" "restore" {
 
 func testAccClusterConfig_pointInTimeRestoreSource_enabledCloudWatchLogsExports(rName, enabledCloudwatchLogExports string) string {
 	return acctest.ConfigCompose(testAccClusterConfig_baseForPITR(rName), fmt.Sprintf(`
 resource "aws_rds_cluster" "restore" {
-  cluster_identifier = "%[1]s-restore"
+  cluster_identifier              = "%[1]s-restore"
   skip_final_snapshot             = true
   engine                          = aws_rds_cluster.test.engine
   enabled_cloudwatch_logs_exports = [%[2]q]
+
   restore_to_point_in_time {
     source_cluster_identifier = aws_rds_cluster.test.cluster_identifier
     restore_type              = "full-copy"