diff --git a/.changelog/23684.txt b/.changelog/23684.txt
new file mode 100644
index 00000000000..17fbddee617
--- /dev/null
+++ b/.changelog/23684.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/aws_rds_cluster: Add `db_cluster_instance_class`, `allocated_storage`, `storage_type`, and `iops` arguments to support [Multi-AZ deployments for MySQL & PostgreSQL](https://aws.amazon.com/blogs/aws/amazon-rds-multi-az-db-cluster/)
+```
diff --git a/internal/service/rds/cluster.go b/internal/service/rds/cluster.go
index 32084974fe2..71283cfd69f 100644
--- a/internal/service/rds/cluster.go
+++ b/internal/service/rds/cluster.go
@@ -157,6 +157,11 @@ func ResourceCluster() *schema.Resource {
                 Computed: true,
             },
 
+            "db_cluster_instance_class": {
+                Type:     schema.TypeString,
+                Optional: true,
+            },
+
             "engine": {
                 Type:     schema.TypeString,
                 Optional: true,
@@ -231,6 +236,23 @@ func ResourceCluster() *schema.Resource {
                     },
                 },
             },
+            "allocated_storage": {
+                Type:     schema.TypeInt,
+                Optional: true,
+                Computed: true,
+            },
+
+            "storage_type": {
+                Type:     schema.TypeString,
+                Optional: true,
+                ForceNew: true,
+            },
+
+            "iops": {
+                Type:     schema.TypeInt,
+                Optional: true,
+            },
+
             "storage_encrypted": {
                 Type:     schema.TypeBool,
                 Optional: true,
@@ -870,6 +892,10 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error {
         createOpts.DBClusterParameterGroupName = aws.String(attr.(string))
     }
 
+    if attr, ok := d.GetOk("db_cluster_instance_class"); ok {
+        createOpts.DBClusterInstanceClass = aws.String(attr.(string))
+    }
+
     if attr, ok := d.GetOk("engine_version"); ok {
         createOpts.EngineVersion = aws.String(attr.(string))
     }
@@ -922,6 +948,18 @@ func resourceClusterCreate(d *schema.ResourceData, meta interface{}) error {
         createOpts.ReplicationSourceIdentifier = aws.String(attr.(string))
     }
 
+    if attr, ok := d.GetOkExists("allocated_storage"); ok {
+        createOpts.AllocatedStorage = aws.Int64(int64(attr.(int)))
+    }
+
+    if attr, ok := d.GetOkExists("storage_type"); ok {
+        createOpts.StorageType = aws.String(attr.(string))
+    }
+
+    if attr, ok := d.GetOkExists("iops"); ok {
+        createOpts.Iops = aws.Int64(int64(attr.(int)))
+    }
+
     if attr, ok := d.GetOkExists("storage_encrypted"); ok {
         createOpts.StorageEncrypted = aws.Bool(attr.(bool))
     }
@@ -1075,6 +1113,7 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error {
     }
 
     d.Set("endpoint", dbc.Endpoint)
+    d.Set("db_cluster_instance_class", dbc.DBClusterInstanceClass)
     d.Set("engine_mode", dbc.EngineMode)
     d.Set("engine", dbc.Engine)
     d.Set("hosted_zone_id", dbc.HostedZoneId)
@@ -1102,7 +1141,11 @@ func resourceClusterRead(d *schema.ResourceData, meta interface{}) error {
         return fmt.Errorf("error setting scaling_configuration: %s", err)
     }
 
+    d.Set("allocated_storage", dbc.AllocatedStorage)
+    d.Set("storage_type", dbc.StorageType)
+    d.Set("iops", dbc.Iops)
     d.Set("storage_encrypted", dbc.StorageEncrypted)
+    d.Set("enable_http_endpoint", dbc.HttpEndpointEnabled)
+
     var vpcg []string
@@ -1181,6 +1224,11 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error {
         requestUpdate = true
     }
 
+    if d.HasChange("db_cluster_instance_class") {
+        req.DBClusterInstanceClass = aws.String(d.Get("db_cluster_instance_class").(string))
+        requestUpdate = true
+    }
+
     if d.HasChange("engine_version") {
         req.EngineVersion = aws.String(d.Get("engine_version").(string))
         requestUpdate = true
@@ -1200,6 +1248,21 @@ func resourceClusterUpdate(d *schema.ResourceData, meta interface{}) error {
         requestUpdate = true
     }
 
+    if d.HasChange("storage_type") {
+        req.StorageType = aws.String(d.Get("storage_type").(string))
+        requestUpdate = true
+    }
+
+    if d.HasChange("allocated_storage") {
+        req.AllocatedStorage = aws.Int64(int64(d.Get("allocated_storage").(int)))
+        requestUpdate = true
+    }
+
+    if d.HasChange("iops") {
+        req.Iops = aws.Int64(int64(d.Get("iops").(int)))
+        requestUpdate = true
+    }
+
     if d.HasChange("preferred_backup_window") {
         req.PreferredBackupWindow = aws.String(d.Get("preferred_backup_window").(string))
         requestUpdate = true
diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go
index 5324b6bcd2f..96c19ae0a15 100644
--- a/internal/service/rds/cluster_test.go
+++ b/internal/service/rds/cluster_test.go
@@ -308,6 +308,94 @@ func TestAccRDSCluster_availabilityZones(t *testing.T) {
     })
 }
 
+func TestAccRDSCluster_storageType(t *testing.T) {
+    var dbCluster rds.DBCluster
+    rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+    resourceName := "aws_rds_cluster.test"
+
+    resource.ParallelTest(t, resource.TestCase{
+        PreCheck:     func() { acctest.PreCheck(t) },
+        ErrorCheck:   acctest.ErrorCheck(t, rds.EndpointsID),
+        Providers:    acctest.Providers,
+        CheckDestroy: testAccCheckClusterDestroy,
+        Steps: []resource.TestStep{
+            {
+                Config: testAccClusterConfig_StorageType(rName),
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckClusterExists(resourceName, &dbCluster),
+                    resource.TestCheckResourceAttr(resourceName, "storage_type", "io1"),
+                ),
+            },
+        },
+    })
+}
+
+func TestAccRDSCluster_allocatedStorage(t *testing.T) {
+    var dbCluster rds.DBCluster
+    rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+    resourceName := "aws_rds_cluster.test"
+
+    resource.ParallelTest(t, resource.TestCase{
+        PreCheck:     func() { acctest.PreCheck(t) },
+        ErrorCheck:   acctest.ErrorCheck(t, rds.EndpointsID),
+        Providers:    acctest.Providers,
+        CheckDestroy: testAccCheckClusterDestroy,
+        Steps: []resource.TestStep{
+            {
+                Config: testAccClusterConfig_AllocatedStorage(rName),
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckClusterExists(resourceName, &dbCluster),
+                    resource.TestCheckResourceAttr(resourceName, "allocated_storage", "100"),
+                ),
+            },
+        },
+    })
+}
+
+func TestAccRDSCluster_iops(t *testing.T) {
+    var dbCluster rds.DBCluster
+    rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+    resourceName := "aws_rds_cluster.test"
+
+    resource.ParallelTest(t, resource.TestCase{
+        PreCheck:     func() { acctest.PreCheck(t) },
+        ErrorCheck:   acctest.ErrorCheck(t, rds.EndpointsID),
+        Providers:    acctest.Providers,
+        CheckDestroy: testAccCheckClusterDestroy,
+        Steps: []resource.TestStep{
+            {
+                Config: testAccClusterConfig_Iops(rName),
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckClusterExists(resourceName, &dbCluster),
+                    resource.TestCheckResourceAttr(resourceName, "iops", "1000"),
+                ),
+            },
+        },
+    })
+}
+
+func TestAccRDSCluster_dbClusterInstanceClass(t *testing.T) {
+    var dbCluster rds.DBCluster
+    rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+    resourceName := "aws_rds_cluster.test"
+
+    resource.ParallelTest(t, resource.TestCase{
+        PreCheck:     func() { acctest.PreCheck(t) },
+        ErrorCheck:   acctest.ErrorCheck(t, rds.EndpointsID),
+        Providers:    acctest.Providers,
+        CheckDestroy: testAccCheckClusterDestroy,
+        Steps: []resource.TestStep{
+            {
+                Config: testAccClusterConfig_DbClusterInstanceClass(rName),
+                Check: resource.ComposeTestCheckFunc(
+                    testAccCheckClusterExists(resourceName, &dbCluster),
+                    resource.TestCheckResourceAttr(resourceName, "db_cluster_instance_class", "db.r6gd.xlarge"),
+                ),
+            },
+        },
+    })
+}
+
 func TestAccRDSCluster_backtrackWindow(t *testing.T) {
     var dbCluster rds.DBCluster
     resourceName := "aws_rds_cluster.test"
@@ -2251,6 +2339,74 @@ resource "aws_rds_cluster" "test" {
 `, rName)
 }
 
+func testAccClusterConfig_StorageType(rName string) string {
+    return fmt.Sprintf(`
+resource "aws_rds_cluster" "test" {
+  apply_immediately         = true
+  cluster_identifier        = %[1]q
+  db_cluster_instance_class = "db.r6gd.xlarge"
+  engine                    = "mysql"
+  storage_type              = "io1"
+  allocated_storage         = 100
+  iops                      = 1000
+  master_password           = "mustbeeightcharaters"
+  master_username           = "test"
+  skip_final_snapshot       = true
+}
+`, rName)
+}
+
+func testAccClusterConfig_AllocatedStorage(rName string) string {
+    return fmt.Sprintf(`
+resource "aws_rds_cluster" "test" {
+  apply_immediately         = true
+  cluster_identifier        = %[1]q
+  db_cluster_instance_class = "db.r6gd.xlarge"
+  engine                    = "mysql"
+  storage_type              = "io1"
+  allocated_storage         = 100
+  iops                      = 1000
+  master_password           = "mustbeeightcharaters"
+  master_username           = "test"
+  skip_final_snapshot       = true
+}
+`, rName)
+}
+
+func testAccClusterConfig_Iops(rName string) string {
+    return fmt.Sprintf(`
+resource "aws_rds_cluster" "test" {
+  apply_immediately         = true
+  cluster_identifier        = %[1]q
+  db_cluster_instance_class = "db.r6gd.xlarge"
+  engine                    = "mysql"
+  storage_type              = "io1"
+  allocated_storage         = 100
+  iops                      = 1000
+  master_password           = "mustbeeightcharaters"
+  master_username           = "test"
+  skip_final_snapshot       = true
+}
+`, rName)
+}
+
+func testAccClusterConfig_DbClusterInstanceClass(rName string) string {
+    return fmt.Sprintf(`
+resource "aws_rds_cluster" "test" {
+  apply_immediately         = true
+  cluster_identifier        = %[1]q
+  db_cluster_instance_class = "db.r6gd.xlarge"
+  engine                    = "mysql"
+  storage_type              = "io1"
+  allocated_storage         = 100
+  iops                      = 1000
+  master_password           = "mustbeeightcharaters"
+  master_username           = "test"
+  skip_final_snapshot       = true
+}
+`, rName)
+}
+
 func testAccClusterConfig_BacktrackWindow(backtrackWindow int) string {
     return fmt.Sprintf(`
 resource "aws_rds_cluster" "test" {
diff --git a/internal/service/rds/validate.go b/internal/service/rds/validate.go
index e22f7b44cbc..6e3da6ad67e 100644
--- a/internal/service/rds/validate.go
+++ b/internal/service/rds/validate.go
@@ -148,6 +148,8 @@ func validEngine() schema.SchemaValidateFunc {
         "aurora",
         "aurora-mysql",
         "aurora-postgresql",
+        "postgres",
+        "mysql",
     }, false)
 }
 
diff --git a/website/docs/r/rds_cluster.html.markdown b/website/docs/r/rds_cluster.html.markdown
index 3ad578feb9b..fa845e602f9 100644
--- a/website/docs/r/rds_cluster.html.markdown
+++ b/website/docs/r/rds_cluster.html.markdown
@@ -95,6 +95,26 @@ resource "aws_rds_cluster" "example" {
 }
 ```
 
+### RDS Multi-AZ Cluster
+
+-> More information about RDS Multi-AZ Clusters can be found in the [RDS User Guide](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/multi-az-db-clusters-concepts.html).
+
+To create a Multi-AZ RDS cluster, you must additionally specify the `engine`, `storage_type`, `allocated_storage`, `iops` and `db_cluster_instance_class` attributes.
+
+```terraform
+resource "aws_rds_cluster" "example" {
+  cluster_identifier        = "example"
+  availability_zones        = ["us-west-2a", "us-west-2b", "us-west-2c"]
+  engine                    = "mysql"
+  db_cluster_instance_class = "db.r6gd.xlarge"
+  storage_type              = "io1"
+  allocated_storage         = 100
+  iops                      = 1000
+  master_username           = "test"
+  master_password           = "mustbeeightcharaters"
+}
+```
+
 ## Argument Reference
 
 For more detailed documentation about each argument, refer to
@@ -120,9 +140,10 @@ The following arguments are supported:
 * `deletion_protection` - (Optional) If the DB instance should have deletion protection enabled. The database can't be deleted when this value is set to `true`. The default is `false`.
 * `enable_http_endpoint` - (Optional) Enable HTTP endpoint (data API). Only valid when `engine_mode` is set to `serverless`.
 * `enabled_cloudwatch_logs_exports` - (Optional) Set of log types to export to cloudwatch. If omitted, no logs will be exported. The following log types are supported: `audit`, `error`, `general`, `slowquery`, `postgresql` (PostgreSQL).
-* `engine` - (Optional) The name of the database engine to be used for this DB cluster. Defaults to `aurora`. Valid Values: `aurora`, `aurora-mysql`, `aurora-postgresql`
+* `engine` - (Optional) The name of the database engine to be used for this DB cluster. Defaults to `aurora`. Valid Values: `aurora`, `aurora-mysql`, `aurora-postgresql`, `mysql`, `postgres`. (Note that `mysql` and `postgres` are used for Multi-AZ RDS clusters).
 * `engine_mode` - (Optional) The database engine mode. Valid values: `global` (only valid for Aurora MySQL 1.21 and earlier), `multimaster`, `parallelquery`, `provisioned`, `serverless`. Defaults to: `provisioned`. See the [RDS User Guide](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/aurora-serverless.html) for limitations when using `serverless`.
 * `engine_version` - (Optional) The database engine version. Updating this argument results in an outage. See the [Aurora MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Updates.html) and [Aurora Postgres](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.html) documentation for your configured engine to determine this value. For example with Aurora MySQL 2, a potential value for this argument is `5.7.mysql_aurora.2.03.2`. The value can contain a partial version where supported by the API. The actual engine version used is returned in the attribute `engine_version_actual`, [defined below](#engine_version_actual).
+* `db_cluster_instance_class` - (Optional) The compute and memory capacity of each DB instance in the Multi-AZ DB cluster, for example `db.m6g.xlarge`. Not all DB instance classes are available in all AWS Regions, or for all database engines. For the full list of DB instance classes and availability for your engine, see [DB instance class](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html) in the Amazon RDS User Guide. (This setting is required to create a Multi-AZ DB cluster).
 * `final_snapshot_identifier` - (Optional) The name of your final DB snapshot when this DB cluster is deleted. If omitted, no final snapshot will be made.
 * `global_cluster_identifier` - (Optional) The global cluster identifier specified on [`aws_rds_global_cluster`](/docs/providers/aws/r/rds_global_cluster.html).
 * `enable_global_write_forwarding` - (Optional) Whether cluster should forward writes to an associated global cluster. Applied to secondary clusters to enable them to forward writes to an [`aws_rds_global_cluster`](/docs/providers/aws/r/rds_global_cluster.html)'s primary cluster. See the [Aurora Userguide documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database-write-forwarding.html) for more information.
@@ -140,6 +161,9 @@ The following arguments are supported:
 * `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from `final_snapshot_identifier`. Default is `false`.
 * `snapshot_identifier` - (Optional) Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a DB cluster snapshot, or the ARN when specifying a DB snapshot.
 * `source_region` - (Optional) The source region for an encrypted replica DB cluster.
+* `allocated_storage` - (Optional) The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster. (This setting is required to create a Multi-AZ DB cluster).
+* `storage_type` - (Optional) Specifies the storage type to be associated with the DB cluster. (This setting is required to create a Multi-AZ DB cluster). Valid values: `io1`. Default: `io1`.
+* `iops` - (Optional) The amount of Provisioned IOPS (input/output operations per second) to be initially allocated for each DB instance in the Multi-AZ DB cluster. For information about valid IOPS values, see [Amazon RDS Provisioned IOPS storage to improve performance](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS) in the Amazon RDS User Guide. (This setting is required to create a Multi-AZ DB cluster). Must be a multiple between .5 and 50 of the storage amount for the DB cluster.
 * `storage_encrypted` - (Optional) Specifies whether the DB cluster is encrypted. The default is `false` for `provisioned` `engine_mode` and `true` for `serverless` `engine_mode`. When restoring an unencrypted `snapshot_identifier`, the `kms_key_id` argument must be provided to encrypt the restored cluster. Terraform will only perform drift detection if a configuration value is provided.
 * `tags` - (Optional) A map of tags to assign to the DB cluster. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
 * `vpc_security_group_ids` - (Optional) List of VPC security groups to associate with the Cluster