Merge pull request #7031 from Gufran/aurora-pit-restore
Support Aurora point-in-time restore
anGie44 authored Nov 12, 2020
2 parents 815dab7 + 2af971b commit 0738a98
Showing 5 changed files with 275 additions and 1 deletion.
144 changes: 144 additions & 0 deletions aws/resource_aws_rds_cluster.go
@@ -217,12 +217,59 @@ func resourceAwsRDSCluster() *schema.Resource {
ForceNew: true,
},

"restore_to_point_in_time": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
MaxItems: 1,
ConflictsWith: []string{
"s3_import",
"snapshot_identifier",
},
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"source_cluster_identifier": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validateRdsIdentifier,
},

"restore_type": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validation.StringInSlice([]string{
"full-copy",
"copy-on-write",
}, false),
},

"use_latest_restorable_time": {
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
ConflictsWith: []string{"restore_to_point_in_time.0.restore_to_time"},
},

"restore_to_time": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validateUTCTimestamp,
ConflictsWith: []string{"restore_to_point_in_time.0.use_latest_restorable_time"},
},
},
},
},

"s3_import": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
ConflictsWith: []string{
"snapshot_identifier",
"restore_to_point_in_time",
},
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
@@ -650,6 +697,103 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
return err
}

} else if v, ok := d.GetOk("restore_to_point_in_time"); ok {
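// restore_to_point_in_time is a MaxItems: 1 list block, so read its single element.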
pointInTime := v.([]interface{})[0].(map[string]interface{})
createOpts := &rds.RestoreDBClusterToPointInTimeInput{
DBClusterIdentifier: aws.String(identifier),
DeletionProtection: aws.Bool(d.Get("deletion_protection").(bool)),
SourceDBClusterIdentifier: aws.String(pointInTime["source_cluster_identifier"].(string)),
Tags: tags,
}

if v, ok := pointInTime["restore_to_time"].(string); ok && v != "" {
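// restore_to_time is validated as an RFC3339 timestamp by validateUTCTimestamp in the schema, so the parse error is safe to ignore here.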
restoreToTime, _ := time.Parse(time.RFC3339, v)
createOpts.RestoreToTime = aws.Time(restoreToTime)
}

if v, ok := pointInTime["use_latest_restorable_time"].(bool); ok && v {
createOpts.UseLatestRestorableTime = aws.Bool(v)
}

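// At least one of restore_to_time or use_latest_restorable_time must be set; the schema's ConflictsWith prevents both being set at once.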
if createOpts.RestoreToTime == nil && createOpts.UseLatestRestorableTime == nil {
return fmt.Errorf(`provider.aws: aws_rds_cluster: %s: Either "restore_to_time" or "use_latest_restorable_time" must be set`, d.Get("database_name").(string))
}

if attr, ok := pointInTime["restore_type"].(string); ok {
createOpts.RestoreType = aws.String(attr)
}

if v, ok := d.GetOk("backtrack_window"); ok {
createOpts.BacktrackWindow = aws.Int64(int64(v.(int)))
}

if attr, ok := d.GetOk("db_subnet_group_name"); ok {
createOpts.DBSubnetGroupName = aws.String(attr.(string))
}

if attr, ok := d.GetOk("port"); ok {
createOpts.Port = aws.Int64(int64(attr.(int)))
}

if attr, ok := d.GetOk("option_group_name"); ok {
createOpts.OptionGroupName = aws.String(attr.(string))
}

if attr := d.Get("vpc_security_group_ids").(*schema.Set); attr.Len() > 0 {
createOpts.VpcSecurityGroupIds = expandStringList(attr.List())
}

if attr, ok := d.GetOk("kms_key_id"); ok {
createOpts.KmsKeyId = aws.String(attr.(string))
}

if attr, ok := d.GetOk("enabled_cloudwatch_logs_exports"); ok && len(attr.([]interface{})) > 0 {
createOpts.EnableCloudwatchLogsExports = expandStringList(attr.([]interface{}))
}

if attr, ok := d.GetOk("iam_database_authentication_enabled"); ok {
createOpts.EnableIAMDatabaseAuthentication = aws.Bool(attr.(bool))
}

if attr, ok := d.GetOk("db_cluster_parameter_group_name"); ok {
createOpts.DBClusterParameterGroupName = aws.String(attr.(string))
}

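// These settings are applied after the restore completes, via the ModifyDBCluster request built up in modifyDbClusterInput and flagged by requiresModifyDbCluster.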
requireUpdateAttrs := []string{
"master_password",
"backup_retention_period",
"preferred_backup_window",
"preferred_maintenance_window",
"scaling_configuration",
}

for _, attr := range requireUpdateAttrs {
if val, ok := d.GetOk(attr); ok {
requiresModifyDbCluster = true
switch attr {
case "master_password":
modifyDbClusterInput.MasterUserPassword = aws.String(val.(string))
case "backup_retention_period":
modifyDbClusterInput.BackupRetentionPeriod = aws.Int64(int64(val.(int)))
case "preferred_backup_window":
modifyDbClusterInput.PreferredBackupWindow = aws.String(val.(string))
case "preferred_maintenance_window":
modifyDbClusterInput.PreferredMaintenanceWindow = aws.String(val.(string))
case "scaling_configuration":
modifyDbClusterInput.ScalingConfiguration = expandRdsClusterScalingConfiguration(d.Get("scaling_configuration").([]interface{}))
}
}
}

log.Printf("[DEBUG] RDS Cluster restore options: %s", createOpts)

resp, err := conn.RestoreDBClusterToPointInTime(createOpts)
if err != nil {
log.Printf("[ERROR] Error restoring RDS Cluster: %s", err)
return err
}

log.Printf("[DEBUG] RDS Cluster restore response: %s", resp)
} else {

createOpts := &rds.CreateDBClusterInput{
71 changes: 71 additions & 0 deletions aws/resource_aws_rds_cluster_test.go
@@ -364,6 +364,31 @@ func TestAccAWSRDSCluster_s3Restore(t *testing.T) {
})
}

func TestAccAWSRDSCluster_PointInTimeRestore(t *testing.T) {
var v rds.DBCluster
var c rds.DBCluster

parentId := acctest.RandomWithPrefix("tf-acc-point-in-time-restore-seed-test")
restoredId := acctest.RandomWithPrefix("tf-acc-point-in-time-restored-test")

resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSClusterDestroy,
Steps: []resource.TestStep{
{
Config: testAccAWSClusterConfig_pointInTimeRestoreSource(parentId, restoredId),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSClusterExists("aws_rds_cluster.test", &v),
testAccCheckAWSClusterExists("aws_rds_cluster.restored_pit", &c),
resource.TestCheckResourceAttr("aws_rds_cluster.restored_pit", "cluster_identifier", restoredId),
resource.TestCheckResourceAttrPair("aws_rds_cluster.restored_pit", "engine", "aws_rds_cluster.test", "engine"),
),
},
},
})
}

func TestAccAWSRDSCluster_generatedName(t *testing.T) {
var v rds.DBCluster
resourceName := "aws_rds_cluster.test"
@@ -2634,6 +2659,52 @@ resource "aws_rds_cluster" "default" {
`, n)
}

func testAccAWSClusterConfig_pointInTimeRestoreSource(parentId, childId string) string {
return composeConfig(testAccAvailableAZsNoOptInConfig(), fmt.Sprintf(`
resource "aws_rds_cluster" "test" {
cluster_identifier = "%[1]s"
master_username = "root"
master_password = "password"
db_subnet_group_name = aws_db_subnet_group.test.name
skip_final_snapshot = true
engine = "aurora-mysql"
}
resource "aws_vpc" "test" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "%[1]s-vpc"
}
}
resource "aws_subnet" "subnets" {
count = length(data.aws_availability_zones.available.names)
vpc_id = aws_vpc.test.id
cidr_block = "10.0.${count.index}.0/24"
availability_zone = data.aws_availability_zones.available.names[count.index]
tags = {
Name = "%[1]s-subnet-${count.index}"
}
}
resource "aws_db_subnet_group" "test" {
name = "%[1]s-db-subnet-group"
subnet_ids = aws_subnet.subnets[*].id
}
resource "aws_rds_cluster" "restored_pit" {
cluster_identifier = "%s"
skip_final_snapshot = true
engine = aws_rds_cluster.test.engine
restore_to_point_in_time {
source_cluster_identifier = aws_rds_cluster.test.cluster_identifier
restore_type = "full-copy"
use_latest_restorable_time = true
}
}
`, parentId, childId))
}

func testAccAWSClusterConfigTags1(rName, tagKey1, tagValue1 string) string {
return fmt.Sprintf(`
resource "aws_rds_cluster" "test" {
9 changes: 9 additions & 0 deletions aws/validators.go
@@ -1081,6 +1081,15 @@ func validateOnceADayWindowFormat(v interface{}, k string) (ws []string, errors
return
}

func validateUTCTimestamp(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
_, err := time.Parse(time.RFC3339, value)
if err != nil {
errors = append(errors, fmt.Errorf("%q must be in RFC3339 time format %q: %s", k, time.RFC3339, err))
}
return
}

// Validates that ECS Placement Constraints are set correctly
// Takes type, and expression as strings
func validateAwsEcsPlacementConstraint(constType, constExpr string) error {
26 changes: 26 additions & 0 deletions aws/validators_test.go
@@ -3221,3 +3221,29 @@ func TestValidateServiceDiscoveryNamespaceName(t *testing.T) {
}
}
}

func TestValidateUTCTimestamp(t *testing.T) {
validT := []string{
"2006-01-02T15:04:05Z",
}

invalidT := []string{
"2015-03-07 23:45:00",
"27-03-2019 23:45:00",
"Mon, 02 Jan 2006 15:04:05 -0700",
}

for _, f := range validT {
_, errors := validateUTCTimestamp(f, "valid_restorable_time_format")
if len(errors) > 0 {
t.Fatalf("Expected the time %q to be in valid format, got error %q", f, errors)
}
}

for _, f := range invalidT {
_, errors := validateUTCTimestamp(f, "invalid_restorable_time_format")
if len(errors) == 0 {
t.Fatalf("Expected the time %q to fail validation", f)
}
}
}
26 changes: 25 additions & 1 deletion website/docs/r/rds_cluster.html.markdown
@@ -125,6 +125,7 @@ The following arguments are supported:
* `preferred_backup_window` - (Optional) The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. Time in UTC. Default: A 30-minute window selected at random from an 8-hour block of time per region, e.g. 04:00-09:00.
* `preferred_maintenance_window` - (Optional) The weekly time range during which system maintenance can occur, in UTC, e.g. wed:04:00-wed:04:30.
* `replication_source_identifier` - (Optional) ARN of a source DB cluster or DB instance if this DB cluster is to be created as a Read Replica. If DB Cluster is part of a Global Cluster, use the [`lifecycle` configuration block `ignore_changes` argument](/docs/configuration/resources.html#ignore_changes) to prevent Terraform from showing differences for this argument instead of configuring this value.
* `restore_to_point_in_time` - (Optional) Nested attribute for [point in time restore](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_PIT.html). More details below.
* `scaling_configuration` - (Optional) Nested attribute with scaling properties. Only valid when `engine_mode` is set to `serverless`. More details below.
* `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from `final_snapshot_identifier`. Default is `false`.
* `snapshot_identifier` - (Optional) Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a DB cluster snapshot, or the ARN when specifying a DB snapshot.
@@ -133,7 +134,6 @@ The following arguments are supported:
* `tags` - (Optional) A map of tags to assign to the DB cluster.
* `vpc_security_group_ids` - (Optional) List of VPC security groups to associate with the Cluster


### S3 Import Options

Full details on the core parameters and impacts are in the API Docs: [RestoreDBClusterFromS3](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_RestoreDBClusterFromS3.html). Requires that the S3 bucket be in the same region as the RDS cluster you're trying to create. Sample:
@@ -162,6 +162,30 @@ resource "aws_rds_cluster" "db" {

This will not recreate the resource if the S3 object changes in some way. It's only used to initialize the database. This only works currently with the aurora engine. See AWS for currently supported engines and options. See [Aurora S3 Migration Docs](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AuroraMySQL.Migrating.ExtMySQL.html#AuroraMySQL.Migrating.ExtMySQL.S3).

### restore_to_point_in_time Argument Reference

~> **NOTE:** The restored DB cluster is created with the same configuration as the source DB cluster, except that it uses the default DB security group. Consequently, if any of the following arguments are specified, their values must match those of the source DB cluster: `database_name`, `master_username`, `storage_encrypted`, `replication_source_identifier`, and `source_region`.

Example:

```hcl
resource "aws_rds_cluster" "example-clone" {
# ... other configuration ...
restore_to_point_in_time {
source_cluster_identifier = "example"
restore_type = "copy-on-write"
use_latest_restorable_time = true
}
}
```

* `source_cluster_identifier` - (Required) The identifier of the source database cluster from which to restore.
* `restore_type` - (Optional) Type of restore to be performed.
Valid options are `full-copy` (default) and `copy-on-write`.
* `use_latest_restorable_time` - (Optional) Set to true to restore the database cluster to the latest restorable backup time. Defaults to false. Conflicts with `restore_to_time`.
* `restore_to_time` - (Optional) Date and time in UTC (RFC3339) format to restore the database cluster to, e.g. `2020-11-01T03:30:00Z`. Conflicts with `use_latest_restorable_time`. See the example below.
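
To restore to a specific timestamp rather than the latest restorable time, set `restore_to_time` instead of `use_latest_restorable_time`. A minimal sketch (the source cluster identifier and the timestamp are placeholders):

```hcl
resource "aws_rds_cluster" "example-pit" {
  # ... other configuration ...

  restore_to_point_in_time {
    source_cluster_identifier = "example"
    restore_type              = "full-copy"
    restore_to_time           = "2020-11-01T03:30:00Z"
  }
}
```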

### scaling_configuration Argument Reference

~> **NOTE:** `scaling_configuration` configuration is only valid when `engine_mode` is set to `serverless`.
