Skip to content

Commit

Permalink
Changes per request
Browse files Browse the repository at this point in the history
  • Loading branch information
jasonmcintosh committed Apr 24, 2018
1 parent 1512604 commit 85d7a8e
Show file tree
Hide file tree
Showing 4 changed files with 47 additions and 47 deletions.
47 changes: 21 additions & 26 deletions aws/resource_aws_db_instance.go
Original file line number Diff line number Diff line change
Expand Up @@ -221,7 +221,7 @@ func resourceAwsDbInstance() *schema.Resource {
},

"s3_import": {
Type: schema.TypeSet,
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
ConflictsWith: []string{
Expand All @@ -233,31 +233,28 @@ func resourceAwsDbInstance() *schema.Resource {
"bucket_name": {
Type: schema.TypeString,
Required: true,
Optional: false,
ForceNew: true,
},
"bucket_prefix": {
Type: schema.TypeString,
Required: false,
Optional: true,
Default: "",
ForceNew: true,
},
"ingestion_role": {
Type: schema.TypeString,
Required: true,
Optional: false,
ForceNew: true,
},
"source_engine": {
Type: schema.TypeString,
Required: false,
Optional: true,
Default: "mysql",
Required: true,
ForceNew: true,
},
"source_engine_version": {
Type: schema.TypeString,
Required: false,
Optional: true,
Default: "5.6",
Required: true,
ForceNew: true,
},
},
},
Expand Down Expand Up @@ -509,7 +506,7 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
return fmt.Errorf(`provider.aws: aws_db_instance: %s: "username": required field is not set`, d.Get("name").(string))
}

record := v.(*schema.Set).List()[0].(map[string]interface{})
s3_bucket := v.([]interface{})[0].(map[string]interface{})
opts := rds.RestoreDBInstanceFromS3Input{
AllocatedStorage: aws.Int64(int64(d.Get("allocated_storage").(int))),
AutoMinorVersionUpgrade: aws.Bool(d.Get("auto_minor_version_upgrade").(bool)),
Expand All @@ -519,15 +516,15 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
DBInstanceIdentifier: aws.String(d.Get("identifier").(string)),
Engine: aws.String(d.Get("engine").(string)),
EngineVersion: aws.String(d.Get("engine_version").(string)),
S3BucketName: aws.String(record["bucket_name"].(string)),
S3Prefix: aws.String(record["bucket_prefix"].(string)),
S3IngestionRoleArn: aws.String(record["ingestion_role"].(string)),
S3BucketName: aws.String(s3_bucket["bucket_name"].(string)),
S3Prefix: aws.String(s3_bucket["bucket_prefix"].(string)),
S3IngestionRoleArn: aws.String(s3_bucket["ingestion_role"].(string)),
MasterUsername: aws.String(d.Get("username").(string)),
MasterUserPassword: aws.String(d.Get("password").(string)),
PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)),
StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)),
SourceEngine: aws.String(record["source_engine"].(string)),
SourceEngineVersion: aws.String(record["source_engine_version"].(string)),
SourceEngine: aws.String(s3_bucket["source_engine"].(string)),
SourceEngineVersion: aws.String(s3_bucket["source_engine_version"].(string)),
Tags: tags,
}

Expand Down Expand Up @@ -621,16 +618,14 @@ func resourceAwsDbInstanceCreate(d *schema.ResourceData, meta interface{}) error
err = resource.Retry(5*time.Minute, func() *resource.RetryError {
_, err = conn.RestoreDBInstanceFromS3(&opts)
if err != nil {
if awsErr, ok := err.(awserr.Error); ok {
if awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "ENHANCED_MONITORING") {
return resource.RetryableError(awsErr)
}
if awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "S3_SNAPSHOT_INGESTION") {
return resource.RetryableError(err)
}
if awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "S3 bucket cannot be found") {
return resource.RetryableError(err)
}
if isAWSErr(err, "InvalidParameterValue", "ENHANCED_MONITORING") {
return resource.RetryableError(err)
}
if isAWSErr(err, "InvalidParameterValue", "S3_SNAPSHOT_INGESTION") {
return resource.RetryableError(err)
}
if isAWSErr(err, "InvalidParameterValue", "S3 bucket cannot be found") {
return resource.RetryableError(err)
}
return resource.NonRetryableError(err)
}
Expand Down
38 changes: 19 additions & 19 deletions aws/resource_aws_db_instance_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -318,20 +318,17 @@ func TestAccAWSDBInstance_snapshot(t *testing.T) {

func TestAccAWSDBInstance_s3(t *testing.T) {
var snap rds.DBInstance
bucket := acctest.RandString(5)
//bucket := acctest.RandString(5)
prefix := "xtrabackup"
role := acctest.RandString(5)
bucket := acctest.RandomWithPrefix("tf-acc-test")
uniqueId := acctest.RandomWithPrefix("tf-acc-s3-import-test")
bucketPrefix := acctest.RandString(5)

resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
// testAccCheckAWSDBInstanceSnapshot verifies a database snapshot is
// created, and subsequently deletes it
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSDBInstanceNoSnapshot,
Steps: []resource.TestStep{
{
Config: testAccSnapshotInstanceConfigWithS3Import(bucket, prefix, role),
Config: testAccSnapshotInstanceConfigWithS3Import(bucket, bucketPrefix, uniqueId),
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSDBInstanceExists("aws_db_instance.s3", &snap),
),
Expand Down Expand Up @@ -1070,7 +1067,7 @@ resource "aws_db_instance" "snapshot" {
}`, acctest.RandInt())
}

func testAccSnapshotInstanceConfigWithS3Import(bucketName string, prefix string, role string) string {
func testAccSnapshotInstanceConfigWithS3Import(bucketName string, bucketPrefix string, uniqueId string) string {
return fmt.Sprintf(`
resource "aws_s3_bucket" "xtrabackup" {
Expand All @@ -1079,15 +1076,15 @@ resource "aws_s3_bucket" "xtrabackup" {
resource "aws_s3_bucket_object" "xtrabackup_db" {
bucket = "${aws_s3_bucket.xtrabackup.id}"
key = "%s/sample.tar.gz"
source = "../files/2018-02-26_19-31-02.tar.gz"
etag = "${md5(file("../files/2018-02-26_19-31-02.tar.gz"))}"
key = "%s/mysql-5-6-xtrabackup.tar.gz"
source = "../files/mysql-5-6-xtrabackup.tar.gz"
etag = "${md5(file("../files/mysql-5-6-xtrabackup.tar.gz"))}"
}
resource "aws_iam_role" "rds_s3_access_role" {
name = "aws-rds-import-%s"
name = "%s-role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
Expand All @@ -1106,7 +1103,7 @@ EOF
}
resource "aws_iam_policy" "test" {
name = "tf-s3-rds-access-policy"
name = "%s-policy"
policy = <<POLICY
{
"Version": "2012-10-17",
Expand All @@ -1127,7 +1124,7 @@ POLICY
}
resource "aws_iam_policy_attachment" "test-attach" {
name = "s3_access_attachment"
name = "%s-policy-attachment"
roles = [
"${aws_iam_role.rds_s3_access_role.name}"
]
Expand Down Expand Up @@ -1163,7 +1160,7 @@ resource "aws_subnet" "bar" {
}
resource "aws_db_subnet_group" "foo" {
name = "foo"
name = "%s-subnet-group"
subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
tags {
Name = "tf-dbsubnet-group-test"
Expand All @@ -1172,7 +1169,7 @@ resource "aws_db_subnet_group" "foo" {
resource "aws_db_instance" "s3" {
identifier = "test-db-instance-from-s3-import-%s"
identifier = "%s-db"
allocated_storage = 5
engine = "mysql"
Expand All @@ -1191,12 +1188,15 @@ resource "aws_db_instance" "s3" {
db_subnet_group_name = "${aws_db_subnet_group.foo.id}"
s3_import {
source_engine = "mysql"
source_engine_version = "5.6"
bucket_name = "${aws_s3_bucket.xtrabackup.bucket}"
bucket_prefix = "%s"
ingestion_role = "${aws_iam_role.rds_s3_access_role.arn}"
}
}
`, bucketName, prefix, role, prefix, bucketName)
`, bucketName, bucketPrefix, uniqueId, uniqueId, uniqueId, uniqueId, uniqueId, bucketPrefix)
}

func testAccSnapshotInstanceConfigWithSnapshot(rInt int) string {
Expand Down
File renamed without changes.
9 changes: 7 additions & 2 deletions website/docs/r/db_instance.html.markdown
Original file line number Diff line number Diff line change
Expand Up @@ -182,21 +182,26 @@ Replicate database managed by Terraform will promote the database to a fully
standalone database.

### S3 Import Options

Full details on the core parameters and impacts are in the API Docs: [RestoreDBInstanceFromS3](http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_RestoreDBInstanceFromS3.html). Sample:

```hcl
resource "aws_db_instance" "db" {
s3_import {
source_engine = "mysql"
source_engine_version = "5.6"
bucket_name = "mybucket"
bucket_prefix = "backups"
ingestion_role = "arn:aws:iam::1234567890:role/role-xtrabackup-rds-restore"
}
}
```

* `bucket_name` - (Required) The bucket name where your backup is stored
* `bucket_prefix` - (Optional) The path within the bucket where your backup is stored; may be blank
* `ingestion_role` - (Required) Role applied to load the data.
* `source_engine` - (Defaults to 'mysql') Source engine for the backup
* `source_engine_version` - (Defaults to '5.6') Version of the source engine used to make the backup
* `source_engine` - (Required, as of Feb 2018 only 'mysql' supported) Source engine for the backup
* `source_engine_version` - (Required, as of Feb 2018 only '5.6' supported) Version of the source engine used to make the backup

This will not recreate the resource if the S3 object changes in some way. It's only used to initialize the database

Expand Down

0 comments on commit 85d7a8e

Please sign in to comment.