diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index fdf35560c4d..aefe8b36931 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -38,6 +38,11 @@ func ResourceBucketReplicationConfiguration() *schema.Resource { Required: true, ValidateFunc: verify.ValidARN, }, + "token": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + }, "rule": { Type: schema.TypeSet, Required: true, @@ -303,7 +308,7 @@ func resourceBucketReplicationConfigurationCreate(d *schema.ResourceData, meta i rc := &s3.ReplicationConfiguration{ Role: aws.String(d.Get("role").(string)), - Rules: ExpandRules(d.Get("rule").(*schema.Set).List()), + Rules: ExpandReplicationRules(d.Get("rule").(*schema.Set).List()), } input := &s3.PutBucketReplicationInput{ @@ -311,6 +316,10 @@ func resourceBucketReplicationConfigurationCreate(d *schema.ResourceData, meta i ReplicationConfiguration: rc, } + if v, ok := d.GetOk("token"); ok { + input.Token = aws.String(v.(string)) + } + err := resource.Retry(propagationTimeout, func() *resource.RetryError { _, err := conn.PutBucketReplication(input) if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { @@ -367,7 +376,7 @@ func resourceBucketReplicationConfigurationRead(d *schema.ResourceData, meta int d.Set("bucket", d.Id()) d.Set("role", r.Role) - if err := d.Set("rule", schema.NewSet(rulesHash, FlattenRules(r.Rules))); err != nil { + if err := d.Set("rule", schema.NewSet(rulesHash, FlattenReplicationRules(r.Rules))); err != nil { return fmt.Errorf("error setting rule: %w", err) } @@ -379,7 +388,7 @@ func resourceBucketReplicationConfigurationUpdate(d *schema.ResourceData, meta i rc := &s3.ReplicationConfiguration{ Role: aws.String(d.Get("role").(string)), - Rules: ExpandRules(d.Get("rule").(*schema.Set).List()), + Rules: ExpandReplicationRules(d.Get("rule").(*schema.Set).List()), } input := &s3.PutBucketReplicationInput{ @@ -387,6 +396,10 @@ func resourceBucketReplicationConfigurationUpdate(d *schema.ResourceData, meta i ReplicationConfiguration: rc, } + if v, ok := d.GetOk("token"); ok { + input.Token = aws.String(v.(string)) + } + err := resource.Retry(propagationTimeout, func() *resource.RetryError { _, err := conn.PutBucketReplication(input) if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, "InvalidRequest", "Versioning must be 'Enabled' on the bucket") { diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index 8dd0e32c072..1412b6e0191 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -197,7 +197,7 @@ func TestAccS3BucketReplicationConfiguration_multipleDestinationsNonEmptyFilter( Config: testAccBucketReplicationConfigurationWithMultipleDestinationsNonEmptyFilter(rName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketReplicationConfigurationExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "rule.#", "2"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "3"), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ "id": "rule1", "priority": "1", @@ -212,10 +212,24 @@ func 
TestAccS3BucketReplicationConfiguration_multipleDestinationsNonEmptyFilter( "priority": "2", "status": s3.ReplicationRuleStatusEnabled, "filter.#": "1", - "filter.0.prefix": "prefix2", + "filter.0.tag.#": "1", + "filter.0.tag.0.key": "Key2", + "filter.0.tag.0.value": "Value2", "destination.#": "1", "destination.0.storage_class": s3.StorageClassStandardIa, }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "rule3", + "priority": "3", + "status": s3.ReplicationRuleStatusDisabled, + "filter.#": "1", + "filter.0.and.#": "1", + "filter.0.and.0.prefix": "prefix3", + "filter.0.and.0.tags.%": "1", + "filter.0.and.0.tags.Key3": "Value3", + "destination.#": "1", + "destination.0.storage_class": s3.StorageClassOnezoneIa, + }), ), }, { @@ -744,6 +758,91 @@ func TestAccS3BucketReplicationConfiguration_existingObjectReplication(t *testin }) } +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/23487 +func TestAccS3BucketReplicationConfiguration_filter_emptyConfigurationBlock(t *testing.T) { + resourceName := "aws_s3_bucket_replication_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroy, &providers), + Steps: []resource.TestStep{ + { + Config: testAccBucketReplicationConfiguration_filter_emptyConfigurationBlock(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusDisabled, + "filter.#": "1", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/23487 +func TestAccS3BucketReplicationConfiguration_filter_emptyPrefix(t *testing.T) { + resourceName := "aws_s3_bucket_replication_configuration.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dstBucketResourceName := "aws_s3_bucket.destination" + iamRoleResourceName := "aws_iam_role.test" + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroy, &providers), + Steps: []resource.TestStep{ + { + Config: 
testAccBucketReplicationConfiguration_filter_emptyPrefix(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "role", iamRoleResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "rule.*", map[string]string{ + "id": "foobar", + "delete_marker_replication.#": "1", + "delete_marker_replication.0.status": s3.DeleteMarkerReplicationStatusDisabled, + "filter.#": "1", + "filter.0.prefix": "", + "status": s3.ReplicationRuleStatusEnabled, + "destination.#": "1", + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "rule.*.destination.0.bucket", dstBucketResourceName, "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccS3BucketReplicationConfiguration_filter_tagFilter(t *testing.T) { resourceName := "aws_s3_bucket_replication_configuration.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -974,32 +1073,42 @@ POLICY resource "aws_s3_bucket" "destination" { provider = "awsalternate" bucket = "%[1]s-destination" +} - versioning { - enabled = true - } - - lifecycle { - ignore_changes = [replication_configuration] +resource "aws_s3_bucket_versioning" "destination" { + bucket = aws_s3_bucket.destination.id + versioning_configuration { + status = "Enabled" } } resource "aws_s3_bucket" "source" { bucket = "%[1]s-source" - versioning { - enabled = true + lifecycle { + ignore_changes = [ + replication_configuration + ] } +} - lifecycle { - ignore_changes = [replication_configuration] +resource "aws_s3_bucket_versioning" "source" { + bucket = aws_s3_bucket.source.id + versioning_configuration { + status = "Enabled" } -}`, rName) +} +`, rName) } func testAccBucketReplicationConfigurationBasic(rName, storageClass string) string { return testAccBucketReplicationConfigurationBase(rName) + fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1017,9 +1126,15 @@ resource "aws_s3_bucket_replication_configuration" "test" { } func testAccBucketReplicationConfigurationRTC(rName string) string { - return acctest.ConfigCompose(testAccBucketReplicationConfigurationBase(rName), + return acctest.ConfigCompose( + testAccBucketReplicationConfigurationBase(rName), ` resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1054,6 +1169,11 @@ resource "aws_s3_bucket_replication_configuration" "test" { func testAccBucketReplicationConfigurationReplicaMods(rName string) string { return testAccBucketReplicationConfigurationBase(rName) + ` resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1086,28 +1206,36 @@ func testAccBucketReplicationConfigurationWithMultipleDestinationsEmptyFilter(rN resource "aws_s3_bucket" "destination2" { provider = "awsalternate" bucket = "%[1]s-destination2" +} - versioning { - enabled = true - } - lifecycle { - ignore_changes = [replication_configuration] +resource "aws_s3_bucket_versioning" 
"destination2" { + bucket = aws_s3_bucket.destination2.id + versioning_configuration { + status = "Enabled" } } resource "aws_s3_bucket" "destination3" { provider = "awsalternate" bucket = "%[1]s-destination3" +} - versioning { - enabled = true - } - lifecycle { - ignore_changes = [replication_configuration] +resource "aws_s3_bucket_versioning" "destination3" { + bucket = aws_s3_bucket.destination3.id + versioning_configuration { + status = "Enabled" } } resource "aws_s3_bucket_replication_configuration" "test" { + # Must have bucket versioning enabled first + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination, + aws_s3_bucket_versioning.destination2, + aws_s3_bucket_versioning.destination3 + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1161,7 +1289,6 @@ resource "aws_s3_bucket_replication_configuration" "test" { storage_class = "ONEZONE_IA" } } - }`, rName)) } @@ -1172,28 +1299,35 @@ func testAccBucketReplicationConfigurationWithMultipleDestinationsNonEmptyFilter resource "aws_s3_bucket" "destination2" { provider = "awsalternate" bucket = "%[1]s-destination2" +} - versioning { - enabled = true - } - lifecycle { - ignore_changes = [replication_configuration] +resource "aws_s3_bucket_versioning" "destination2" { + bucket = aws_s3_bucket.destination2.id + versioning_configuration { + status = "Enabled" } } resource "aws_s3_bucket" "destination3" { provider = "awsalternate" bucket = "%[1]s-destination3" +} - versioning { - enabled = true - } - lifecycle { - ignore_changes = [replication_configuration] +resource "aws_s3_bucket_versioning" "destination3" { + bucket = aws_s3_bucket.destination3.id + versioning_configuration { + status = "Enabled" } } resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination, + aws_s3_bucket_versioning.destination2, + aws_s3_bucket_versioning.destination3 + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1222,11 +1356,14 @@ resource "aws_s3_bucket_replication_configuration" "test" { status = "Enabled" filter { - prefix = "prefix2" + tag { + key = "Key2" + value = "Value2" + } } delete_marker_replication { - status = "Enabled" + status = "Disabled" } destination { @@ -1235,6 +1372,29 @@ resource "aws_s3_bucket_replication_configuration" "test" { } } + rule { + id = "rule3" + priority = 3 + status = "Disabled" + + filter { + and { + prefix = "prefix3" + tags = { + Key3 = "Value3" + } + } + } + + delete_marker_replication { + status = "Disabled" + } + + destination { + bucket = aws_s3_bucket.destination3.arn + storage_class = "ONEZONE_IA" + } + } }`, rName)) } @@ -1245,16 +1405,22 @@ func testAccBucketReplicationConfigurationWithMultipleDestinationsTwoDestination resource "aws_s3_bucket" "destination2" { provider = "awsalternate" bucket = "%[1]s-destination2" +} - versioning { - enabled = true - } - lifecycle { - ignore_changes = [replication_configuration] +resource "aws_s3_bucket_versioning" "destination2" { + bucket = aws_s3_bucket.destination2.id + versioning_configuration { + status = "Enabled" } } resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination, + aws_s3_bucket_versioning.destination2 + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1307,6 +1473,11 @@ resource "aws_kms_key" "test" { } resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + 
aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1339,6 +1510,11 @@ func testAccBucketReplicationConfigurationWithAccessControlTranslation(rName str data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1365,6 +1541,11 @@ func testAccBucketReplicationConfigurationRulesDestination(rName string) string data "aws_caller_identity" "current" {} resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1393,6 +1574,11 @@ resource "aws_kms_key" "test" { } resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1426,6 +1612,11 @@ resource "aws_s3_bucket_replication_configuration" "test" { func testAccBucketReplicationConfigurationWithoutStorageClass(rName string) string { return testAccBucketReplicationConfigurationBase(rName) + ` resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1444,6 +1635,11 @@ resource "aws_s3_bucket_replication_configuration" "test" { func testAccBucketReplicationConfigurationWithV2ConfigurationNoTags(rName string) string { return testAccBucketReplicationConfigurationBase(rName) + ` resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1491,30 +1687,41 @@ POLICY resource "aws_s3_bucket" "destination" { bucket = %[2]q +} - versioning { - enabled = true +resource "aws_s3_bucket_versioning" "destination" { + bucket = aws_s3_bucket.destination.id + versioning_configuration { + status = "Enabled" } +} + +resource "aws_s3_bucket" "source" { + bucket = %[1]q lifecycle { ignore_changes = [replication_configuration] } } -resource "aws_s3_bucket" "source" { - bucket = %[1]q +resource "aws_s3_bucket_acl" "source_acl" { + bucket = aws_s3_bucket.source.id acl = "private" +} - versioning { - enabled = true - } - - lifecycle { - ignore_changes = [replication_configuration] +resource "aws_s3_bucket_versioning" "source" { + bucket = aws_s3_bucket.source.id + versioning_configuration { + status = "Enabled" } } resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1564,30 +1771,41 @@ POLICY resource "aws_s3_bucket" "destination" { bucket = %[2]q +} - versioning { - enabled = true +resource "aws_s3_bucket_versioning" "destination" { + bucket = aws_s3_bucket.destination.id + versioning_configuration { + status = "Enabled" } +} + +resource "aws_s3_bucket" "source" { + bucket = %[1]q lifecycle { ignore_changes = [replication_configuration] } } -resource "aws_s3_bucket" "source" { - bucket = %[1]q +resource "aws_s3_bucket_acl" "source_acl" { + bucket = 
aws_s3_bucket.source.id acl = "private" +} - versioning { - enabled = true - } - - lifecycle { - ignore_changes = [replication_configuration] +resource "aws_s3_bucket_versioning" "source" { + bucket = aws_s3_bucket.source.id + versioning_configuration { + status = "Enabled" } } resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1616,11 +1834,74 @@ resource "aws_s3_bucket_replication_configuration" "test" { `, rName, rNameDestination) } +func testAccBucketReplicationConfiguration_filter_emptyConfigurationBlock(rName string) string { + return acctest.ConfigCompose( + testAccBucketReplicationConfigurationBase(rName), + ` +resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [aws_s3_bucket_versioning.source] + + bucket = aws_s3_bucket.source.id + role = aws_iam_role.test.arn + + rule { + id = "foobar" + + delete_marker_replication { + status = "Disabled" + } + + filter {} + + status = "Enabled" + + destination { + bucket = aws_s3_bucket.destination.arn + } + } +}`) +} + +func testAccBucketReplicationConfiguration_filter_emptyPrefix(rName string) string { + return acctest.ConfigCompose( + testAccBucketReplicationConfigurationBase(rName), ` +resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [aws_s3_bucket_versioning.source] + + bucket = aws_s3_bucket.source.id + role = aws_iam_role.test.arn + + rule { + id = "foobar" + + delete_marker_replication { + status = "Disabled" + } + + filter { + prefix = "" + } + + status = "Enabled" + + destination { + bucket = aws_s3_bucket.destination.arn + } + } +}`, + ) +} + func testAccBucketReplicationConfiguration_filter_tag(rName, key, value string) string { return acctest.ConfigCompose( testAccBucketReplicationConfigurationBase(rName), fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1652,6 +1933,11 @@ func testAccBucketReplicationConfiguration_filter_andOperator_tags(rName, key1, testAccBucketReplicationConfigurationBase(rName), fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1685,6 +1971,11 @@ func testAccBucketReplicationConfiguration_filter_andOperator_prefixAndTags(rNam testAccBucketReplicationConfigurationBase(rName), fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1717,6 +2008,11 @@ resource "aws_s3_bucket_replication_configuration" "test" { func testAccBucketReplicationConfiguration_schemaV2DestinationMetrics_statusOnly(rName, storageClass string) string { return testAccBucketReplicationConfigurationBase(rName) + fmt.Sprintf(` resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn @@ -1748,6 +2044,11 @@ func testAccBucketReplicationConfigurationWithoutPrefix(rName string) string { 
testAccBucketReplicationConfigurationBase(rName), ` resource "aws_s3_bucket_replication_configuration" "test" { + depends_on = [ + aws_s3_bucket_versioning.source, + aws_s3_bucket_versioning.destination + ] + bucket = aws_s3_bucket.source.id role = aws_iam_role.test.arn diff --git a/internal/service/s3/flex.go b/internal/service/s3/flex.go index c1cc5a3ac69..38b7abd98e4 100644 --- a/internal/service/s3/flex.go +++ b/internal/service/s3/flex.go @@ -12,7 +12,7 @@ import ( tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" ) -func ExpandAccessControlTranslation(l []interface{}) *s3.AccessControlTranslation { +func ExpandReplicationRuleDestinationAccessControlTranslation(l []interface{}) *s3.AccessControlTranslation { if len(l) == 0 || l[0] == nil { return nil } @@ -32,7 +32,7 @@ func ExpandAccessControlTranslation(l []interface{}) *s3.AccessControlTranslatio return result } -func ExpandEncryptionConfiguration(l []interface{}) *s3.EncryptionConfiguration { +func ExpandReplicationRuleDestinationEncryptionConfiguration(l []interface{}) *s3.EncryptionConfiguration { if len(l) == 0 || l[0] == nil { return nil } @@ -52,7 +52,7 @@ func ExpandEncryptionConfiguration(l []interface{}) *s3.EncryptionConfiguration return result } -func ExpandDeleteMarkerReplication(l []interface{}) *s3.DeleteMarkerReplication { +func ExpandReplicationRuleDeleteMarkerReplication(l []interface{}) *s3.DeleteMarkerReplication { if len(l) == 0 || l[0] == nil { return nil } @@ -72,7 +72,7 @@ func ExpandDeleteMarkerReplication(l []interface{}) *s3.DeleteMarkerReplication return result } -func ExpandDestination(l []interface{}) *s3.Destination { +func ExpandReplicationRuleDestination(l []interface{}) *s3.Destination { if len(l) == 0 || l[0] == nil { return nil } @@ -86,7 +86,7 @@ func ExpandDestination(l []interface{}) *s3.Destination { result := &s3.Destination{} if v, ok := tfMap["access_control_translation"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.AccessControlTranslation = ExpandAccessControlTranslation(v) + result.AccessControlTranslation = ExpandReplicationRuleDestinationAccessControlTranslation(v) } if v, ok := tfMap["account"].(string); ok && v != "" { @@ -98,15 +98,15 @@ func ExpandDestination(l []interface{}) *s3.Destination { } if v, ok := tfMap["encryption_configuration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.EncryptionConfiguration = ExpandEncryptionConfiguration(v) + result.EncryptionConfiguration = ExpandReplicationRuleDestinationEncryptionConfiguration(v) } if v, ok := tfMap["metrics"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.Metrics = ExpandMetrics(v) + result.Metrics = ExpandReplicationRuleDestinationMetrics(v) } if v, ok := tfMap["replication_time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.ReplicationTime = ExpandReplicationTime(v) + result.ReplicationTime = ExpandReplicationRuleDestinationReplicationTime(v) } if v, ok := tfMap["storage_class"].(string); ok && v != "" { @@ -116,7 +116,7 @@ func ExpandDestination(l []interface{}) *s3.Destination { return result } -func ExpandExistingObjectReplication(l []interface{}) *s3.ExistingObjectReplication { +func ExpandReplicationRuleExistingObjectReplication(l []interface{}) *s3.ExistingObjectReplication { if len(l) == 0 || l[0] == nil { return nil } @@ -136,32 +136,37 @@ func ExpandExistingObjectReplication(l []interface{}) *s3.ExistingObjectReplicat return result } -func ExpandFilter(l []interface{}) *s3.ReplicationRuleFilter { - if len(l) == 0 || l[0] == nil { +func 
ExpandReplicationRuleFilter(l []interface{}) *s3.ReplicationRuleFilter { + if len(l) == 0 { return nil } - tfMap, ok := l[0].(map[string]interface{}) + result := &s3.ReplicationRuleFilter{} - if !ok { - return nil + // Support the empty filter block in terraform i.e. 'filter {}', + // which is also supported by the API even though the docs note that + // one of Prefix, Tag, or And is required. + if l[0] == nil { + return result } - result := &s3.ReplicationRuleFilter{} + tfMap := l[0].(map[string]interface{}) if v, ok := tfMap["and"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.And = ExpandReplicationRuleAndOperator(v) + result.And = ExpandReplicationRuleFilterAndOperator(v) } - if v, ok := tfMap["prefix"].(string); ok && v != "" { - result.Prefix = aws.String(v) + if v, ok := tfMap["tag"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.Tag = ExpandReplicationRuleFilterTag(v) } - if v, ok := tfMap["tag"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - tags := Tags(tftags.New(v[0]).IgnoreAWS()) - if len(tags) > 0 { - result.Tag = tags[0] - } + // Per AWS S3 API, "A Filter must have exactly one of Prefix, Tag, or And specified"; + // Specifying more than one of the listed parameters results in a MalformedXML error. + // If a filter is specified as filter { prefix = "" } in Terraform, we should send the prefix value + // in the API request even if it is an empty value, else Terraform will report non-empty plans. + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/23487 + if v, ok := tfMap["prefix"].(string); ok && result.And == nil && result.Tag == nil { + result.Prefix = aws.String(v) } return result @@ -460,7 +465,7 @@ func ExpandLifecycleRules(l []interface{}) ([]*s3.LifecycleRule, error) { return results, nil } -func ExpandMetrics(l []interface{}) *s3.Metrics { +func ExpandReplicationRuleDestinationMetrics(l []interface{}) *s3.Metrics { if len(l) == 0 || l[0] == nil { return nil } @@ -474,7 +479,7 @@ func ExpandMetrics(l []interface{}) *s3.Metrics { result := &s3.Metrics{} if v, ok := tfMap["event_threshold"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.EventThreshold = ExpandReplicationTimeValue(v) + result.EventThreshold = ExpandReplicationRuleDestinationReplicationTimeValue(v) } if v, ok := tfMap["status"].(string); ok && v != "" { @@ -484,7 +489,7 @@ func ExpandMetrics(l []interface{}) *s3.Metrics { return result } -func ExpandReplicationRuleAndOperator(l []interface{}) *s3.ReplicationRuleAndOperator { +func ExpandReplicationRuleFilterAndOperator(l []interface{}) *s3.ReplicationRuleAndOperator { if len(l) == 0 || l[0] == nil { return nil } @@ -511,7 +516,7 @@ func ExpandReplicationRuleAndOperator(l []interface{}) *s3.ReplicationRuleAndOpe return result } -func ExpandReplicationTime(l []interface{}) *s3.ReplicationTime { +func ExpandReplicationRuleDestinationReplicationTime(l []interface{}) *s3.ReplicationTime { if len(l) == 0 || l[0] == nil { return nil } @@ -529,13 +534,13 @@ func ExpandReplicationTime(l []interface{}) *s3.ReplicationTime { } if v, ok := tfMap["time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.Time = ExpandReplicationTimeValue(v) + result.Time = ExpandReplicationRuleDestinationReplicationTimeValue(v) } return result } -func ExpandReplicationTimeValue(l []interface{}) *s3.ReplicationTimeValue { +func ExpandReplicationRuleDestinationReplicationTimeValue(l []interface{}) *s3.ReplicationTimeValue { if len(l) == 0 || l[0] == nil { return nil } @@ -555,7 +560,7 @@ func 
ExpandReplicationTimeValue(l []interface{}) *s3.ReplicationTimeValue { return result } -func ExpandReplicaModifications(l []interface{}) *s3.ReplicaModifications { +func ExpandSourceSelectionCriteriaReplicaModifications(l []interface{}) *s3.ReplicaModifications { if len(l) == 0 || l[0] == nil { return nil } @@ -575,7 +580,7 @@ func ExpandReplicaModifications(l []interface{}) *s3.ReplicaModifications { return result } -func ExpandRules(l []interface{}) []*s3.ReplicationRule { +func ExpandReplicationRules(l []interface{}) []*s3.ReplicationRule { var rules []*s3.ReplicationRule for _, tfMapRaw := range l { @@ -586,15 +591,15 @@ func ExpandRules(l []interface{}) []*s3.ReplicationRule { rule := &s3.ReplicationRule{} if v, ok := tfMap["delete_marker_replication"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.DeleteMarkerReplication = ExpandDeleteMarkerReplication(v) + rule.DeleteMarkerReplication = ExpandReplicationRuleDeleteMarkerReplication(v) } if v, ok := tfMap["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.Destination = ExpandDestination(v) + rule.Destination = ExpandReplicationRuleDestination(v) } if v, ok := tfMap["existing_object_replication"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.ExistingObjectReplication = ExpandExistingObjectReplication(v) + rule.ExistingObjectReplication = ExpandReplicationRuleExistingObjectReplication(v) } if v, ok := tfMap["id"].(string); ok && v != "" { @@ -602,16 +607,19 @@ func ExpandRules(l []interface{}) []*s3.ReplicationRule { } if v, ok := tfMap["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.SourceSelectionCriteria = ExpandSourceSelectionCriteria(v) + rule.SourceSelectionCriteria = ExpandReplicationRuleSourceSelectionCriteria(v) } if v, ok := tfMap["status"].(string); ok && v != "" { rule.Status = aws.String(v) } - if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + // Support the empty filter block in terraform i.e. 'filter {}', + // which implies the replication rule does not require a specific filter, + // by expanding the "filter" array even if the first element is nil. 
+ if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 { // XML schema V2 - rule.Filter = ExpandFilter(v) + rule.Filter = ExpandReplicationRuleFilter(v) rule.Priority = aws.Int64(int64(tfMap["priority"].(int))) } else { // XML schema V1 @@ -624,7 +632,7 @@ func ExpandRules(l []interface{}) []*s3.ReplicationRule { return rules } -func ExpandSourceSelectionCriteria(l []interface{}) *s3.SourceSelectionCriteria { +func ExpandReplicationRuleSourceSelectionCriteria(l []interface{}) *s3.SourceSelectionCriteria { if len(l) == 0 || l[0] == nil { return nil } @@ -638,17 +646,17 @@ func ExpandSourceSelectionCriteria(l []interface{}) *s3.SourceSelectionCriteria result := &s3.SourceSelectionCriteria{} if v, ok := tfMap["replica_modifications"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.ReplicaModifications = ExpandReplicaModifications(v) + result.ReplicaModifications = ExpandSourceSelectionCriteriaReplicaModifications(v) } if v, ok := tfMap["sse_kms_encrypted_objects"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.SseKmsEncryptedObjects = ExpandSseKmsEncryptedObjects(v) + result.SseKmsEncryptedObjects = ExpandSourceSelectionCriteriaSseKmsEncryptedObjects(v) } return result } -func ExpandSseKmsEncryptedObjects(l []interface{}) *s3.SseKmsEncryptedObjects { +func ExpandSourceSelectionCriteriaSseKmsEncryptedObjects(l []interface{}) *s3.SseKmsEncryptedObjects { if len(l) == 0 || l[0] == nil { return nil } @@ -668,7 +676,7 @@ func ExpandSseKmsEncryptedObjects(l []interface{}) *s3.SseKmsEncryptedObjects { return result } -func ExpandTag(l []interface{}) *s3.Tag { +func ExpandReplicationRuleFilterTag(l []interface{}) *s3.Tag { if len(l) == 0 || l[0] == nil { return nil } @@ -692,7 +700,7 @@ func ExpandTag(l []interface{}) *s3.Tag { return result } -func FlattenAccessControlTranslation(act *s3.AccessControlTranslation) []interface{} { +func FlattenReplicationRuleDestinationAccessControlTranslation(act *s3.AccessControlTranslation) []interface{} { if act == nil { return []interface{}{} } @@ -706,7 +714,7 @@ func FlattenAccessControlTranslation(act *s3.AccessControlTranslation) []interfa return []interface{}{m} } -func FlattenEncryptionConfiguration(ec *s3.EncryptionConfiguration) []interface{} { +func FlattenReplicationRuleDestinationEncryptionConfiguration(ec *s3.EncryptionConfiguration) []interface{} { if ec == nil { return []interface{}{} } @@ -720,7 +728,7 @@ func FlattenEncryptionConfiguration(ec *s3.EncryptionConfiguration) []interface{ return []interface{}{m} } -func FlattenDeleteMarkerReplication(dmr *s3.DeleteMarkerReplication) []interface{} { +func FlattenReplicationRuleDeleteMarkerReplication(dmr *s3.DeleteMarkerReplication) []interface{} { if dmr == nil { return []interface{}{} } @@ -734,7 +742,7 @@ func FlattenDeleteMarkerReplication(dmr *s3.DeleteMarkerReplication) []interface return []interface{}{m} } -func FlattenDestination(dest *s3.Destination) []interface{} { +func FlattenReplicationRuleDestination(dest *s3.Destination) []interface{} { if dest == nil { return []interface{}{} } @@ -742,7 +750,7 @@ func FlattenDestination(dest *s3.Destination) []interface{} { m := make(map[string]interface{}) if dest.AccessControlTranslation != nil { - m["access_control_translation"] = FlattenAccessControlTranslation(dest.AccessControlTranslation) + m["access_control_translation"] = FlattenReplicationRuleDestinationAccessControlTranslation(dest.AccessControlTranslation) } if dest.Account != nil { @@ -754,15 +762,15 @@ func FlattenDestination(dest *s3.Destination) 
[]interface{} { } if dest.EncryptionConfiguration != nil { - m["encryption_configuration"] = FlattenEncryptionConfiguration(dest.EncryptionConfiguration) + m["encryption_configuration"] = FlattenReplicationRuleDestinationEncryptionConfiguration(dest.EncryptionConfiguration) } if dest.Metrics != nil { - m["metrics"] = FlattenMetrics(dest.Metrics) + m["metrics"] = FlattenReplicationRuleDestinationMetrics(dest.Metrics) } if dest.ReplicationTime != nil { - m["replication_time"] = FlattenReplicationTime(dest.ReplicationTime) + m["replication_time"] = FlattenReplicationRuleDestinationReplicationTime(dest.ReplicationTime) } if dest.StorageClass != nil { @@ -772,7 +780,7 @@ func FlattenDestination(dest *s3.Destination) []interface{} { return []interface{}{m} } -func FlattenExistingObjectReplication(eor *s3.ExistingObjectReplication) []interface{} { +func FlattenReplicationRuleExistingObjectReplication(eor *s3.ExistingObjectReplication) []interface{} { if eor == nil { return []interface{}{} } @@ -786,7 +794,7 @@ func FlattenExistingObjectReplication(eor *s3.ExistingObjectReplication) []inter return []interface{}{m} } -func FlattenFilter(filter *s3.ReplicationRuleFilter) []interface{} { +func FlattenReplicationRuleFilter(filter *s3.ReplicationRuleFilter) []interface{} { if filter == nil { return []interface{}{} } @@ -794,7 +802,7 @@ func FlattenFilter(filter *s3.ReplicationRuleFilter) []interface{} { m := make(map[string]interface{}) if filter.And != nil { - m["and"] = FlattenReplicationRuleAndOperator(filter.And) + m["and"] = FlattenReplicationRuleFilterAndOperator(filter.And) } if filter.Prefix != nil { @@ -802,8 +810,7 @@ func FlattenFilter(filter *s3.ReplicationRuleFilter) []interface{} { } if filter.Tag != nil { - tag := KeyValueTags([]*s3.Tag{filter.Tag}).IgnoreAWS().Map() - m["tag"] = []interface{}{tag} + m["tag"] = FlattenReplicationRuleFilterTag(filter.Tag) } return []interface{}{m} @@ -1057,7 +1064,7 @@ func FlattenLifecycleRuleTransitions(transitions []*s3.Transition) []interface{} return results } -func FlattenMetrics(metrics *s3.Metrics) []interface{} { +func FlattenReplicationRuleDestinationMetrics(metrics *s3.Metrics) []interface{} { if metrics == nil { return []interface{}{} } @@ -1065,7 +1072,7 @@ func FlattenMetrics(metrics *s3.Metrics) []interface{} { m := make(map[string]interface{}) if metrics.EventThreshold != nil { - m["event_threshold"] = FlattenReplicationTimeValue(metrics.EventThreshold) + m["event_threshold"] = FlattenReplicationRuleDestinationReplicationTimeValue(metrics.EventThreshold) } if metrics.Status != nil { @@ -1075,7 +1082,7 @@ func FlattenMetrics(metrics *s3.Metrics) []interface{} { return []interface{}{m} } -func FlattenReplicationTime(rt *s3.ReplicationTime) []interface{} { +func FlattenReplicationRuleDestinationReplicationTime(rt *s3.ReplicationTime) []interface{} { if rt == nil { return []interface{}{} } @@ -1087,14 +1094,14 @@ func FlattenReplicationTime(rt *s3.ReplicationTime) []interface{} { } if rt.Time != nil { - m["time"] = FlattenReplicationTimeValue(rt.Time) + m["time"] = FlattenReplicationRuleDestinationReplicationTimeValue(rt.Time) } return []interface{}{m} } -func FlattenReplicationTimeValue(rtv *s3.ReplicationTimeValue) []interface{} { +func FlattenReplicationRuleDestinationReplicationTimeValue(rtv *s3.ReplicationTimeValue) []interface{} { if rtv == nil { return []interface{}{} } @@ -1108,7 +1115,7 @@ func FlattenReplicationTimeValue(rtv *s3.ReplicationTimeValue) []interface{} { return []interface{}{m} } -func FlattenRules(rules 
[]*s3.ReplicationRule) []interface{} { +func FlattenReplicationRules(rules []*s3.ReplicationRule) []interface{} { if len(rules) == 0 { return []interface{}{} } @@ -1123,19 +1130,19 @@ func FlattenRules(rules []*s3.ReplicationRule) []interface{} { m := make(map[string]interface{}) if rule.DeleteMarkerReplication != nil { - m["delete_marker_replication"] = FlattenDeleteMarkerReplication(rule.DeleteMarkerReplication) + m["delete_marker_replication"] = FlattenReplicationRuleDeleteMarkerReplication(rule.DeleteMarkerReplication) } if rule.Destination != nil { - m["destination"] = FlattenDestination(rule.Destination) + m["destination"] = FlattenReplicationRuleDestination(rule.Destination) } if rule.ExistingObjectReplication != nil { - m["existing_object_replication"] = FlattenExistingObjectReplication(rule.ExistingObjectReplication) + m["existing_object_replication"] = FlattenReplicationRuleExistingObjectReplication(rule.ExistingObjectReplication) } if rule.Filter != nil { - m["filter"] = FlattenFilter(rule.Filter) + m["filter"] = FlattenReplicationRuleFilter(rule.Filter) } if rule.ID != nil { @@ -1151,7 +1158,7 @@ func FlattenRules(rules []*s3.ReplicationRule) []interface{} { } if rule.SourceSelectionCriteria != nil { - m["source_selection_criteria"] = FlattenSourceSelectionCriteria(rule.SourceSelectionCriteria) + m["source_selection_criteria"] = FlattenReplicationRuleSourceSelectionCriteria(rule.SourceSelectionCriteria) } if rule.Status != nil { @@ -1164,7 +1171,7 @@ func FlattenRules(rules []*s3.ReplicationRule) []interface{} { return results } -func FlattenReplicaModifications(rc *s3.ReplicaModifications) []interface{} { +func FlattenSourceSelectionCriteriaReplicaModifications(rc *s3.ReplicaModifications) []interface{} { if rc == nil { return []interface{}{} } @@ -1178,7 +1185,7 @@ func FlattenReplicaModifications(rc *s3.ReplicaModifications) []interface{} { return []interface{}{m} } -func FlattenReplicationRuleAndOperator(op *s3.ReplicationRuleAndOperator) []interface{} { +func FlattenReplicationRuleFilterAndOperator(op *s3.ReplicationRuleAndOperator) []interface{} { if op == nil { return []interface{}{} } @@ -1197,7 +1204,26 @@ func FlattenReplicationRuleAndOperator(op *s3.ReplicationRuleAndOperator) []inte } -func FlattenSourceSelectionCriteria(ssc *s3.SourceSelectionCriteria) []interface{} { +func FlattenReplicationRuleFilterTag(tag *s3.Tag) []interface{} { + if tag == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if tag.Key != nil { + m["key"] = aws.StringValue(tag.Key) + } + + if tag.Value != nil { + m["value"] = aws.StringValue(tag.Value) + } + + return []interface{}{m} + +} + +func FlattenReplicationRuleSourceSelectionCriteria(ssc *s3.SourceSelectionCriteria) []interface{} { if ssc == nil { return []interface{}{} } @@ -1205,17 +1231,17 @@ func FlattenSourceSelectionCriteria(ssc *s3.SourceSelectionCriteria) []interface m := make(map[string]interface{}) if ssc.ReplicaModifications != nil { - m["replica_modifications"] = FlattenReplicaModifications(ssc.ReplicaModifications) + m["replica_modifications"] = FlattenSourceSelectionCriteriaReplicaModifications(ssc.ReplicaModifications) } if ssc.SseKmsEncryptedObjects != nil { - m["sse_kms_encrypted_objects"] = FlattenSseKmsEncryptedObjects(ssc.SseKmsEncryptedObjects) + m["sse_kms_encrypted_objects"] = FlattenSourceSelectionCriteriaSseKmsEncryptedObjects(ssc.SseKmsEncryptedObjects) } return []interface{}{m} } -func FlattenSseKmsEncryptedObjects(objects *s3.SseKmsEncryptedObjects) []interface{} { +func 
FlattenSourceSelectionCriteriaSseKmsEncryptedObjects(objects *s3.SseKmsEncryptedObjects) []interface{} { if objects == nil { return []interface{}{} } diff --git a/internal/service/s3/flex_test.go b/internal/service/s3/flex_test.go new file mode 100644 index 00000000000..2efdb753180 --- /dev/null +++ b/internal/service/s3/flex_test.go @@ -0,0 +1,71 @@ +package s3 + +import ( + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" +) + +func TestExpandReplicationRuleFilterTag(t *testing.T) { + expectedKey := "TestKey1" + expectedValue := "TestValue1" + + tagMap := map[string]interface{}{ + "key": expectedKey, + "value": expectedValue, + } + + result := ExpandReplicationRuleFilterTag([]interface{}{tagMap}) + + if result == nil { + t.Fatalf("Expected *s3.Tag to not be nil") + } + + if actualKey := aws.StringValue(result.Key); actualKey != expectedKey { + t.Fatalf("Expected key %s, got %s", expectedKey, actualKey) + } + + if actualValue := aws.StringValue(result.Value); actualValue != expectedValue { + t.Fatalf("Expected value %s, got %s", expectedValue, actualValue) + } +} + +func TestFlattenReplicationRuleFilterTag(t *testing.T) { + expectedKey := "TestKey1" + expectedValue := "TestValue1" + + tag := &s3.Tag{ + Key: aws.String(expectedKey), + Value: aws.String(expectedValue), + } + + result := FlattenReplicationRuleFilterTag(tag) + + if len(result) != 1 { + t.Fatalf("Expected array to have exactly 1 element, got %d", len(result)) + } + + tagMap, ok := result[0].(map[string]interface{}) + if !ok { + t.Fatal("Expected element in array to be a map[string]interface{}") + } + + actualKey, ok := tagMap["key"].(string) + if !ok { + t.Fatal("Expected string 'key' key in the map") + } + + if actualKey != expectedKey { + t.Fatalf("Expected 'key' to equal %s, got %s", expectedKey, actualKey) + } + + actualValue, ok := tagMap["value"].(string) + if !ok { + t.Fatal("Expected string 'value' key in the map") + } + + if actualValue != expectedValue { + t.Fatalf("Expected 'value' to equal %s, got %s", expectedValue, actualValue) + } +} diff --git a/website/docs/r/s3_bucket_replication_configuration.html.markdown b/website/docs/r/s3_bucket_replication_configuration.html.markdown index ec65a825dd2..2ea534a4255 100644 --- a/website/docs/r/s3_bucket_replication_configuration.html.markdown +++ b/website/docs/r/s3_bucket_replication_configuration.html.markdown @@ -10,6 +10,8 @@ description: |- Provides an independent configuration resource for S3 bucket [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). +~> **NOTE:** S3 Buckets only support a single replication configuration. Declaring multiple `aws_s3_bucket_replication_configuration` resources to the same S3 Bucket will cause a perpetual difference in configuration. 
+ ## Example Usage ### Using replication configuration @@ -93,20 +95,18 @@ resource "aws_iam_role_policy_attachment" "replication" { resource "aws_s3_bucket" "destination" { bucket = "tf-test-bucket-destination-12345" +} - versioning { - enabled = true +resource "aws_s3_bucket_versioning" "destination" { + bucket = aws_s3_bucket.destination.id + versioning_configuration { + status = "Enabled" } } resource "aws_s3_bucket" "source" { provider = aws.central bucket = "tf-test-bucket-source-12345" - acl = "private" - - versioning { - enabled = true - } lifecycle { ignore_changes = [ @@ -115,7 +115,24 @@ resource "aws_s3_bucket" "source" { } } +resource "aws_s3_bucket_acl" "source_bucket_acl" { + bucket = aws_s3_bucket.source.id + acl = "private" +} + +resource "aws_s3_bucket_versioning" "source" { + provider = aws.central + + bucket = aws_s3_bucket.source.id + versioning_configuration { + status = "Enabled" + } +} + resource "aws_s3_bucket_replication_configuration" "replication" { + # Must have bucket versioning enabled first + depends_on = [aws_s3_bucket_versioning.source] + role = aws_iam_role.replication.arn bucket = aws_s3_bucket.source.id @@ -140,10 +157,6 @@ resource "aws_s3_bucket_replication_configuration" "replication" { resource "aws_s3_bucket" "east" { bucket = "tf-test-bucket-east-12345" - versioning { - enabled = true - } - lifecycle { ignore_changes = [ replication_configuration @@ -151,14 +164,17 @@ resource "aws_s3_bucket" "east" { } } +resource "aws_s3_bucket_versioning" "east" { + bucket = aws_s3_bucket.east.id + versioning_configuration { + status = "Enabled" + } +} + resource "aws_s3_bucket" "west" { provider = west bucket = "tf-test-bucket-west-12345" - versioning { - enabled = true - } - lifecycle { ignore_changes = [ replication_configuration @@ -166,7 +182,19 @@ resource "aws_s3_bucket" "west" { } } +resource "aws_s3_bucket_versioning" "west" { + provider = west + + bucket = aws_s3_bucket.west.id + versioning_configuration { + status = "Enabled" + } +} + resource "aws_s3_bucket_replication_configuration" "east_to_west" { + # Must have bucket versioning enabled first + depends_on = [aws_s3_bucket_versioning.east] + role = aws_iam_role.east_replication.arn bucket = aws_s3_bucket.east.id @@ -183,6 +211,9 @@ resource "aws_s3_bucket_replication_configuration" "east_to_west" { } resource "aws_s3_bucket_replication_configuration" "west_to_east" { + # Must have bucket versioning enabled first + depends_on = [aws_s3_bucket_versioning.west] + role = aws_iam_role.west_replication.arn bucket = aws_s3_bucket.west.id @@ -229,6 +260,8 @@ The following arguments are supported: * `bucket` - (Required) The name of the source S3 bucket you want Amazon S3 to monitor. * `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. * `rule` - (Required) Set of configuration blocks describing the rules managing the replication [documented below](#rule). +* `token` - (Optional) A token to allow replication to be enabled on an Object Lock-enabled bucket. You must contact AWS support for the bucket's "Object Lock token". +For more details, see [Using S3 Object Lock with replication](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-managing-replication). ### rule @@ -241,9 +274,9 @@ The `rule` configuration block supports the following arguments: * `delete_marker_replication` - (Optional) Whether delete markers are replicated. 
This argument is only valid with V2 replication configurations (i.e., when `filter` is used)[documented below](#delete_marker_replication). * `destination` - (Required) Specifies the destination for the rule [documented below](#destination). * `existing_object_replication` - (Optional) Replicate existing objects in the source bucket according to the rule configurations [documented below](#existing_object_replication). -* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies [documented below](#filter). +* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies subset of objects to which the replication rule applies [documented below](#filter). If not specified, the `rule` will default to using `prefix`. * `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. -* `prefix` - (Optional, Conflicts with `filter`) Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `prefix` - (Optional, Conflicts with `filter`) Object key name prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. Defaults to an empty string (`""`) if `filter` is not specified. * `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. * `source_selection_criteria` - (Optional) Specifies special object selection criteria [documented below](#source_selection_criteria). * `status` - (Required) The status of the rule. Either `"Enabled"` or `"Disabled"`. The rule is ignored if status is not "Enabled". @@ -360,7 +393,8 @@ The `existing_object_replication` configuration block supports the following arg ### filter -~> **NOTE:** With the `filter` argument, you must specify exactly one of `prefix`, `tag`, or `and`. Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. +~> **NOTE:** The `filter` argument must be specified as either an empty configuration block (`filter {}`) to imply the rule requires no filter or with exactly one of `prefix`, `tag`, or `and`. +Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. The `filter` configuration block supports the following arguments:
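For illustration, the empty `filter {}` block documented above can be combined with the other documented arguments roughly as follows. This is a minimal sketch modeled on the acceptance test configuration added in this change (`testAccBucketReplicationConfiguration_filter_emptyConfigurationBlock`); the `source`/`destination` bucket, versioning, and IAM role resource names are assumed placeholders rather than part of this diff:

```terraform
resource "aws_s3_bucket_replication_configuration" "example" {
  # Bucket versioning must be enabled on the source bucket first.
  depends_on = [aws_s3_bucket_versioning.source]

  bucket = aws_s3_bucket.source.id
  role   = aws_iam_role.replication.arn

  rule {
    id     = "replicate-all"
    status = "Enabled"

    # An empty filter block applies the rule to every object in the bucket;
    # `filter { prefix = "" }` likewise matches all object keys.
    filter {}

    # The acceptance tests in this change pair filter-based (V2) rules with an
    # explicit delete_marker_replication status.
    delete_marker_replication {
      status = "Disabled"
    }

    destination {
      bucket = aws_s3_bucket.destination.arn
    }
  }
}
```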