diff --git a/.changelog/23842.txt b/.changelog/23842.txt new file mode 100644 index 00000000000..b757bbb820f --- /dev/null +++ b/.changelog/23842.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket: Update `replication_configuration` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_replication_configuration` resource. +``` \ No newline at end of file diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index ee28aa9ff68..155e81ef959 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -403,100 +403,105 @@ func ResourceBucket() *schema.Resource { "replication_configuration": { Type: schema.TypeList, + Optional: true, Computed: true, + MaxItems: 1, Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "role": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Required: true, }, "rules": { - Type: schema.TypeSet, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeSet, + Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "id": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 255), }, "destination": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeList, + MaxItems: 1, + MinItems: 1, + Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "account_id": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidAccountID, }, "bucket": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, }, "storage_class": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(s3.StorageClass_Values(), false), }, "replica_kms_key_id": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, }, "access_control_translation": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "owner": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.OwnerOverride_Values(), false), }, }, }, }, "replication_time": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource 
instead", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "minutes": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeInt, + Optional: true, + Default: 15, + ValidateFunc: validation.IntBetween(15, 15), }, "status": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + Default: s3.ReplicationTimeStatusEnabled, + ValidateFunc: validation.StringInSlice(s3.ReplicationTimeStatus_Values(), false), }, }, }, }, "metrics": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "minutes": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeInt, + Optional: true, + Default: 15, + ValidateFunc: validation.IntBetween(10, 15), }, "status": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + Default: s3.MetricsStatusEnabled, + ValidateFunc: validation.StringInSlice(s3.MetricsStatus_Values(), false), }, }, }, @@ -505,21 +510,22 @@ func ResourceBucket() *schema.Resource { }, }, "source_selection_criteria": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "sse_kms_encrypted_objects": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { - Type: schema.TypeBool, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeBool, + Required: true, }, }, }, @@ -528,39 +534,39 @@ func ResourceBucket() *schema.Resource { }, }, "prefix": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), }, "status": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.ReplicationRuleStatus_Values(), false), }, "priority": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeInt, + Optional: true, }, "filter": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "prefix": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + 
ValidateFunc: validation.StringLenBetween(0, 1024), }, - "tags": tftags.TagsSchemaComputedDeprecated("Use the aws_s3_bucket_replication_configuration resource instead"), + "tags": tftags.TagsSchema(), }, }, }, "delete_marker_replication_status": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{s3.DeleteMarkerReplicationStatusEnabled}, false), }, }, }, @@ -774,21 +780,40 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } - if d.HasChange("acceleration_status") { - if err := resourceBucketInternalAccelerationUpdate(conn, d); err != nil { - return fmt.Errorf("error updating S3 Bucket (%s) Acceleration Status: %w", d.Id(), err) + // Note: Order of argument updates below is important + + if d.HasChange("cors_rule") { + if err := resourceBucketInternalCorsUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) CORS Rules: %w", d.Id(), err) } } - if d.HasChange("acl") && !d.IsNewResource() { - if err := resourceBucketInternalACLUpdate(conn, d); err != nil { - return fmt.Errorf("error updating S3 Bucket (%s) ACL: %w", d.Id(), err) + if d.HasChange("website") { + if err := resourceBucketInternalWebsiteUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Website: %w", d.Id(), err) } } - if d.HasChange("cors_rule") { - if err := resourceBucketInternalCorsUpdate(conn, d); err != nil { - return fmt.Errorf("error updating S3 Bucket (%s) CORS Rules: %w", d.Id(), err) + if d.HasChange("versioning") { + v := d.Get("versioning").([]interface{}) + + if d.IsNewResource() { + if versioning := expandVersioningWhenIsNewResource(v); versioning != nil { + err := resourceBucketInternalVersioningUpdate(conn, d.Id(), versioning) + if err != nil { + return fmt.Errorf("error updating (new) S3 Bucket (%s) Versioning: %w", d.Id(), err) + } + } + } else { + if err := resourceBucketInternalVersioningUpdate(conn, d.Id(), expandVersioning(v)); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Versioning: %w", d.Id(), err) + } + } + } + + if d.HasChange("acl") && !d.IsNewResource() { + if err := resourceBucketInternalACLUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) ACL: %w", d.Id(), err) } } @@ -798,21 +823,27 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("logging") { + if err := resourceBucketInternalLoggingUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Logging: %w", d.Id(), err) + } + } + if d.HasChange("lifecycle_rule") { if err := resourceBucketInternalLifecycleUpdate(conn, d); err != nil { return fmt.Errorf("error updating S3 Bucket (%s) Lifecycle Rules: %w", d.Id(), err) } } - if d.HasChange("logging") { - if err := resourceBucketInternalLoggingUpdate(conn, d); err != nil { - return fmt.Errorf("error updating S3 Bucket (%s) Logging: %w", d.Id(), err) + if d.HasChange("acceleration_status") { + if err := resourceBucketInternalAccelerationUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Acceleration Status: %w", d.Id(), err) } } - if d.HasChange("object_lock_configuration") { - if err := resourceBucketInternalObjectLockConfigurationUpdate(conn, d); err != nil { - return fmt.Errorf("error updating S3 Bucket (%s) Object Lock configuration: %w", d.Id(), err) + if d.HasChange("replication_configuration") 
{ + if err := resourceBucketInternalReplicationConfigurationUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Replication configuration: %w", d.Id(), err) } } @@ -822,26 +853,9 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } - if d.HasChange("versioning") { - v := d.Get("versioning").([]interface{}) - - if d.IsNewResource() { - if versioning := expandVersioningWhenIsNewResource(v); versioning != nil { - err := resourceBucketInternalVersioningUpdate(conn, d.Id(), versioning) - if err != nil { - return fmt.Errorf("error updating (new) S3 Bucket (%s) Versioning: %w", d.Id(), err) - } - } - } else { - if err := resourceBucketInternalVersioningUpdate(conn, d.Id(), expandVersioning(v)); err != nil { - return fmt.Errorf("error updating S3 Bucket (%s) Versioning: %w", d.Id(), err) - } - } - } - - if d.HasChange("website") { - if err := resourceBucketInternalWebsiteUpdate(conn, d); err != nil { - return fmt.Errorf("error updating S3 Bucket (%s) Website: %w", d.Id(), err) + if d.HasChange("object_lock_configuration") { + if err := resourceBucketInternalObjectLockConfigurationUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Object Lock configuration: %w", d.Id(), err) } } @@ -1857,6 +1871,60 @@ func resourceBucketInternalObjectLockConfigurationUpdate(conn *s3.S3, d *schema. return err } +func resourceBucketInternalReplicationConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { + replicationConfiguration := d.Get("replication_configuration").([]interface{}) + + if len(replicationConfiguration) == 0 { + input := &s3.DeleteBucketReplicationInput{ + Bucket: aws.String(d.Id()), + } + + _, err := conn.DeleteBucketReplication(input) + + if err != nil { + return fmt.Errorf("error removing S3 Bucket (%s) Replication: %w", d.Id(), err) + } + + return nil + } + + hasVersioning := false + // Validate that bucket versioning is enabled + if versioning, ok := d.GetOk("versioning"); ok { + v := versioning.([]interface{}) + + if v[0].(map[string]interface{})["enabled"].(bool) { + hasVersioning = true + } + } + + if !hasVersioning { + return fmt.Errorf("versioning must be enabled to allow S3 bucket replication") + } + + input := &s3.PutBucketReplicationInput{ + Bucket: aws.String(d.Id()), + ReplicationConfiguration: expandBucketReplicationConfiguration(replicationConfiguration), + } + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + _, err := conn.PutBucketReplication(input) + if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, ErrCodeInvalidRequest, "Versioning must be 'Enabled' on the bucket") { + return resource.RetryableError(err) + } + if err != nil { + return resource.NonRetryableError(err) + } + return nil + }) + + if tfresource.TimedOut(err) { + _, err = conn.PutBucketReplication(input) + } + + return err +} + func resourceBucketInternalServerSideEncryptionConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { serverSideEncryptionConfiguration := d.Get("server_side_encryption_configuration").([]interface{}) @@ -2325,6 +2393,185 @@ func flattenS3ObjectLockConfiguration(conf *s3.ObjectLockConfiguration) []interf // Replication Configuration functions +func expandBucketReplicationConfiguration(l []interface{}) *s3.ReplicationConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } + + rc := &s3.ReplicationConfiguration{} + + if val, ok := 
tfMap["role"].(string); ok { + rc.Role = aws.String(val) + } + + if v, ok := tfMap["rules"].(*schema.Set); ok && v.Len() > 0 { + rc.Rules = expandBucketReplicationConfigurationRules(v.List()) + } + + return rc +} + +func expandBucketReplicationConfigurationRules(l []interface{}) []*s3.ReplicationRule { + var rules []*s3.ReplicationRule + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + + rcRule := &s3.ReplicationRule{} + + if status, ok := tfMap["status"].(string); ok && status != "" { + rcRule.Status = aws.String(status) + } else { + continue + } + + if v, ok := tfMap["id"].(string); ok && v != "" { + rcRule.ID = aws.String(v) + } + + if v, ok := tfMap["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rcRule.Destination = expandBucketReplicationConfigurationRulesDestination(v) + } else { + rcRule.Destination = &s3.Destination{} + } + + if v, ok := tfMap["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rcRule.SourceSelectionCriteria = expandBucketReplicationConfigurationRulesSourceSelectionCriteria(v) + } + + if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + // XML schema V2. + rcRule.Priority = aws.Int64(int64(tfMap["priority"].(int))) + + rcRule.Filter = &s3.ReplicationRuleFilter{} + + filter := v[0].(map[string]interface{}) + tags := Tags(tftags.New(filter["tags"]).IgnoreAWS()) + + if len(tags) > 0 { + rcRule.Filter.And = &s3.ReplicationRuleAndOperator{ + Prefix: aws.String(filter["prefix"].(string)), + Tags: tags, + } + } else { + rcRule.Filter.Prefix = aws.String(filter["prefix"].(string)) + } + + if dmr, ok := tfMap["delete_marker_replication_status"].(string); ok && dmr != "" { + rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ + Status: aws.String(dmr), + } + } else { + rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + } + } + } else { + // XML schema V1. 
+ rcRule.Prefix = aws.String(tfMap["prefix"].(string)) + } + + rules = append(rules, rcRule) + } + + return rules +} + +func expandBucketReplicationConfigurationRulesDestination(l []interface{}) *s3.Destination { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } + + ruleDestination := &s3.Destination{} + + if v, ok := tfMap["bucket"].(string); ok { + ruleDestination.Bucket = aws.String(v) + } + + if v, ok := tfMap["storage_class"].(string); ok && v != "" { + ruleDestination.StorageClass = aws.String(v) + } + + if v, ok := tfMap["replica_kms_key_id"].(string); ok && v != "" { + ruleDestination.EncryptionConfiguration = &s3.EncryptionConfiguration{ + ReplicaKmsKeyID: aws.String(v), + } + } + + if v, ok := tfMap["account_id"].(string); ok && v != "" { + ruleDestination.Account = aws.String(v) + } + + if v, ok := tfMap["access_control_translation"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + aclTranslationValues := v[0].(map[string]interface{}) + ruleAclTranslation := &s3.AccessControlTranslation{} + ruleAclTranslation.Owner = aws.String(aclTranslationValues["owner"].(string)) + ruleDestination.AccessControlTranslation = ruleAclTranslation + } + + // replication metrics (required for RTC) + if v, ok := tfMap["metrics"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + metricsConfig := &s3.Metrics{} + metricsValues := v[0].(map[string]interface{}) + metricsConfig.EventThreshold = &s3.ReplicationTimeValue{} + metricsConfig.Status = aws.String(metricsValues["status"].(string)) + metricsConfig.EventThreshold.Minutes = aws.Int64(int64(metricsValues["minutes"].(int))) + ruleDestination.Metrics = metricsConfig + } + + // replication time control (RTC) + if v, ok := tfMap["replication_time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rtcValues := v[0].(map[string]interface{}) + rtcConfig := &s3.ReplicationTime{} + rtcConfig.Status = aws.String(rtcValues["status"].(string)) + rtcConfig.Time = &s3.ReplicationTimeValue{} + rtcConfig.Time.Minutes = aws.Int64(int64(rtcValues["minutes"].(int))) + ruleDestination.ReplicationTime = rtcConfig + } + + return ruleDestination +} + +func expandBucketReplicationConfigurationRulesSourceSelectionCriteria(l []interface{}) *s3.SourceSelectionCriteria { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } + + ruleSsc := &s3.SourceSelectionCriteria{} + + if v, ok := tfMap["sse_kms_encrypted_objects"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + sseKmsValues := v[0].(map[string]interface{}) + sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{} + + if sseKmsValues["enabled"].(bool) { + sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusEnabled) + } else { + sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusDisabled) + } + ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects + } + + return ruleSsc +} + func flattenBucketReplicationConfiguration(r *s3.ReplicationConfiguration) []interface{} { if r == nil { return []interface{}{} diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index 07648700864..6c45bb01cf5 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -1073,6 +1073,88 @@ func TestAccS3BucketReplicationConfiguration_withoutPrefix(t *testing.T) { }) } +func 
TestAccS3BucketReplicationConfiguration_migrate_noChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_replication_configuration.test" + bucketResourceName := "aws_s3_bucket.source" + region := acctest.Region() + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroy, &providers), + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withReplicationV2_PrefixAndTags(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExistsWithProvider(bucketResourceName, acctest.RegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(bucketResourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(bucketResourceName, "replication_configuration.0.rules.*", map[string]string{ + "filter.#": "1", + "filter.0.prefix": "foo", + "filter.0.tags.%": "2", + }), + ), + }, + { + Config: testAccBucketReplicationConfiguration_Migrate_NoChangeConfig(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.filter.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.filter.0.and.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.filter.0.and.0.prefix", "foo"), + resource.TestCheckResourceAttr(resourceName, "rule.0.filter.0.and.0.tags.%", "2"), + ), + }, + }, + }) +} + +func TestAccS3BucketReplicationConfiguration_migrate_withChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_replication_configuration.test" + bucketResourceName := "aws_s3_bucket.source" + region := acctest.Region() + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroy, &providers), + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withReplicationV2_PrefixAndTags(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExistsWithProvider(bucketResourceName, acctest.RegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(bucketResourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(bucketResourceName, "replication_configuration.0.rules.*", map[string]string{ + "filter.#": "1", + "filter.0.prefix": "foo", + "filter.0.tags.%": "2", + }), + ), + }, + { + Config: testAccBucketReplicationConfiguration_Migrate_WithChangeConfig(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.filter.#", "1"), + 
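+					// The updated config drops the tags, so the prefix is expected
+					// directly under filter.0.prefix rather than filter.0.and
+					// (compare the noChange test above).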
resource.TestCheckResourceAttr(resourceName, "rule.0.filter.0.prefix", "bar"), + ), + }, + }, + }) +} + func testAccCheckBucketReplicationConfigurationDestroy(s *terraform.State, provider *schema.Provider) error { conn := provider.Meta().(*conns.AWSClient).S3Conn @@ -2185,3 +2267,137 @@ resource "aws_s3_bucket_replication_configuration" "test" { } }`) } + +func testAccBucketReplicationConfigurationMigrationBase(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "role" { + name = %[1]q + + assume_role_policy = < 0 && err == nil { - t.Fatalf("expected %q to trigger an error", tc.Region) - } - if output != tc.ExpectedOutput { - t.Fatalf("expected %q, received %q", tc.ExpectedOutput, output) - } - } + }) } -func TestWebsiteEndpoint(t *testing.T) { - // https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html - testCases := []struct { - TestingClient *conns.AWSClient - LocationConstraint string - Expected string - }{ - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.UsEast1RegionID, +func TestAccS3Bucket_Security_corsEmptyOrigin(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withCORSEmptyOrigin(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_headers.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_headers.0", "*"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.#", "2"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.0", "PUT"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.1", "POST"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_origins.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_origins.0", ""), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.#", "2"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.0", "x-amz-server-side-encryption"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.1", "ETag"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.max_age_seconds", "3000"), + ), }, - LocationConstraint: "", - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsEast1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.UsWest2RegionID, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, }, - LocationConstraint: endpoints.UsWest2RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsWest2RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.UsWest1RegionID, - }, - LocationConstraint: endpoints.UsWest1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsWest1RegionID, acctest.PartitionDNSSuffix()), }, 
- { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.EuWest1RegionID, + }) +} + +func TestAccS3Bucket_Security_corsSingleMethodAndEmptyOrigin(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withCORSSingleMethodAndEmptyOrigin(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), }, - LocationConstraint: endpoints.EuWest1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.EuWest1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.EuWest3RegionID, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, }, - LocationConstraint: endpoints.EuWest3RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.EuWest3RegionID, acctest.PartitionDNSSuffix()), }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.EuCentral1RegionID, + }) +} + +func TestAccS3Bucket_Security_logging(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLogging(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "logging.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "logging.0.target_bucket", "aws_s3_bucket.log_bucket", "id"), + resource.TestCheckResourceAttr(resourceName, "logging.0.target_prefix", "log/"), + ), }, - LocationConstraint: endpoints.EuCentral1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.EuCentral1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApSouth1RegionID, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, }, - LocationConstraint: endpoints.ApSouth1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.ApSouth1RegionID, acctest.PartitionDNSSuffix()), }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApSoutheast1RegionID, + }) +} +func TestAccS3Bucket_Security_enableDefaultEncryptionWhenTypical(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withDefaultEncryption_KmsMasterKey(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), 
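+					// A single SSE-KMS rule should be reported, with
+					// kms_master_key_id resolving to the key's ARN.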
+ resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestMatchResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", regexp.MustCompile("^arn")), + ), }, - LocationConstraint: endpoints.ApSoutheast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApSoutheast1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApNortheast1RegionID, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, }, - LocationConstraint: endpoints.ApNortheast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApNortheast1RegionID, acctest.PartitionDNSSuffix()), }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApSoutheast2RegionID, + }) +} + +func TestAccS3Bucket_Security_enableDefaultEncryptionWhenAES256IsUsed(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withDefaultEncryption_defaultKey(bucketName, s3.ServerSideEncryptionAes256), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAes256), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", ""), + ), }, - LocationConstraint: endpoints.ApSoutheast2RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApSoutheast2RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApNortheast2RegionID, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, }, - LocationConstraint: endpoints.ApNortheast2RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.ApNortheast2RegionID, acctest.PartitionDNSSuffix()), }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.SaEast1RegionID, + }) +} + +func 
TestAccS3Bucket_Security_disableDefaultEncryptionWhenDefaultEncryptionIsEnabled(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withDefaultEncryption_defaultKey(bucketName, s3.ServerSideEncryptionAwsKms), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), }, - LocationConstraint: endpoints.SaEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.SaEast1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.UsGovEast1RegionID, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, }, - LocationConstraint: endpoints.UsGovEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.UsGovEast1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.UsGovWest1RegionID, + { + // As ServerSide Encryption Configuration is a Computed field, removing them from terraform will not + // trigger an update to remove it from the S3 bucket. + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "1"), + ), }, - LocationConstraint: endpoints.UsGovWest1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsGovWest1RegionID, acctest.PartitionDNSSuffix()), }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "c2s.ic.gov", - Region: endpoints.UsIsoEast1RegionID, + }) +} + +func TestAccS3Bucket_Web_simple(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + region := acctest.Region() + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withWebsite(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), }, - LocationConstraint: endpoints.UsIsoEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.c2s.ic.gov", endpoints.UsIsoEast1RegionID), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "sc2s.sgov.gov", - Region: endpoints.UsIsobEast1RegionID, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "grant"}, }, - LocationConstraint: endpoints.UsIsobEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.sc2s.sgov.gov", endpoints.UsIsobEast1RegionID), + { + Config: testAccBucketConfig_withWebsiteAndError(bucketName), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + resource.TestCheckResourceAttr(resourceName, "website.0.error_document", "error.html"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + { + // As Website is a Computed field, removing them from terraform will not + // trigger an update to remove them from the S3 bucket. + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + resource.TestCheckResourceAttr(resourceName, "website.0.error_document", "error.html"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + }, + }) +} + +func TestAccS3Bucket_Web_redirect(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + region := acctest.Region() + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withWebsiteAndRedirect(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.redirect_all_requests_to", "hashicorp.com?my=query"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "grant"}, + }, + { + Config: testAccBucketConfig_withWebsiteAndHTTPSRedirect(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.redirect_all_requests_to", "https://hashicorp.com?my=query"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + { + // As Website is a Computed field, removing them from terraform will not + // trigger an update to remove them from the S3 bucket. 
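+				// The bare config below therefore still expects the HTTPS
+				// redirect from the previous step to be present in state.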
+ Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.redirect_all_requests_to", "https://hashicorp.com?my=query"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + }, + }) +} + +func TestAccS3Bucket_Web_routingRules(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + region := acctest.Region() + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withWebsiteAndRoutingRules(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.error_document", "error.html"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + resource.TestCheckResourceAttrSet(resourceName, "website.0.routing_rules"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "grant"}, + }, + { + // As Website is a Computed field, removing them from terraform will not + // trigger an update to remove them from the S3 bucket. + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.error_document", "error.html"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + resource.TestCheckResourceAttrSet(resourceName, "website.0.routing_rules"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + }, + }) +} + +func TestBucketName(t *testing.T) { + validDnsNames := []string{ + "foobar", + "foo.bar", + "foo.bar.baz", + "1234", + "foo-bar", + strings.Repeat("x", 63), + } + + for _, v := range validDnsNames { + if err := tfs3.ValidBucketName(v, endpoints.UsWest2RegionID); err != nil { + t.Fatalf("%q should be a valid S3 bucket name", v) + } + } + + invalidDnsNames := []string{ + "foo..bar", + "Foo.Bar", + "192.168.0.1", + "127.0.0.1", + ".foo", + "bar.", + "foo_bar", + strings.Repeat("x", 64), + } + + for _, v := range invalidDnsNames { + if err := tfs3.ValidBucketName(v, endpoints.UsWest2RegionID); err == nil { + t.Fatalf("%q should not be a valid S3 bucket name", v) + } + } + + validEastNames := []string{ + "foobar", + "foo_bar", + "127.0.0.1", + "foo..bar", + "foo_bar_baz", + "foo.bar.baz", + "Foo.Bar", + strings.Repeat("x", 255), + } + + for _, v := range validEastNames { + if err := tfs3.ValidBucketName(v, endpoints.UsEast1RegionID); err != nil { + t.Fatalf("%q should be a valid S3 bucket name", v) + } + } + + invalidEastNames := []string{ + "foo;bar", + strings.Repeat("x", 256), + } + + for _, v := range invalidEastNames { + if err := tfs3.ValidBucketName(v, endpoints.UsEast1RegionID); err == nil { + 
t.Fatalf("%q should not be a valid S3 bucket name", v) + } + } +} + +func TestBucketRegionalDomainName(t *testing.T) { + const bucket = "bucket-name" + + var testCases = []struct { + ExpectedErrCount int + ExpectedOutput string + Region string + }{ + { + Region: "", + ExpectedErrCount: 0, + ExpectedOutput: bucket + ".s3.amazonaws.com", + }, + { + Region: "custom", + ExpectedErrCount: 0, + ExpectedOutput: bucket + ".s3.custom.amazonaws.com", + }, + { + Region: endpoints.UsEast1RegionID, + ExpectedErrCount: 0, + ExpectedOutput: bucket + ".s3.amazonaws.com", + }, + { + Region: endpoints.UsWest2RegionID, + ExpectedErrCount: 0, + ExpectedOutput: bucket + fmt.Sprintf(".s3.%s.%s", endpoints.UsWest2RegionID, acctest.PartitionDNSSuffix()), + }, + { + Region: endpoints.UsGovWest1RegionID, + ExpectedErrCount: 0, + ExpectedOutput: bucket + fmt.Sprintf(".s3.%s.%s", endpoints.UsGovWest1RegionID, acctest.PartitionDNSSuffix()), }, + { + Region: endpoints.CnNorth1RegionID, + ExpectedErrCount: 0, + ExpectedOutput: bucket + fmt.Sprintf(".s3.%s.amazonaws.com.cn", endpoints.CnNorth1RegionID), + }, + } + + for _, tc := range testCases { + output, err := tfs3.BucketRegionalDomainName(bucket, tc.Region) + if tc.ExpectedErrCount == 0 && err != nil { + t.Fatalf("expected %q not to trigger an error, received: %s", tc.Region, err) + } + if tc.ExpectedErrCount > 0 && err == nil { + t.Fatalf("expected %q to trigger an error", tc.Region) + } + if output != tc.ExpectedOutput { + t.Fatalf("expected %q, received %q", tc.ExpectedOutput, output) + } + } +} + +func TestWebsiteEndpoint(t *testing.T) { + // https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html + testCases := []struct { + TestingClient *conns.AWSClient + LocationConstraint string + Expected string + }{ { TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com.cn", - Region: endpoints.CnNorthwest1RegionID, + DNSSuffix: "amazonaws.com", + Region: endpoints.UsEast1RegionID, }, - LocationConstraint: endpoints.CnNorthwest1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com.cn", endpoints.CnNorthwest1RegionID), + LocationConstraint: "", + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsEast1RegionID, acctest.PartitionDNSSuffix()), }, { TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com.cn", - Region: endpoints.CnNorth1RegionID, + DNSSuffix: "amazonaws.com", + Region: endpoints.UsWest2RegionID, + }, + LocationConstraint: endpoints.UsWest2RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsWest2RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.UsWest1RegionID, + }, + LocationConstraint: endpoints.UsWest1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsWest1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.EuWest1RegionID, + }, + LocationConstraint: endpoints.EuWest1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.EuWest1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.EuWest3RegionID, + }, + LocationConstraint: endpoints.EuWest3RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.EuWest3RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.EuCentral1RegionID, + }, + 
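+			// Newer regions such as eu-central-1 use the dotted
+			// s3-website.{region} endpoint form instead of the legacy dashed
+			// s3-website-{region} form.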
LocationConstraint: endpoints.EuCentral1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.EuCentral1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.ApSouth1RegionID, + }, + LocationConstraint: endpoints.ApSouth1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.ApSouth1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.ApSoutheast1RegionID, + }, + LocationConstraint: endpoints.ApSoutheast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApSoutheast1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.ApNortheast1RegionID, + }, + LocationConstraint: endpoints.ApNortheast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApNortheast1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.ApSoutheast2RegionID, + }, + LocationConstraint: endpoints.ApSoutheast2RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApSoutheast2RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.ApNortheast2RegionID, + }, + LocationConstraint: endpoints.ApNortheast2RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.ApNortheast2RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.SaEast1RegionID, + }, + LocationConstraint: endpoints.SaEast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.SaEast1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.UsGovEast1RegionID, + }, + LocationConstraint: endpoints.UsGovEast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.UsGovEast1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.UsGovWest1RegionID, + }, + LocationConstraint: endpoints.UsGovWest1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsGovWest1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "c2s.ic.gov", + Region: endpoints.UsIsoEast1RegionID, + }, + LocationConstraint: endpoints.UsIsoEast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.c2s.ic.gov", endpoints.UsIsoEast1RegionID), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "sc2s.sgov.gov", + Region: endpoints.UsIsobEast1RegionID, + }, + LocationConstraint: endpoints.UsIsobEast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.sc2s.sgov.gov", endpoints.UsIsobEast1RegionID), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com.cn", + Region: endpoints.CnNorthwest1RegionID, + }, + LocationConstraint: endpoints.CnNorthwest1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com.cn", endpoints.CnNorthwest1RegionID), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com.cn", + Region: endpoints.CnNorth1RegionID, }, LocationConstraint: endpoints.CnNorth1RegionID, Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com.cn", 
endpoints.CnNorth1RegionID), }, } - for _, testCase := range testCases { - got := tfs3.WebsiteEndpoint(testCase.TestingClient, "bucket-name", testCase.LocationConstraint) - if got.Endpoint != testCase.Expected { - t.Errorf("WebsiteEndpointUrl(\"bucket-name\", %q) => %q, want %q", testCase.LocationConstraint, got.Endpoint, testCase.Expected) - } - } + for _, testCase := range testCases { + got := tfs3.WebsiteEndpoint(testCase.TestingClient, "bucket-name", testCase.LocationConstraint) + if got.Endpoint != testCase.Expected { + t.Errorf("WebsiteEndpointUrl(\"bucket-name\", %q) => %q, want %q", testCase.LocationConstraint, got.Endpoint, testCase.Expected) + } + } +} + +func testAccCheckBucketDestroy(s *terraform.State) error { + return testAccCheckBucketDestroyWithProvider(s, acctest.Provider) +} + +func testAccCheckBucketDestroyWithProvider(s *terraform.State, provider *schema.Provider) error { + conn := provider.Meta().(*conns.AWSClient).S3Conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3_bucket" { + continue + } + + input := &s3.HeadBucketInput{ + Bucket: aws.String(rs.Primary.ID), + } + + // Retry for S3 eventual consistency + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + _, err := conn.HeadBucket(input) + + if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrCodeEquals(err, "NotFound") { + return nil + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return resource.RetryableError(fmt.Errorf("AWS S3 Bucket still exists: %s", rs.Primary.ID)) + }) + + if tfresource.TimedOut(err) { + _, err = conn.HeadBucket(input) + } + + if err != nil { + return err + } + } + return nil +} + +func testAccCheckBucketExists(n string) resource.TestCheckFunc { + return testAccCheckBucketExistsWithProvider(n, func() *schema.Provider { return acctest.Provider }) +} + +func testAccCheckBucketExistsWithProvider(n string, providerF func() *schema.Provider) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + provider := providerF() + + conn := provider.Meta().(*conns.AWSClient).S3Conn + _, err := conn.HeadBucket(&s3.HeadBucketInput{ + Bucket: aws.String(rs.Primary.ID), + }) + + if err != nil { + if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + return fmt.Errorf("S3 bucket not found") + } + return err + } + return nil + + } +} + +func testAccCheckBucketAddObjects(n string, keys ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3ConnURICleaningDisabled + + for _, key := range keys { + _, err := conn.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(rs.Primary.ID), + Key: aws.String(key), + }) + + if err != nil { + return fmt.Errorf("PutObject error: %s", err) + } + } + + return nil + } +} + +func testAccCheckBucketAddObjectsWithLegalHold(n string, keys ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn + + for _, key := range keys { + _, err := conn.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(rs.Primary.ID), + Key: aws.String(key), + ObjectLockLegalHoldStatus: aws.String(s3.ObjectLockLegalHoldStatusOn), + }) + + if err != nil { + return fmt.Errorf("PutObject error: %s", err) + } + } + + return 
nil + } +} + +// Create an S3 bucket via a CF stack so that it has system tags. +func testAccCheckBucketCreateViaCloudFormation(n string, stackID *string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).CloudFormationConn + stackName := sdkacctest.RandomWithPrefix("tf-acc-test-s3tags") + templateBody := fmt.Sprintf(`{ + "Resources": { + "TfTestBucket": { + "Type": "AWS::S3::Bucket", + "Properties": { + "BucketName": "%s" + } + } + } +}`, n) + + requestToken := resource.UniqueId() + req := &cloudformation.CreateStackInput{ + StackName: aws.String(stackName), + TemplateBody: aws.String(templateBody), + ClientRequestToken: aws.String(requestToken), + } + + log.Printf("[DEBUG] Creating CloudFormation stack: %s", req) + resp, err := conn.CreateStack(req) + if err != nil { + return fmt.Errorf("error creating CloudFormation stack: %w", err) + } + + stack, err := tfcloudformation.WaitStackCreated(conn, aws.StringValue(resp.StackId), requestToken, 10*time.Minute) + if err != nil { + return fmt.Errorf("Error waiting for CloudFormation stack creation: %w", err) + } + status := aws.StringValue(stack.StackStatus) + if status != cloudformation.StackStatusCreateComplete { + return fmt.Errorf("Invalid CloudFormation stack creation status: %s", status) + } + + *stackID = aws.StringValue(resp.StackId) + return nil + } +} + +func testAccCheckBucketTagKeys(n string, keys ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn + + got, err := tfs3.BucketListTags(conn, rs.Primary.Attributes["bucket"]) + if err != nil { + return err + } + + for _, want := range keys { + ok := false + for _, key := range got.Keys() { + if want == key { + ok = true + break + } + } + if !ok { + return fmt.Errorf("Key %s not found in bucket's tag set", want) + } + } + + return nil + } +} + +func testAccCheckS3BucketDomainName(resourceName string, attributeName string, bucketName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + expectedValue := acctest.Provider.Meta().(*conns.AWSClient).PartitionHostname(fmt.Sprintf("%s.s3", bucketName)) + + return resource.TestCheckResourceAttr(resourceName, attributeName, expectedValue)(s) + } +} + +func testAccBucketRegionalDomainName(bucket, region string) string { + regionalEndpoint, err := tfs3.BucketRegionalDomainName(bucket, region) + if err != nil { + return fmt.Sprintf("Regional endpoint not found for bucket %s", bucket) + } + return regionalEndpoint +} + +func testAccCheckS3BucketWebsiteEndpoint(resourceName string, attributeName string, bucketName string, region string) resource.TestCheckFunc { + return func(s *terraform.State) error { + website := tfs3.WebsiteEndpoint(acctest.Provider.Meta().(*conns.AWSClient), bucketName, region) + expectedValue := website.Endpoint + + return resource.TestCheckResourceAttr(resourceName, attributeName, expectedValue)(s) + } +} + +func testAccCheckBucketUpdateTags(n string, oldTags, newTags map[string]string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn + + return tfs3.BucketUpdateTags(conn, rs.Primary.Attributes["bucket"], oldTags, newTags) + } +} + +func testAccCheckBucketCheckTags(n string, expectedTags map[string]string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] 
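+		// Fetch the bucket's tag set from the API and compare it with the
+		// expected map below.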
+ conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn + + got, err := tfs3.BucketListTags(conn, rs.Primary.Attributes["bucket"]) + if err != nil { + return err + } + + want := tftags.New(expectedTags) + if !reflect.DeepEqual(want, got) { + return fmt.Errorf("Incorrect tags, want: %v got: %v", want, got) + } + + return nil + } +} + +func testAccBucketConfig_Basic(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} +`, bucketName) +} + +func testAccBucketConfig_withAcceleration(bucketName, acceleration string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acceleration_status = %[2]q +} +`, bucketName, acceleration) +} + +func testAccBucketConfig_withACL(bucketName, acl string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = %[2]q +} +`, bucketName, acl) +} + +func testAccBucketConfig_withCORS(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT", "POST"] + allowed_origins = ["https://www.example.com"] + expose_headers = ["x-amz-server-side-encryption", "ETag"] + max_age_seconds = 3000 + } +} +`, bucketName) +} + +func testAccBucketConfig_withCORSSingleMethodAndEmptyOrigin(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + cors_rule { + allowed_methods = ["GET"] + allowed_origins = [""] + } +} +`, bucketName) +} + +func testAccBucketConfig_withCORSEmptyOrigin(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT", "POST"] + allowed_origins = [""] + expose_headers = ["x-amz-server-side-encryption", "ETag"] + max_age_seconds = 3000 + } +} +`, bucketName) +} + +func testAccBucketConfig_withDefaultEncryption_defaultKey(bucketName, sseAlgorithm string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + server_side_encryption_configuration { + rule { + apply_server_side_encryption_by_default { + sse_algorithm = %[2]q + } + } + } +} +`, bucketName, sseAlgorithm) +} + +func testAccBucketConfig_withDefaultEncryption_KmsMasterKey(bucketName string) string { + return fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = "KMS Key for Bucket %[1]s" + deletion_window_in_days = 10 +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + server_side_encryption_configuration { + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.test.arn + sse_algorithm = "aws:kms" + } + } + } +} +`, bucketName) +} + +func testAccBucketConfig_withDefaultEncryptionAndBucketKeyEnabled_KmsMasterKey(bucketName string) string { + return fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = "KMS Key for Bucket %[1]s" + deletion_window_in_days = 7 +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + server_side_encryption_configuration { + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.test.arn + sse_algorithm = "aws:kms" + } + bucket_key_enabled = true + } + } +} +`, bucketName) +} + +func testAccBucketConfig_withGrants(bucketName string) string { + return fmt.Sprintf(` +data "aws_canonical_user_id" "current" {} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + grant { + id = data.aws_canonical_user_id.current.id + type = "CanonicalUser" + permissions = 
["FULL_CONTROL", "WRITE"] + } +} +`, bucketName) +} + +func testAccBucketConfig_withUpdatedGrants(bucketName string) string { + return fmt.Sprintf(` +data "aws_canonical_user_id" "current" {} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + grant { + id = data.aws_canonical_user_id.current.id + type = "CanonicalUser" + permissions = ["READ"] + } + + grant { + type = "Group" + permissions = ["READ_ACP"] + uri = "http://acs.amazonaws.com/groups/s3/LogDelivery" + } +} +`, bucketName) +} + +func testAccBucketConfig_withLifecycle(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + + lifecycle_rule { + id = "id1" + prefix = "path1/" + enabled = true + + expiration { + days = 365 + } + + transition { + days = 30 + storage_class = "STANDARD_IA" + } + + transition { + days = 60 + storage_class = "INTELLIGENT_TIERING" + } + + transition { + days = 90 + storage_class = "ONEZONE_IA" + } + + transition { + days = 120 + storage_class = "GLACIER" + } + + transition { + days = 210 + storage_class = "DEEP_ARCHIVE" + } + } + + lifecycle_rule { + id = "id2" + prefix = "path2/" + enabled = true + + expiration { + date = "2016-01-12" + } + } + + lifecycle_rule { + id = "id3" + prefix = "path3/" + enabled = true + + transition { + days = 0 + storage_class = "GLACIER" + } + } + + lifecycle_rule { + id = "id4" + prefix = "path4/" + enabled = true + + tags = { + "tagKey" = "tagValue" + "terraform" = "hashicorp" + } + + expiration { + date = "2016-01-12" + } + } + + lifecycle_rule { + id = "id5" + enabled = true + + tags = { + "tagKey" = "tagValue" + "terraform" = "hashicorp" + } + + transition { + days = 0 + storage_class = "GLACIER" + } + } + + lifecycle_rule { + id = "id6" + enabled = true + + tags = { + "tagKey" = "tagValue" + } + + transition { + days = 0 + storage_class = "GLACIER" + } + } +} +`, bucketName) } -func testAccCheckBucketDestroy(s *terraform.State) error { - return testAccCheckBucketDestroyWithProvider(s, acctest.Provider) +func testAccBucketConfig_withLifecycleExpireMarker(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + + lifecycle_rule { + id = "id1" + prefix = "path1/" + enabled = true + + expiration { + expired_object_delete_marker = "true" + } + } +} +`, bucketName) } -func testAccCheckBucketDestroyWithProvider(s *terraform.State, provider *schema.Provider) error { - conn := provider.Meta().(*conns.AWSClient).S3Conn +func testAccBucketConfig_withLifecycleRuleExpirationEmptyConfigurationBlock(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_s3_bucket" { - continue - } + lifecycle_rule { + enabled = true + id = "id1" - input := &s3.HeadBucketInput{ - Bucket: aws.String(rs.Primary.ID), - } + expiration {} + } +} +`, rName) +} - // Retry for S3 eventual consistency - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - _, err := conn.HeadBucket(input) +func testAccBucketConfig_withLifecycleRuleAbortIncompleteMultipartUploadDays(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrCodeEquals(err, "NotFound") { - return nil - } + lifecycle_rule { + abort_incomplete_multipart_upload_days = 7 + enabled = true + id = "id1" + } +} +`, rName) +} - if err != nil { - return resource.NonRetryableError(err) 
- } +func testAccBucketConfig_withLogging(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "log_bucket" { + bucket = "%[1]s-log" + acl = "log-delivery-write" +} - return resource.RetryableError(fmt.Errorf("AWS S3 Bucket still exists: %s", rs.Primary.ID)) - }) +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" - if tfresource.TimedOut(err) { - _, err = conn.HeadBucket(input) - } + logging { + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" + } +} +`, bucketName) +} - if err != nil { - return err - } - } - return nil +func testAccBucketConfig_ReplicationBase(bucketName string) string { + return acctest.ConfigCompose( + acctest.ConfigMultipleRegionProvider(2), + fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "role" { + name = %[1]q + + assume_role_policy = <<POLICY +~> **NOTE on S3 Bucket Replication Configuration:** S3 Bucket Replication can be configured in either the standalone resource [`aws_s3_bucket_replication_configuration`](s3_bucket_replication_configuration.html) +or with the deprecated parameter `replication_configuration` in the resource `aws_s3_bucket`. +Configuring with both will cause inconsistencies and may overwrite configuration. + ~> **NOTE on S3 Bucket Server Side Encryption Configuration:** S3 Bucket Server Side Encryption can be configured in either the standalone resource [`aws_s3_bucket_server_side_encryption_configuration`](s3_bucket_server_side_encryption_configuration.html) or with the deprecated parameter `server_side_encryption_configuration` in the resource `aws_s3_bucket`. Configuring with both will cause inconsistencies and may overwrite configuration. @@ -240,8 +244,131 @@ To **enable** Object Lock on an **existing** bucket, please contact AWS Support ### Using replication configuration -The `replication_configuration` argument is read-only as of version 4.0 of the Terraform AWS Provider. -See the [`aws_s3_bucket_replication_configuration` resource](s3_bucket_replication_configuration.html.markdown) for configuration details. +-> **NOTE:** The parameter `replication_configuration` is deprecated. +Use the resource [`aws_s3_bucket_replication_configuration`](s3_bucket_replication_configuration.html) instead. + +```terraform +provider "aws" { + region = "eu-west-1" +} + +provider "aws" { + alias = "central" + region = "eu-central-1" +} + +resource "aws_iam_role" "replication" { + name = "tf-iam-role-replication-12345" + + assume_role_policy = <<POLICY +~> **NOTE:** Currently, changes to the `replication_configuration` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage replication configuration changes to an S3 bucket, use the `aws_s3_bucket_replication_configuration` resource instead. If you use `replication_configuration` on an `aws_s3_bucket`, Terraform will assume management over the full replication configuration for the S3 bucket, treating additional replication configuration rules as drift. For this reason, `replication_configuration` cannot be mixed with the external `aws_s3_bucket_replication_configuration` resource for a given S3 bucket. + +The `replication_configuration` configuration block supports the following arguments: + +* `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. +* `rules` - (Required) Specifies the rules managing the replication ([documented below](#rules)).
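+A minimal, self-contained sketch of the block structure described above follows. All names here are illustrative placeholders rather than prescribed values: the `aws.replica` provider alias, both bucket names, and the `aws_iam_role.replication` role (with its replication policy) are assumed to be defined as in the full example.
+
+```terraform
+resource "aws_s3_bucket" "replica" {
+  provider = aws.replica # hypothetical provider alias for the destination region
+  bucket   = "example-replica-bucket"
+
+  versioning {
+    enabled = true
+  }
+}
+
+resource "aws_s3_bucket" "source" {
+  bucket = "example-source-bucket"
+  acl    = "private"
+
+  # Versioning must be enabled on both buckets for replication to work.
+  versioning {
+    enabled = true
+  }
+
+  replication_configuration {
+    role = aws_iam_role.replication.arn # IAM role that S3 assumes; defined elsewhere
+
+    rules {
+      id     = "example-rule"
+      status = "Enabled"
+
+      # Using filter makes this a V2 rule; use prefix instead for V1.
+      filter {
+        prefix = "logs/"
+      }
+
+      destination {
+        bucket        = aws_s3_bucket.replica.arn
+        storage_class = "STANDARD_IA"
+      }
+    }
+  }
+}
+```
+
+Because the rule uses `filter`, this is a V2 configuration; `priority` and `delete_marker_replication_status` could be added alongside it in the same `rules` block.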
+ +#### Rules + +The `rules` configuration block supports the following arguments: + +~> **NOTE:** Amazon S3's latest version of the replication configuration is V2, which includes the `filter` attribute for replication rules. +With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to. +Replication configuration V1 supports filtering based only on the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration. + +* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `Enabled`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used). +* `destination` - (Required) Specifies the destination for the rule ([documented below](#destination)). +* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies a subset of objects to which the replication rule applies ([documented below](#filter)). +* `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. +* `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique between multiple rules. +* `source_selection_criteria` - (Optional) Specifies special object selection criteria ([documented below](#source-selection-criteria)). +* `status` - (Required) The status of the rule. Either `Enabled` or `Disabled`. The rule is ignored if the status is not `Enabled`. + +#### Filter + +The `filter` configuration block supports the following arguments: + +* `prefix` - (Optional) Object keyname prefix that identifies a subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length. +* `tags` - (Optional) A map of tags that identifies a subset of objects to which the rule applies. + The rule applies only to objects having all the tags in its tag set. + +#### Destination + +~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified. + +The `destination` configuration block supports the following arguments: + +* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule. +* `storage_class` - (Optional) The [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Destination.html#AmazonS3-Type-Destination-StorageClass) used to store the object. By default, Amazon S3 uses the storage class of the source object to create the object replica. +* `replica_kms_key_id` - (Optional) Destination KMS encryption key ARN for SSE-KMS replication. Must be used in conjunction with + `sse_kms_encrypted_objects` source selection criteria. +* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with `account_id` owner override configuration. +* `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with `access_control_translation` override configuration.
+* `replication_time` - (Optional) Enables S3 Replication Time Control (S3 RTC) ([documented below](#replication-time)). +* `metrics` - (Optional) Enables replication metrics (required for S3 RTC) ([documented below](#metrics)). + +#### Replication Time + +The `replication_time` configuration block supports the following arguments: + +* `status` - (Optional) The status of RTC. Either `Enabled` or `Disabled`. +* `minutes` - (Optional) Threshold within which objects are to be replicated. The only valid value is `15`. + +#### Metrics + +The `metrics` configuration block supports the following arguments: + +* `status` - (Optional) The status of replication metrics. Either `Enabled` or `Disabled`. +* `minutes` - (Optional) Threshold within which objects are to be replicated. The only valid value is `15`. + +#### Source Selection Criteria + +The `source_selection_criteria` configuration block supports the following argument: + +* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects ([documented below](#sse-kms-encrypted-objects)). If specified, `replica_kms_key_id` + in `destination` must be specified as well, as shown in the sketch at the end of this page. + +#### SSE KMS Encrypted Objects + +The `sse_kms_encrypted_objects` configuration block supports the following argument: + +* `enabled` - (Required) Boolean which indicates if this criteria is enabled. + ### Server Side Encryption Configuration ~> **NOTE:** Currently, changes to the `server_side_encryption_configuration` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes in encryption of an S3 bucket, use the `aws_s3_bucket_server_side_encryption_configuration` resource instead. If you use `server_side_encryption_configuration` on an `aws_s3_bucket`, Terraform will assume management over the encryption configuration for the S3 bucket, treating additional encryption changes as drift. For this reason, `server_side_encryption_configuration` cannot be mixed with the external `aws_s3_bucket_server_side_encryption_configuration` resource for a given S3 bucket. @@ -465,33 +670,6 @@ In addition to all arguments above, the following attributes are exported: * `years` - The number of years specified for the default retention period. * `policy` - The [bucket policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html) JSON document. * `region` - The AWS region this bucket resides in. -* `replication_configuration` - The [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). - * `role` - The ARN of the IAM role for Amazon S3 assumed when replicating the objects. - * `rules` - The rules managing the replication. - * `delete_marker_replication_status` - Whether delete markers are replicated. - * `destination` - The destination for the rule. - * `access_control_translation` - The overrides to use for object owners on replication. - * `owner` - The override value for the owner on replicated objects. - * `account_id` - The Account ID to use for overriding the object owner on replication. - * `bucket` - The ARN of the S3 bucket where Amazon S3 stores replicas of the object identified by the rule. - * `metrics` - Replication metrics. - * `status` - The status of replication metrics. - * `minutes` - Threshold within which objects are replicated. - * `storage_class` - The [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Destination.html#AmazonS3-Type-Destination-StorageClass) used to store the object.
- * `replica_kms_key_id` - Destination KMS encryption key ARN for SSE-KMS replication. - * `replication_time` - S3 Replication Time Control (S3 RTC). - * `status` - The status of RTC. - * `minutes` - Threshold within which objects are to be replicated. - * `filter` - Filter that identifies subset of objects to which the replication rule applies. - * `prefix` - Object keyname prefix that identifies subset of objects to which the rule applies. - * `tags` - Map of tags that identifies subset of objects to which the rule applies. - * `id` - Unique identifier for the rule. - * `prefix` - Object keyname prefix identifying one or more objects to which the rule applies - * `priority` - The priority associated with the rule. - * `source_selection_criteria` - The special object selection criteria. - * `sse_kms_encrypted_objects` - Matched SSE-KMS encrypted objects. - * `enabled` - Whether this criteria is enabled. - * `status` - The status of the rule. * `request_payer` - Either `BucketOwner` or `Requester` that pays for the download and request fees. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). * `website_endpoint` - The website endpoint, if the bucket is configured with a website. If not, this will be an empty string.
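As a sketch of the paired constraints documented above for `source_selection_criteria` and the owner override (bucket, KMS key, and role names are illustrative placeholders assumed to be defined elsewhere; the account ID is a dummy value):

```terraform
resource "aws_s3_bucket" "source" {
  bucket = "example-sse-kms-source"
  acl    = "private"

  versioning {
    enabled = true
  }

  replication_configuration {
    role = aws_iam_role.replication.arn # IAM role that S3 assumes; defined elsewhere

    rules {
      id     = "sse-kms-rule"
      status = "Enabled"

      # Replicate only objects encrypted with SSE-KMS at the source.
      source_selection_criteria {
        sse_kms_encrypted_objects {
          enabled = true
        }
      }

      destination {
        bucket = aws_s3_bucket.replica.arn # destination bucket defined elsewhere

        # Required whenever sse_kms_encrypted_objects is specified.
        replica_kms_key_id = aws_kms_key.replica.arn

        # account_id and access_control_translation must be used together.
        account_id = "123456789012"
        access_control_translation {
          owner = "Destination"
        }
      }
    }
  }
}
```

Here `replica_kms_key_id` pairs with `sse_kms_encrypted_objects`, and `account_id` pairs with `access_control_translation`, matching the conjunction requirements listed under the `destination` arguments.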