From f6afc22d524f617b036fe27a454feccbaba85e8f Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 22 Mar 2022 20:24:58 -0400 Subject: [PATCH 01/42] r/s3_bucket: make 'acl' and 'grant' configurable --- internal/service/s3/bucket.go | 953 ++++++++++-------- internal/service/s3/bucket_acl_test.go | 238 +++++ .../s3/bucket_public_access_block_test.go | 2 +- internal/service/s3/bucket_test.go | 456 ++++++--- internal/service/s3/object_copy.go | 25 + website/docs/r/s3_bucket.html.markdown | 51 +- 6 files changed, 1146 insertions(+), 579 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 6730d0b37a9..837e6c5eb2e 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -1,7 +1,6 @@ package s3 import ( - "bytes" "context" "encoding/json" "fmt" @@ -24,7 +23,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -75,38 +73,48 @@ func ResourceBucket() *schema.Resource { }, "acl": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_acl resource instead", + Type: schema.TypeString, + Optional: true, + Computed: true, + ConflictsWith: []string{"grant"}, + ValidateFunc: validation.StringInSlice(BucketCannedACL_Values(), false), + Deprecated: "Use the aws_s3_bucket_acl resource instead", }, "grant": { - Type: schema.TypeSet, - Computed: true, - Deprecated: "Use the aws_s3_bucket_acl resource instead", + Type: schema.TypeSet, + Optional: true, + Computed: true, + ConflictsWith: []string{"acl"}, + Deprecated: "Use the aws_s3_bucket_acl resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "id": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_acl resource instead", + Type: schema.TypeString, + Optional: true, }, "type": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_acl resource instead", + Type: schema.TypeString, + Required: true, + // TypeAmazonCustomerByEmail is not currently supported + ValidateFunc: validation.StringInSlice([]string{ + s3.TypeCanonicalUser, + s3.TypeGroup, + }, false), }, "uri": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_acl resource instead", + Type: schema.TypeString, + Optional: true, }, "permissions": { - Type: schema.TypeSet, - Computed: true, - Deprecated: "Use the aws_s3_bucket_acl resource instead", - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeSet, + Required: true, + Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice(s3.Permission_Values(), false), + }, }, }, }, @@ -674,7 +682,6 @@ func resourceBucketCreate(d *schema.ResourceData, meta interface{}) error { } else { bucket = resource.UniqueId() } - d.Set("bucket", bucket) log.Printf("[DEBUG] S3 bucket create: %s", bucket) @@ -683,6 +690,16 @@ func resourceBucketCreate(d *schema.ResourceData, meta interface{}) error { ObjectLockEnabledForBucket: aws.Bool(d.Get("object_lock_enabled").(bool)), } + if acl, ok := d.GetOk("acl"); ok { + acl := acl.(string) + req.ACL = aws.String(acl) + log.Printf("[DEBUG] S3 
bucket %s has canned ACL %s", bucket, acl) + } else { + // Use default value previously available in v3.x of the provider + req.ACL = aws.String(s3.BucketCannedACLPrivate) + log.Printf("[DEBUG] S3 bucket %s has default canned ACL %s", bucket, s3.BucketCannedACLPrivate) + } + awsRegion := meta.(*conns.AWSClient).Region log.Printf("[DEBUG] S3 bucket create: %s, using region: %s", bucket, awsRegion) @@ -695,7 +712,7 @@ func resourceBucketCreate(d *schema.ResourceData, meta interface{}) error { } if err := ValidBucketName(bucket, awsRegion); err != nil { - return fmt.Errorf("Error validating S3 bucket name: %s", err) + return fmt.Errorf("error validating S3 Bucket (%s) name: %w", bucket, err) } // S3 Object Lock can only be enabled on bucket creation. @@ -708,7 +725,7 @@ func resourceBucketCreate(d *schema.ResourceData, meta interface{}) error { _, err := conn.CreateBucket(req) if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() == ErrCodeOperationAborted { - return resource.RetryableError(fmt.Errorf("Error creating S3 bucket %s, retrying: %w", bucket, err)) + return resource.RetryableError(fmt.Errorf("error creating S3 Bucket (%s), retrying: %w", bucket, err)) } } if err != nil { @@ -721,7 +738,7 @@ func resourceBucketCreate(d *schema.ResourceData, meta interface{}) error { _, err = conn.CreateBucket(req) } if err != nil { - return fmt.Errorf("Error creating S3 bucket: %s", err) + return fmt.Errorf("error creating S3 Bucket (%s): %w", bucket, err) } // Assign the bucket name as the resource ID @@ -745,9 +762,21 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("acl") && !d.IsNewResource() { + if err := resourceBucketInternalACLUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) ACL: %w", d.Id(), err) + } + } + + if d.HasChange("grant") { + if err := resourceBucketInternalGrantsUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Grants: %w", d.Id(), err) + } + } + if d.HasChange("object_lock_configuration") { if err := resourceBucketInternalObjectLockConfigurationUpdate(conn, d); err != nil { - return err + return fmt.Errorf("error updating S3 Bucket (%s) Object Lock configuration: %w", d.Id(), err) } } @@ -801,10 +830,7 @@ func resourceBucketRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error reading S3 Bucket (%s): %w", d.Id(), err) } - // In the import case, we won't have this - if _, ok := d.GetOk("bucket"); !ok { - d.Set("bucket", d.Id()) - } + d.Set("bucket", d.Id()) d.Set("bucket_domain_name", meta.(*conns.AWSClient).PartitionHostname(fmt.Sprintf("%s.s3", d.Get("bucket").(string)))) @@ -834,44 +860,33 @@ func resourceBucketRead(d *schema.ResourceData, meta interface{}) error { d.Set("policy", nil) } - // Read the Grant ACL if configured outside this resource; + // Read the Grant ACL. // In the event grants are not configured on the bucket, the API returns an empty array - - // Reset `grant` if `acl` (canned ACL) is set. - if acl, ok := d.GetOk("acl"); ok && acl.(string) != s3.BucketCannedACLPrivate { - if err := d.Set("grant", nil); err != nil { - return fmt.Errorf("error resetting grant %w", err) - } - } else { - // Set the ACL to its default i.e. 
"private" (to mimic pre-v4.0 schema) - d.Set("acl", s3.BucketCannedACLPrivate) - - apResponse, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { - return conn.GetBucketAcl(&s3.GetBucketAclInput{ - Bucket: aws.String(d.Id()), - }) + apResponse, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.GetBucketAcl(&s3.GetBucketAclInput{ + Bucket: aws.String(d.Id()), }) + }) - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as GetBucketAcl, the error should be caught for non-new buckets as follows. - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { - log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) - d.SetId("") - return nil - } + // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) + // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls + // such as GetBucketAcl, the error should be caught for non-new buckets as follows. + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } - if err != nil { - return fmt.Errorf("error getting S3 Bucket (%s) ACL: %w", d.Id(), err) - } + if err != nil { + return fmt.Errorf("error getting S3 Bucket (%s) ACL: %w", d.Id(), err) + } - if aclOutput, ok := apResponse.(*s3.GetBucketAclOutput); ok { - if err := d.Set("grant", flattenGrants(aclOutput)); err != nil { - return fmt.Errorf("error setting grant %s", err) - } - } else { - d.Set("grant", nil) + if aclOutput, ok := apResponse.(*s3.GetBucketAclOutput); ok { + if err := d.Set("grant", flattenGrants(aclOutput)); err != nil { + return fmt.Errorf("error setting grant %s", err) } + } else { + d.Set("grant", nil) } // Read the CORS @@ -1336,36 +1351,6 @@ func resourceBucketDelete(d *schema.ResourceData, meta interface{}) error { return nil } -func websiteEndpoint(client *conns.AWSClient, d *schema.ResourceData) (*S3Website, error) { - // If the bucket doesn't have a website configuration, return an empty - // endpoint - if _, ok := d.GetOk("website"); !ok { - return nil, nil - } - - bucket := d.Get("bucket").(string) - - // Lookup the region for this bucket - - locationResponse, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { - return client.S3Conn.GetBucketLocation( - &s3.GetBucketLocationInput{ - Bucket: aws.String(bucket), - }, - ) - }) - if err != nil { - return nil, err - } - location := locationResponse.(*s3.GetBucketLocationOutput) - var region string - if location.LocationConstraint != nil { - region = aws.StringValue(location.LocationConstraint) - } - - return WebsiteEndpoint(client, bucket, region), nil -} - // https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region func BucketRegionalDomainName(bucket string, region string) (string, error) { // Return a default AWS Commercial domain name if no region is provided @@ -1380,6 +1365,44 @@ func BucketRegionalDomainName(bucket string, region string) (string, error) { return fmt.Sprintf("%s.%s", bucket, strings.TrimPrefix(endpoint.URL, "https://")), nil } +// ValidBucketName validates any S3 bucket name that is not inside the us-east-1 region. +// Buckets outside of this region have to be DNS-compliant. 
After the same restrictions are +// applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc +func ValidBucketName(value string, region string) error { + if region != endpoints.UsEast1RegionID { + if (len(value) < 3) || (len(value) > 63) { + return fmt.Errorf("%q must contain from 3 to 63 characters", value) + } + if !regexp.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) { + return fmt.Errorf("only lowercase alphanumeric characters and hyphens allowed in %q", value) + } + if regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) { + return fmt.Errorf("%q must not be formatted as an IP address", value) + } + if strings.HasPrefix(value, `.`) { + return fmt.Errorf("%q cannot start with a period", value) + } + if strings.HasSuffix(value, `.`) { + return fmt.Errorf("%q cannot end with a period", value) + } + if strings.Contains(value, `..`) { + return fmt.Errorf("%q can be only one period between labels", value) + } + } else { + if len(value) > 255 { + return fmt.Errorf("%q must contain less than 256 characters", value) + } + if !regexp.MustCompile(`^[0-9a-zA-Z-._]+$`).MatchString(value) { + return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value) + } + } + return nil +} + +type S3Website struct { + Endpoint, Domain string +} + func WebsiteEndpoint(client *conns.AWSClient, bucket string, region string) *S3Website { domain := WebsiteDomainUrl(client, region) return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain} @@ -1397,6 +1420,36 @@ func WebsiteDomainUrl(client *conns.AWSClient, region string) string { return client.RegionalHostname("s3-website") } +func websiteEndpoint(client *conns.AWSClient, d *schema.ResourceData) (*S3Website, error) { + // If the bucket doesn't have a website configuration, return an empty + // endpoint + if _, ok := d.GetOk("website"); !ok { + return nil, nil + } + + bucket := d.Get("bucket").(string) + + // Lookup the region for this bucket + + locationResponse, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return client.S3Conn.GetBucketLocation( + &s3.GetBucketLocationInput{ + Bucket: aws.String(bucket), + }, + ) + }) + if err != nil { + return nil, err + } + location := locationResponse.(*s3.GetBucketLocationOutput) + var region string + if location.LocationConstraint != nil { + region = aws.StringValue(location.LocationConstraint) + } + + return WebsiteEndpoint(client, bucket, region), nil +} + func isOldRegion(region string) bool { oldRegions := []string{ endpoints.ApNortheast1RegionID, @@ -1417,90 +1470,286 @@ func isOldRegion(region string) bool { return false } -func resourceBucketInternalObjectLockConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { - // S3 Object Lock configuration cannot be deleted, only updated. 
- req := &s3.PutObjectLockConfigurationInput{ - Bucket: aws.String(d.Get("bucket").(string)), - ObjectLockConfiguration: expandS3ObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{})), +func normalizeRegion(region string) string { + // Default to us-east-1 if the bucket doesn't have a region: + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html + if region == "" { + region = endpoints.UsEast1RegionID + } + + return region +} + +////////////////////////////////////////// Argument-Specific Update Functions ////////////////////////////////////////// + +func resourceBucketInternalACLUpdate(conn *s3.S3, d *schema.ResourceData) error { + acl := d.Get("acl").(string) + if acl == "" { + // Use default value previously available in v3.x of the provider + acl = s3.BucketCannedACLPrivate + } + + input := &s3.PutBucketAclInput{ + Bucket: aws.String(d.Id()), + ACL: aws.String(acl), } _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { - return conn.PutObjectLockConfiguration(req) + return conn.PutBucketAcl(input) }) - if err != nil { - return fmt.Errorf("error putting S3 object lock configuration: %s", err) - } - return nil + return err } -func flattenBucketLifecycleRuleExpiration(expiration *s3.LifecycleExpiration) []interface{} { - if expiration == nil { - return []interface{}{} +func resourceBucketInternalGrantsUpdate(conn *s3.S3, d *schema.ResourceData) error { + grants := d.Get("grant").(*schema.Set) + + if grants.Len() == 0 { + log.Printf("[DEBUG] S3 bucket: %s, Grants fallback to canned ACL", d.Id()) + + if err := resourceBucketInternalACLUpdate(conn, d); err != nil { + return fmt.Errorf("error fallback to canned ACL, %s", err) + } + + return nil } - m := make(map[string]interface{}) + resp, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.GetBucketAcl(&s3.GetBucketAclInput{ + Bucket: aws.String(d.Id()), + }) + }) - if expiration.Date != nil { - m["date"] = (aws.TimeValue(expiration.Date)).Format("2006-01-02") + if err != nil { + return fmt.Errorf("error getting S3 Bucket (%s) ACL: %s", d.Id(), err) } - if expiration.Days != nil { - m["days"] = int(aws.Int64Value(expiration.Days)) + + output := resp.(*s3.GetBucketAclOutput) + + if output == nil { + return fmt.Errorf("error getting S3 Bucket (%s) ACL: empty output", d.Id()) } - if expiration.ExpiredObjectDeleteMarker != nil { - m["expired_object_delete_marker"] = aws.BoolValue(expiration.ExpiredObjectDeleteMarker) + + input := &s3.PutBucketAclInput{ + Bucket: aws.String(d.Id()), + AccessControlPolicy: &s3.AccessControlPolicy{ + Grants: expandGrants(grants.List()), + Owner: output.Owner, + }, } - return []interface{}{m} + _, err = verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.PutBucketAcl(input) + }) + + return err } -func flattenBucketLifecycleRules(lifecycleRules []*s3.LifecycleRule) []interface{} { - if len(lifecycleRules) == 0 { - return []interface{}{} +func resourceBucketInternalObjectLockConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { + // S3 Object Lock configuration cannot be deleted, only updated. 
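+	// Retry on NoSuchBucket below to allow for S3's eventual consistency,
+	// since this update can run immediately after bucket creation.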
+ req := &s3.PutObjectLockConfigurationInput{ + Bucket: aws.String(d.Id()), + ObjectLockConfiguration: expandS3ObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{})), } + _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.PutObjectLockConfiguration(req) + }) + + return err +} + +///////////////////////////////////////////// Expand and Flatten functions ///////////////////////////////////////////// + +// Cors Rule functions + +func flattenBucketCorsRules(rules []*s3.CORSRule) []interface{} { var results []interface{} - for _, lifecycleRule := range lifecycleRules { - if lifecycleRule == nil { + for _, rule := range rules { + if rule == nil { continue } - rule := make(map[string]interface{}) + m := make(map[string]interface{}) - // AbortIncompleteMultipartUploadDays - if lifecycleRule.AbortIncompleteMultipartUpload != nil { - if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil { - rule["abort_incomplete_multipart_upload_days"] = int(aws.Int64Value(lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)) - } + if len(rule.AllowedHeaders) > 0 { + m["allowed_headers"] = flex.FlattenStringList(rule.AllowedHeaders) } - // ID - if lifecycleRule.ID != nil { - rule["id"] = aws.StringValue(lifecycleRule.ID) + if len(rule.AllowedMethods) > 0 { + m["allowed_methods"] = flex.FlattenStringList(rule.AllowedMethods) } - // Filter - if filter := lifecycleRule.Filter; filter != nil { - if filter.And != nil { - // Prefix - if filter.And.Prefix != nil { - rule["prefix"] = aws.StringValue(filter.And.Prefix) - } - // Tag - if len(filter.And.Tags) > 0 { - rule["tags"] = KeyValueTags(filter.And.Tags).IgnoreAWS().Map() - } - } else { - // Prefix - if filter.Prefix != nil { - rule["prefix"] = aws.StringValue(filter.Prefix) - } - // Tag - if filter.Tag != nil { - rule["tags"] = KeyValueTags([]*s3.Tag{filter.Tag}).IgnoreAWS().Map() - } - } + if len(rule.AllowedOrigins) > 0 { + m["allowed_origins"] = flex.FlattenStringList(rule.AllowedOrigins) + } + + if len(rule.ExposeHeaders) > 0 { + m["expose_headers"] = flex.FlattenStringList(rule.ExposeHeaders) + } + + if rule.MaxAgeSeconds != nil { + m["max_age_seconds"] = int(aws.Int64Value(rule.MaxAgeSeconds)) + } + + results = append(results, m) + } + + return results +} + +// Grants functions + +func expandGrants(l []interface{}) []*s3.Grant { + var grants []*s3.Grant + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + + if v, ok := tfMap["permissions"].(*schema.Set); ok { + for _, rawPermission := range v.List() { + permission, ok := rawPermission.(string) + if !ok { + continue + } + + grantee := &s3.Grantee{} + + if v, ok := tfMap["id"].(string); ok && v != "" { + grantee.SetID(v) + } + + if v, ok := tfMap["type"].(string); ok && v != "" { + grantee.SetType(v) + } + + if v, ok := tfMap["uri"].(string); ok && v != "" { + grantee.SetURI(v) + } + + g := &s3.Grant{ + Grantee: grantee, + Permission: aws.String(permission), + } + + grants = append(grants, g) + } + } + } + return grants +} + +func flattenGrants(ap *s3.GetBucketAclOutput) []interface{} { + if len(ap.Grants) == 0 { + return []interface{}{} + } + + getGrant := func(grants []interface{}, grantee map[string]interface{}) (interface{}, bool) { + for _, pg := range grants { + pgt := pg.(map[string]interface{}) + if pgt["type"] == grantee["type"] && pgt["id"] == grantee["id"] && pgt["uri"] == grantee["uri"] && + pgt["permissions"].(*schema.Set).Len() > 0 { + return 
pg, true + } + } + return nil, false + } + + grants := make([]interface{}, 0, len(ap.Grants)) + for _, granteeObject := range ap.Grants { + grantee := make(map[string]interface{}) + grantee["type"] = aws.StringValue(granteeObject.Grantee.Type) + + if granteeObject.Grantee.ID != nil { + grantee["id"] = aws.StringValue(granteeObject.Grantee.ID) + } + if granteeObject.Grantee.URI != nil { + grantee["uri"] = aws.StringValue(granteeObject.Grantee.URI) + } + if pg, ok := getGrant(grants, grantee); ok { + pg.(map[string]interface{})["permissions"].(*schema.Set).Add(aws.StringValue(granteeObject.Permission)) + } else { + grantee["permissions"] = schema.NewSet(schema.HashString, []interface{}{aws.StringValue(granteeObject.Permission)}) + grants = append(grants, grantee) + } + } + + return grants +} + +// Lifecycle Rule functions + +func flattenBucketLifecycleRuleExpiration(expiration *s3.LifecycleExpiration) []interface{} { + if expiration == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if expiration.Date != nil { + m["date"] = (aws.TimeValue(expiration.Date)).Format("2006-01-02") + } + if expiration.Days != nil { + m["days"] = int(aws.Int64Value(expiration.Days)) + } + if expiration.ExpiredObjectDeleteMarker != nil { + m["expired_object_delete_marker"] = aws.BoolValue(expiration.ExpiredObjectDeleteMarker) + } + + return []interface{}{m} +} + +func flattenBucketLifecycleRules(lifecycleRules []*s3.LifecycleRule) []interface{} { + if len(lifecycleRules) == 0 { + return []interface{}{} + } + + var results []interface{} + + for _, lifecycleRule := range lifecycleRules { + if lifecycleRule == nil { + continue + } + + rule := make(map[string]interface{}) + + // AbortIncompleteMultipartUploadDays + if lifecycleRule.AbortIncompleteMultipartUpload != nil { + if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil { + rule["abort_incomplete_multipart_upload_days"] = int(aws.Int64Value(lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)) + } + } + + // ID + if lifecycleRule.ID != nil { + rule["id"] = aws.StringValue(lifecycleRule.ID) + } + + // Filter + if filter := lifecycleRule.Filter; filter != nil { + if filter.And != nil { + // Prefix + if filter.And.Prefix != nil { + rule["prefix"] = aws.StringValue(filter.And.Prefix) + } + // Tag + if len(filter.And.Tags) > 0 { + rule["tags"] = KeyValueTags(filter.And.Tags).IgnoreAWS().Map() + } + } else { + // Prefix + if filter.Prefix != nil { + rule["prefix"] = aws.StringValue(filter.Prefix) + } + // Tag + if filter.Tag != nil { + rule["tags"] = KeyValueTags([]*s3.Tag{filter.Tag}).IgnoreAWS().Map() + } + } } // Prefix @@ -1597,6 +1846,8 @@ func flattenBucketLifecycleRuleTransitions(transitions []*s3.Transition) []inter return results } +// Logging functions + func flattenBucketLoggingEnabled(loggingEnabled *s3.LoggingEnabled) []interface{} { if loggingEnabled == nil { return []interface{}{} @@ -1614,137 +1865,52 @@ func flattenBucketLoggingEnabled(loggingEnabled *s3.LoggingEnabled) []interface{ return []interface{}{m} } -func flattenServerSideEncryptionConfiguration(c *s3.ServerSideEncryptionConfiguration) []interface{} { - if c == nil { - return []interface{}{} - } - - m := map[string]interface{}{ - "rule": flattenServerSideEncryptionConfigurationRules(c.Rules), - } - - return []interface{}{m} -} - -func flattenServerSideEncryptionConfigurationRules(rules []*s3.ServerSideEncryptionRule) []interface{} { - var results []interface{} - - for _, rule := range rules { - m := make(map[string]interface{}) - - 
if rule.BucketKeyEnabled != nil { - m["bucket_key_enabled"] = aws.BoolValue(rule.BucketKeyEnabled) - } - - if rule.ApplyServerSideEncryptionByDefault != nil { - m["apply_server_side_encryption_by_default"] = []interface{}{ - map[string]interface{}{ - "kms_master_key_id": aws.StringValue(rule.ApplyServerSideEncryptionByDefault.KMSMasterKeyID), - "sse_algorithm": aws.StringValue(rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm), - }, - } - } +// Object Lock Configuration functions - results = append(results, m) +func expandS3ObjectLockConfiguration(vConf []interface{}) *s3.ObjectLockConfiguration { + if len(vConf) == 0 || vConf[0] == nil { + return nil } - return results -} - -func flattenBucketCorsRules(rules []*s3.CORSRule) []interface{} { - var results []interface{} - - for _, rule := range rules { - if rule == nil { - continue - } - - m := make(map[string]interface{}) - - if len(rule.AllowedHeaders) > 0 { - m["allowed_headers"] = flex.FlattenStringList(rule.AllowedHeaders) - } - - if len(rule.AllowedMethods) > 0 { - m["allowed_methods"] = flex.FlattenStringList(rule.AllowedMethods) - } - - if len(rule.AllowedOrigins) > 0 { - m["allowed_origins"] = flex.FlattenStringList(rule.AllowedOrigins) - } - - if len(rule.ExposeHeaders) > 0 { - m["expose_headers"] = flex.FlattenStringList(rule.ExposeHeaders) - } + mConf := vConf[0].(map[string]interface{}) - if rule.MaxAgeSeconds != nil { - m["max_age_seconds"] = int(aws.Int64Value(rule.MaxAgeSeconds)) - } + conf := &s3.ObjectLockConfiguration{} - results = append(results, m) + if vObjectLockEnabled, ok := mConf["object_lock_enabled"].(string); ok && vObjectLockEnabled != "" { + conf.ObjectLockEnabled = aws.String(vObjectLockEnabled) } - return results + return conf } -func flattenBucketWebsite(ws *s3.GetBucketWebsiteOutput) ([]interface{}, error) { - if ws == nil { - return []interface{}{}, nil - } - - m := make(map[string]interface{}) - - if v := ws.IndexDocument; v != nil { - m["index_document"] = aws.StringValue(v.Suffix) - } - - if v := ws.ErrorDocument; v != nil { - m["error_document"] = aws.StringValue(v.Key) +func flattenS3ObjectLockConfiguration(conf *s3.ObjectLockConfiguration) []interface{} { + if conf == nil { + return []interface{}{} } - if v := ws.RedirectAllRequestsTo; v != nil { - if v.Protocol == nil { - m["redirect_all_requests_to"] = aws.StringValue(v.HostName) - } else { - var host string - var path string - var query string - parsedHostName, err := url.Parse(aws.StringValue(v.HostName)) - if err == nil { - host = parsedHostName.Host - path = parsedHostName.Path - query = parsedHostName.RawQuery - } else { - host = aws.StringValue(v.HostName) - path = "" - } - - m["redirect_all_requests_to"] = (&url.URL{ - Host: host, - Path: path, - Scheme: aws.StringValue(v.Protocol), - RawQuery: query, - }).String() - } + mConf := map[string]interface{}{ + "object_lock_enabled": aws.StringValue(conf.ObjectLockEnabled), } - if v := ws.RoutingRules; v != nil { - rr, err := normalizeRoutingRules(v) - if err != nil { - return nil, fmt.Errorf("error while marshaling routing rules: %w", err) + if conf.Rule != nil && conf.Rule.DefaultRetention != nil { + mRule := map[string]interface{}{ + "default_retention": []interface{}{ + map[string]interface{}{ + "mode": aws.StringValue(conf.Rule.DefaultRetention.Mode), + "days": int(aws.Int64Value(conf.Rule.DefaultRetention.Days)), + "years": int(aws.Int64Value(conf.Rule.DefaultRetention.Years)), + }, + }, } - m["routing_rules"] = rr - } - // We have special handling for the website configuration, - // so 
only return the configuration if there is any - if len(m) == 0 { - return []interface{}{}, nil + mConf["rule"] = []interface{}{mRule} } - return []interface{}{m}, nil + return []interface{}{mConf} } +// Replication Configuration functions + func flattenBucketReplicationConfiguration(r *s3.ReplicationConfiguration) []interface{} { if r == nil { return []interface{}{} @@ -1919,138 +2085,43 @@ func flattenBucketReplicationConfigurationReplicationRules(rules []*s3.Replicati return results } -func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) { - withNulls, err := json.Marshal(w) - if err != nil { - return "", err - } - - var rules []map[string]interface{} - if err := json.Unmarshal(withNulls, &rules); err != nil { - return "", err - } +// Server Side Encryption Configuration functions - var cleanRules []map[string]interface{} - for _, rule := range rules { - cleanRules = append(cleanRules, removeNil(rule)) +func flattenServerSideEncryptionConfiguration(c *s3.ServerSideEncryptionConfiguration) []interface{} { + if c == nil { + return []interface{}{} } - withoutNulls, err := json.Marshal(cleanRules) - if err != nil { - return "", err + m := map[string]interface{}{ + "rule": flattenServerSideEncryptionConfigurationRules(c.Rules), } - return string(withoutNulls), nil + return []interface{}{m} } -func removeNil(data map[string]interface{}) map[string]interface{} { - withoutNil := make(map[string]interface{}) +func flattenServerSideEncryptionConfigurationRules(rules []*s3.ServerSideEncryptionRule) []interface{} { + var results []interface{} - for k, v := range data { - if v == nil { - continue - } + for _, rule := range rules { + m := make(map[string]interface{}) - switch v := v.(type) { - case map[string]interface{}: - withoutNil[k] = removeNil(v) - default: - withoutNil[k] = v + if rule.BucketKeyEnabled != nil { + m["bucket_key_enabled"] = aws.BoolValue(rule.BucketKeyEnabled) } - } - - return withoutNil -} - -func normalizeRegion(region string) string { - // Default to us-east-1 if the bucket doesn't have a region: - // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html - if region == "" { - region = endpoints.UsEast1RegionID - } - - return region -} -// ValidBucketName validates any S3 bucket name that is not inside the us-east-1 region. -// Buckets outside of this region have to be DNS-compliant. 
After the same restrictions are -// applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc -func ValidBucketName(value string, region string) error { - if region != endpoints.UsEast1RegionID { - if (len(value) < 3) || (len(value) > 63) { - return fmt.Errorf("%q must contain from 3 to 63 characters", value) - } - if !regexp.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) { - return fmt.Errorf("only lowercase alphanumeric characters and hyphens allowed in %q", value) - } - if regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) { - return fmt.Errorf("%q must not be formatted as an IP address", value) - } - if strings.HasPrefix(value, `.`) { - return fmt.Errorf("%q cannot start with a period", value) - } - if strings.HasSuffix(value, `.`) { - return fmt.Errorf("%q cannot end with a period", value) - } - if strings.Contains(value, `..`) { - return fmt.Errorf("%q can be only one period between labels", value) - } - } else { - if len(value) > 255 { - return fmt.Errorf("%q must contain less than 256 characters", value) - } - if !regexp.MustCompile(`^[0-9a-zA-Z-._]+$`).MatchString(value) { - return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value) + if rule.ApplyServerSideEncryptionByDefault != nil { + m["apply_server_side_encryption_by_default"] = []interface{}{ + map[string]interface{}{ + "kms_master_key_id": aws.StringValue(rule.ApplyServerSideEncryptionByDefault.KMSMasterKeyID), + "sse_algorithm": aws.StringValue(rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm), + }, + } } - } - return nil -} - -func grantHash(v interface{}) int { - var buf bytes.Buffer - m, ok := v.(map[string]interface{}) - - if !ok { - return 0 - } - - if v, ok := m["id"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["type"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["uri"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if p, ok := m["permissions"]; ok { - buf.WriteString(fmt.Sprintf("%v-", p.(*schema.Set).List())) - } - return create.StringHashcode(buf.String()) -} - -type S3Website struct { - Endpoint, Domain string -} - -// -// S3 Object Lock functions. 
-// - -func expandS3ObjectLockConfiguration(vConf []interface{}) *s3.ObjectLockConfiguration { - if len(vConf) == 0 || vConf[0] == nil { - return nil - } - - mConf := vConf[0].(map[string]interface{}) - - conf := &s3.ObjectLockConfiguration{} - if vObjectLockEnabled, ok := mConf["object_lock_enabled"].(string); ok && vObjectLockEnabled != "" { - conf.ObjectLockEnabled = aws.String(vObjectLockEnabled) + results = append(results, m) } - return conf + return results } // Versioning functions @@ -2077,71 +2148,105 @@ func flattenVersioning(versioning *s3.GetBucketVersioningOutput) []interface{} { return []interface{}{vc} } -func flattenS3ObjectLockConfiguration(conf *s3.ObjectLockConfiguration) []interface{} { - if conf == nil { - return []interface{}{} +// Website functions + +func flattenBucketWebsite(ws *s3.GetBucketWebsiteOutput) ([]interface{}, error) { + if ws == nil { + return []interface{}{}, nil } - mConf := map[string]interface{}{ - "object_lock_enabled": aws.StringValue(conf.ObjectLockEnabled), + m := make(map[string]interface{}) + + if v := ws.IndexDocument; v != nil { + m["index_document"] = aws.StringValue(v.Suffix) } - if conf.Rule != nil && conf.Rule.DefaultRetention != nil { - mRule := map[string]interface{}{ - "default_retention": []interface{}{ - map[string]interface{}{ - "mode": aws.StringValue(conf.Rule.DefaultRetention.Mode), - "days": int(aws.Int64Value(conf.Rule.DefaultRetention.Days)), - "years": int(aws.Int64Value(conf.Rule.DefaultRetention.Years)), - }, - }, + if v := ws.ErrorDocument; v != nil { + m["error_document"] = aws.StringValue(v.Key) + } + + if v := ws.RedirectAllRequestsTo; v != nil { + if v.Protocol == nil { + m["redirect_all_requests_to"] = aws.StringValue(v.HostName) + } else { + var host string + var path string + var query string + parsedHostName, err := url.Parse(aws.StringValue(v.HostName)) + if err == nil { + host = parsedHostName.Host + path = parsedHostName.Path + query = parsedHostName.RawQuery + } else { + host = aws.StringValue(v.HostName) + path = "" + } + + m["redirect_all_requests_to"] = (&url.URL{ + Host: host, + Path: path, + Scheme: aws.StringValue(v.Protocol), + RawQuery: query, + }).String() } + } - mConf["rule"] = []interface{}{mRule} + if v := ws.RoutingRules; v != nil { + rr, err := normalizeRoutingRules(v) + if err != nil { + return nil, fmt.Errorf("error while marshaling routing rules: %w", err) + } + m["routing_rules"] = rr } - return []interface{}{mConf} + // We have special handling for the website configuration, + // so only return the configuration if there is any + if len(m) == 0 { + return []interface{}{}, nil + } + + return []interface{}{m}, nil } -func flattenGrants(ap *s3.GetBucketAclOutput) []interface{} { - if len(ap.Grants) == 0 { - return []interface{}{} +func normalizeRoutingRules(w []*s3.RoutingRule) (string, error) { + withNulls, err := json.Marshal(w) + if err != nil { + return "", err } - //if ACL grants contains bucket owner FULL_CONTROL only - it is default "private" acl - if len(ap.Grants) == 1 && aws.StringValue(ap.Grants[0].Grantee.ID) == aws.StringValue(ap.Owner.ID) && - aws.StringValue(ap.Grants[0].Permission) == s3.PermissionFullControl { - return nil + + var rules []map[string]interface{} + if err := json.Unmarshal(withNulls, &rules); err != nil { + return "", err } - getGrant := func(grants []interface{}, grantee map[string]interface{}) (interface{}, bool) { - for _, pg := range grants { - pgt := pg.(map[string]interface{}) - if pgt["type"] == grantee["type"] && pgt["id"] == grantee["id"] && pgt["uri"] 
== grantee["uri"] && - pgt["permissions"].(*schema.Set).Len() > 0 { - return pg, true - } - } - return nil, false + var cleanRules []map[string]interface{} + for _, rule := range rules { + cleanRules = append(cleanRules, removeNil(rule)) } - grants := make([]interface{}, 0, len(ap.Grants)) - for _, granteeObject := range ap.Grants { - grantee := make(map[string]interface{}) - grantee["type"] = aws.StringValue(granteeObject.Grantee.Type) + withoutNulls, err := json.Marshal(cleanRules) + if err != nil { + return "", err + } - if granteeObject.Grantee.ID != nil { - grantee["id"] = aws.StringValue(granteeObject.Grantee.ID) - } - if granteeObject.Grantee.URI != nil { - grantee["uri"] = aws.StringValue(granteeObject.Grantee.URI) + return string(withoutNulls), nil +} + +func removeNil(data map[string]interface{}) map[string]interface{} { + withoutNil := make(map[string]interface{}) + + for k, v := range data { + if v == nil { + continue } - if pg, ok := getGrant(grants, grantee); ok { - pg.(map[string]interface{})["permissions"].(*schema.Set).Add(aws.StringValue(granteeObject.Permission)) - } else { - grantee["permissions"] = schema.NewSet(schema.HashString, []interface{}{aws.StringValue(granteeObject.Permission)}) - grants = append(grants, grantee) + + switch v := v.(type) { + case map[string]interface{}: + withoutNil[k] = removeNil(v) + default: + withoutNil[k] = v } } - return grants + return withoutNil } diff --git a/internal/service/s3/bucket_acl_test.go b/internal/service/s3/bucket_acl_test.go index 146dc5e8447..2a07172d9a5 100644 --- a/internal/service/s3/bucket_acl_test.go +++ b/internal/service/s3/bucket_acl_test.go @@ -303,6 +303,159 @@ func TestAccS3BucketAcl_disappears(t *testing.T) { }) } +func TestAccS3BucketAcl_migrate_aclNoChange(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_acl.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withACL(bucketName, s3.BucketCannedACLPublicRead), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "acl", s3.BucketCannedACLPublicRead), + ), + }, + { + Config: testAccBucketAcl_Migrate_AclConfig(bucketName, s3.BucketCannedACLPublicRead), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketAclExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "acl", s3.BucketCannedACLPublicRead), + ), + }, + }, + }) +} + +func TestAccS3BucketAcl_migrate_aclWithChange(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_acl.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withACL(bucketName, s3.BucketCannedACLPublicRead), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "acl", s3.BucketCannedACLPublicRead), + ), + }, + { + Config: 
testAccBucketAcl_Migrate_AclConfig(bucketName, s3.BucketCannedACLPrivate), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketAclExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "acl", s3.BucketCannedACLPrivate), + ), + }, + }, + }) +} + +func TestAccS3BucketAcl_migrate_grantsNoChange(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_acl.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withGrants(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "grant.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(bucketResourceName, "grant.*", map[string]string{ + "permissions.#": "2", + "type": "CanonicalUser", + }), + resource.TestCheckTypeSetElemAttr(bucketResourceName, "grant.*.permissions.*", "FULL_CONTROL"), + resource.TestCheckTypeSetElemAttr(bucketResourceName, "grant.*.permissions.*", "WRITE"), + ), + }, + { + Config: testAccBucketAcl_Migrate_GrantsNoChangeConfig(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketAclExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "access_control_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, "access_control_policy.0.grant.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "access_control_policy.0.grant.*", map[string]string{ + "grantee.#": "1", + "grantee.0.type": s3.TypeCanonicalUser, + "permission": s3.PermissionFullControl, + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "access_control_policy.0.grant.*", map[string]string{ + "grantee.#": "1", + "grantee.0.type": s3.TypeCanonicalUser, + "permission": s3.PermissionWrite, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "access_control_policy.0.grant.*.grantee.0.id", "data.aws_canonical_user_id.current", "id"), + resource.TestCheckResourceAttr(resourceName, "access_control_policy.0.owner.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "access_control_policy.0.owner.0.id", "data.aws_canonical_user_id.current", "id"), + ), + }, + }, + }) +} + +func TestAccS3BucketAcl_migrate_grantsWithChange(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket_acl.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withACL(bucketName, s3.BucketCannedACLPublicRead), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "acl", s3.BucketCannedACLPublicRead), + ), + }, + { + Config: testAccBucketAcl_Migrate_GrantsWithChangeConfig(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketAclExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "access_control_policy.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"access_control_policy.0.grant.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "access_control_policy.0.grant.*", map[string]string{ + "grantee.#": "1", + "grantee.0.type": s3.TypeCanonicalUser, + "permission": s3.PermissionRead, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "access_control_policy.0.grant.*.grantee.0.id", "data.aws_canonical_user_id.current", "id"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "access_control_policy.0.grant.*", map[string]string{ + "grantee.#": "1", + "grantee.0.type": s3.TypeGroup, + "permission": s3.PermissionReadAcp, + }), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "access_control_policy.0.grant.*", map[string]*regexp.Regexp{ + "grantee.0.uri": regexp.MustCompile(`http://acs.*/groups/s3/LogDelivery`), + }), + resource.TestCheckResourceAttr(resourceName, "access_control_policy.0.owner.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "access_control_policy.0.owner.0.id", "data.aws_canonical_user_id.current", "id"), + ), + }, + }, + }) +} + func TestAccS3BucketAcl_updateACL(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resourceName := "aws_s3_bucket_acl.test" @@ -599,3 +752,88 @@ resource "aws_s3_bucket_acl" "test" { } `, bucketName) } + +func testAccBucketAcl_Migrate_AclConfig(rName, acl string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "bucket" { + bucket = %[1]q +} + +resource "aws_s3_bucket_acl" "test" { + bucket = aws_s3_bucket.bucket.id + acl = %[2]q +} +`, rName, acl) +} + +func testAccBucketAcl_Migrate_GrantsNoChangeConfig(rName string) string { + return fmt.Sprintf(` +data "aws_canonical_user_id" "current" {} + +resource "aws_s3_bucket" "bucket" { + bucket = %[1]q +} + +resource "aws_s3_bucket_acl" "test" { + bucket = aws_s3_bucket.bucket.id + access_control_policy { + grant { + grantee { + id = data.aws_canonical_user_id.current.id + type = "CanonicalUser" + } + permission = "FULL_CONTROL" + } + + grant { + grantee { + id = data.aws_canonical_user_id.current.id + type = "CanonicalUser" + } + permission = "WRITE" + } + + owner { + id = data.aws_canonical_user_id.current.id + } + } +} +`, rName) +} + +func testAccBucketAcl_Migrate_GrantsWithChangeConfig(rName string) string { + return fmt.Sprintf(` +data "aws_canonical_user_id" "current" {} + +data "aws_partition" "current" {} + +resource "aws_s3_bucket" "bucket" { + bucket = %[1]q +} + +resource "aws_s3_bucket_acl" "test" { + bucket = aws_s3_bucket.bucket.id + access_control_policy { + grant { + grantee { + id = data.aws_canonical_user_id.current.id + type = "CanonicalUser" + } + permission = "READ" + } + + grant { + grantee { + type = "Group" + uri = "http://acs.${data.aws_partition.current.dns_suffix}/groups/s3/LogDelivery" + } + permission = "READ_ACP" + } + + owner { + id = data.aws_canonical_user_id.current.id + } + } +} +`, rName) +} diff --git a/internal/service/s3/bucket_public_access_block_test.go b/internal/service/s3/bucket_public_access_block_test.go index 96fbb533817..f4ed91b150e 100644 --- a/internal/service/s3/bucket_public_access_block_test.go +++ b/internal/service/s3/bucket_public_access_block_test.go @@ -87,7 +87,7 @@ func TestAccS3BucketPublicAccessBlock_Disappears_bucket(t *testing.T) { Config: testAccBucketPublicAccessBlockConfig(name, "false", "false", "false", "false"), Check: resource.ComposeTestCheckFunc( testAccCheckBucketPublicAccessBlockExists(resourceName, &config), - testAccCheckDestroyBucket(bucketResourceName), + 
acctest.CheckResourceDisappears(acctest.Provider, tfs3.ResourceBucket(), bucketResourceName), ), ExpectNonEmptyPlan: true, }, diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index a6f917eb1f3..b49919a8e8d 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -106,6 +106,173 @@ func TestAccS3Bucket_Basic_emptyString(t *testing.T) { }) } +func TestAccS3Bucket_Basic_generatedName(t *testing.T) { + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_generatedName, + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "bucket_prefix"}, + }, + }, + }) +} + +func TestAccS3Bucket_Basic_namePrefix(t *testing.T) { + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_namePrefix, + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestMatchResourceAttr(resourceName, "bucket", regexp.MustCompile("^tf-test-")), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "bucket_prefix"}, + }, + }, + }) +} + +func TestAccS3Bucket_Basic_forceDestroy(t *testing.T) { + resourceName := "aws_s3_bucket.bucket" + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_forceDestroy(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + testAccCheckBucketAddObjects(resourceName, "data.txt", "prefix/more_data.txt"), + ), + }, + }, + }) +} + +// By default, the AWS Go SDK cleans up URIs by removing extra slashes +// when the service API requests use the URI as part of making a request. +// While the aws_s3_object resource automatically cleans the key +// to not contain these extra slashes, out-of-band handling and other AWS +// services may create keys with extra slashes (empty "directory" prefixes). 
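+// The test below covers this by seeding the bucket with an object key that has
+// a leading slash, which the resource's force_destroy logic must still delete.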
+func TestAccS3Bucket_Basic_forceDestroyWithEmptyPrefixes(t *testing.T) { + resourceName := "aws_s3_bucket.bucket" + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_forceDestroy(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + testAccCheckBucketAddObjects(resourceName, "data.txt", "/extraleadingslash.txt"), + ), + }, + }, + }) +} + +func TestAccS3Bucket_Basic_forceDestroyWithObjectLockEnabled(t *testing.T) { + resourceName := "aws_s3_bucket.bucket" + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_forceDestroyWithObjectLockEnabled(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + testAccCheckBucketAddObjectsWithLegalHold(resourceName, "data.txt", "prefix/more_data.txt"), + ), + }, + }, + }) +} + +// Test TestAccS3Bucket_disappears is designed to fail with a "plan +// not empty" error in Terraform, to check against regressions. +// See https://github.com/hashicorp/terraform/pull/2925 +func TestAccS3Bucket_disappears(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.bucket" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfs3.ResourceBucket(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccS3Bucket_Tags_basic(t *testing.T) { + rInt := sdkacctest.RandInt() + resourceName := "aws_s3_bucket.bucket1" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccMultiBucketWithTagsConfig(rInt), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + func TestAccS3Bucket_Tags_withNoSystemTags(t *testing.T) { resourceName := "aws_s3_bucket.bucket" bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") @@ -205,7 +372,7 @@ func TestAccS3Bucket_Tags_withSystemTags(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - testAccCheckDestroyBucket(resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfs3.ResourceBucket(), resourceName), testAccCheckBucketCreateViaCloudFormation(bucketName, &stackID), ), }, @@ -295,30 +462,8 @@ func TestAccS3Bucket_Tags_ignoreTags(t *testing.T) { }) } -func TestAccS3Bucket_Tags_basic(t *testing.T) 
{ - rInt := sdkacctest.RandInt() - resourceName := "aws_s3_bucket.bucket1" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), - Providers: acctest.Providers, - CheckDestroy: testAccCheckBucketDestroy, - Steps: []resource.TestStep{ - { - Config: testAccMultiBucketWithTagsConfig(rInt), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy"}, - }, - }, - }) -} - -func TestAccS3Bucket_Basic_namePrefix(t *testing.T) { +func TestAccS3Bucket_Manage_objectLock(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ @@ -328,23 +473,27 @@ func TestAccS3Bucket_Basic_namePrefix(t *testing.T) { CheckDestroy: testAccCheckBucketDestroy, Steps: []resource.TestStep{ { - Config: testAccBucketConfig_namePrefix, + Config: testAccObjectLockEnabledNoDefaultRetention(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), - resource.TestMatchResourceAttr(resourceName, "bucket", regexp.MustCompile("^tf-test-")), + resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.rule.#", "0"), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "bucket_prefix"}, + ImportStateVerifyIgnore: []string{"force_destroy"}, }, }, }) } -func TestAccS3Bucket_Basic_generatedName(t *testing.T) { +func TestAccS3Bucket_Manage_objectLock_deprecatedEnabled(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ @@ -354,27 +503,28 @@ func TestAccS3Bucket_Basic_generatedName(t *testing.T) { CheckDestroy: testAccCheckBucketDestroy, Steps: []resource.TestStep{ { - Config: testAccBucketConfig_generatedName, + Config: testAccObjectLockEnabledNoDefaultRetention_deprecatedEnabled(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.rule.#", "0"), ), }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "bucket_prefix"}, + ImportStateVerifyIgnore: []string{"force_destroy"}, }, }, }) } -// Test TestAccS3Bucket_Basic_shouldFailNotFound is designed to fail with a "plan -// not empty" error in Terraform, to check against regressions. 
-// See https://github.com/hashicorp/terraform/pull/2925 -func TestAccS3Bucket_Basic_shouldFailNotFound(t *testing.T) { +func TestAccS3Bucket_Manage_objectLock_migrate(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -383,18 +533,23 @@ func TestAccS3Bucket_Basic_shouldFailNotFound(t *testing.T) { CheckDestroy: testAccCheckBucketDestroy, Steps: []resource.TestStep{ { - Config: testAccBucketDestroyedConfig(bucketName), + Config: testAccObjectLockEnabledNoDefaultRetention_deprecatedEnabled(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), - testAccCheckDestroyBucket(resourceName), + resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), ), - ExpectNonEmptyPlan: true, + }, + { + Config: testAccObjectLockEnabledNoDefaultRetention(bucketName), + PlanOnly: true, }, }, }) } -func TestAccS3Bucket_Manage_objectLock(t *testing.T) { +func TestAccS3Bucket_Manage_objectLockWithVersioning(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resourceName := "aws_s3_bucket.test" @@ -405,13 +560,12 @@ func TestAccS3Bucket_Manage_objectLock(t *testing.T) { CheckDestroy: testAccCheckBucketDestroy, Steps: []resource.TestStep{ { - Config: testAccObjectLockEnabledNoDefaultRetention(bucketName), + Config: testAccBucketConfig_objectLockEnabledWithVersioning(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), - resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.rule.#", "0"), ), }, { @@ -424,7 +578,7 @@ func TestAccS3Bucket_Manage_objectLock(t *testing.T) { }) } -func TestAccS3Bucket_Manage_objectLock_deprecatedEnabled(t *testing.T) { +func TestAccS3Bucket_Manage_objectLockWithVersioning_deprecatedEnabled(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resourceName := "aws_s3_bucket.test" @@ -435,13 +589,12 @@ func TestAccS3Bucket_Manage_objectLock_deprecatedEnabled(t *testing.T) { CheckDestroy: testAccCheckBucketDestroy, Steps: []resource.TestStep{ { - Config: testAccObjectLockEnabledNoDefaultRetention_deprecatedEnabled(bucketName), + Config: testAccBucketConfig_objectLockEnabledWithVersioning_deprecatedEnabled(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), - resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.rule.#", "0"), ), }, { @@ -454,9 +607,9 @@ func TestAccS3Bucket_Manage_objectLock_deprecatedEnabled(t *testing.T) { }) } -func TestAccS3Bucket_Manage_objectLock_migrate(t *testing.T) { +func 
TestAccS3Bucket_Security_updateACL(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.test" + resourceName := "aws_s3_bucket.bucket" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -465,25 +618,32 @@ func TestAccS3Bucket_Manage_objectLock_migrate(t *testing.T) { CheckDestroy: testAccCheckBucketDestroy, Steps: []resource.TestStep{ { - Config: testAccObjectLockEnabledNoDefaultRetention_deprecatedEnabled(bucketName), + Config: testAccBucketConfig_withACL(bucketName, s3.BucketCannedACLPublicRead), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), - resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "acl", s3.BucketCannedACLPublicRead), ), }, { - Config: testAccObjectLockEnabledNoDefaultRetention(bucketName), - PlanOnly: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + { + Config: testAccBucketConfig_withACL(bucketName, s3.BucketCannedACLPrivate), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "acl", s3.BucketCannedACLPrivate), + ), }, }, }) } -func TestAccS3Bucket_Manage_objectLockWithVersioning(t *testing.T) { +func TestAccS3Bucket_Security_updateGrant(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.test" + resourceName := "aws_s3_bucket.bucket" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -492,12 +652,16 @@ func TestAccS3Bucket_Manage_objectLockWithVersioning(t *testing.T) { CheckDestroy: testAccCheckBucketDestroy, Steps: []resource.TestStep{ { - Config: testAccBucketConfig_objectLockEnabledWithVersioning(bucketName), + Config: testAccBucketConfig_withGrants(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), - resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "grant.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "grant.*", map[string]string{ + "permissions.#": "2", + "type": "CanonicalUser", + }), + resource.TestCheckTypeSetElemAttr(resourceName, "grant.*.permissions.*", "FULL_CONTROL"), + resource.TestCheckTypeSetElemAttr(resourceName, "grant.*.permissions.*", "WRITE"), ), }, { @@ -506,42 +670,40 @@ func TestAccS3Bucket_Manage_objectLockWithVersioning(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"force_destroy"}, }, - }, - }) -} - -func TestAccS3Bucket_Manage_objectLockWithVersioning_deprecatedEnabled(t *testing.T) { - bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), - Providers: acctest.Providers, - CheckDestroy: testAccCheckBucketDestroy, - 
Steps: []resource.TestStep{ { - Config: testAccBucketConfig_objectLockEnabledWithVersioning_deprecatedEnabled(bucketName), + Config: testAccBucketConfig_withUpdatedGrants(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), - resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "grant.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "grant.*", map[string]string{ + "permissions.#": "1", + "type": "CanonicalUser", + }), + resource.TestCheckTypeSetElemAttr(resourceName, "grant.*.permissions.*", "READ"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "grant.*", map[string]string{ + "permissions.#": "1", + "type": "Group", + "uri": "http://acs.amazonaws.com/groups/s3/LogDelivery", + }), + resource.TestCheckTypeSetElemAttr(resourceName, "grant.*.permissions.*", "READ_ACP"), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy"}, + // As Grant is a Computed field, removing them from terraform will not + // trigger an update to remove them from the S3 bucket. + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "grant.#", "2"), + ), }, }, }) } -func TestAccS3Bucket_Basic_forceDestroy(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" +func TestAccS3Bucket_Security_aclToGrant(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.bucket" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -550,45 +712,28 @@ func TestAccS3Bucket_Basic_forceDestroy(t *testing.T) { CheckDestroy: testAccCheckBucketDestroy, Steps: []resource.TestStep{ { - Config: testAccBucketConfig_forceDestroy(bucketName), + Config: testAccBucketConfig_withACL(bucketName, s3.BucketCannedACLPublicRead), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), - testAccCheckBucketAddObjects(resourceName, "data.txt", "prefix/more_data.txt"), + resource.TestCheckResourceAttr(resourceName, "acl", s3.BucketCannedACLPublicRead), + // By default, the S3 Bucket will have 2 grants configured + resource.TestCheckResourceAttr(resourceName, "grant.#", "2"), ), }, - }, - }) -} - -// By default, the AWS Go SDK cleans up URIs by removing extra slashes -// when the service API requests use the URI as part of making a request. -// While the aws_s3_object resource automatically cleans the key -// to not contain these extra slashes, out-of-band handling and other AWS -// services may create keys with extra slashes (empty "directory" prefixes). 
-func TestAccS3Bucket_Basic_forceDestroyWithEmptyPrefixes(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" - bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), - Providers: acctest.Providers, - CheckDestroy: testAccCheckBucketDestroy, - Steps: []resource.TestStep{ { - Config: testAccBucketConfig_forceDestroy(bucketName), + Config: testAccBucketConfig_withGrants(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), - testAccCheckBucketAddObjects(resourceName, "data.txt", "/extraleadingslash.txt"), + resource.TestCheckResourceAttr(resourceName, "grant.#", "1"), ), }, }, }) } -func TestAccS3Bucket_Basic_forceDestroyWithObjectLockEnabled(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" +func TestAccS3Bucket_Security_grantToACL(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.bucket" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -597,10 +742,18 @@ func TestAccS3Bucket_Basic_forceDestroyWithObjectLockEnabled(t *testing.T) { CheckDestroy: testAccCheckBucketDestroy, Steps: []resource.TestStep{ { - Config: testAccBucketConfig_forceDestroyWithObjectLockEnabled(bucketName), + Config: testAccBucketConfig_withGrants(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), - testAccCheckBucketAddObjectsWithLegalHold(resourceName, "data.txt", "prefix/more_data.txt"), + resource.TestCheckResourceAttr(resourceName, "grant.#", "1"), + ), + }, + { + Config: testAccBucketConfig_withACL(bucketName, s3.BucketCannedACLPublicRead), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "acl", s3.BucketCannedACLPublicRead), + resource.TestCheckResourceAttr(resourceName, "grant.#", "1"), ), }, }, @@ -959,29 +1112,6 @@ func testAccCheckBucketExistsWithProvider(n string, providerF func() *schema.Pro } } -func testAccCheckDestroyBucket(n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No S3 Bucket ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn - _, err := conn.DeleteBucket(&s3.DeleteBucketInput{ - Bucket: aws.String(rs.Primary.ID), - }) - - if err != nil { - return fmt.Errorf("Error destroying Bucket (%s) in testAccCheckDestroyBucket: %s", rs.Primary.ID, err) - } - return nil - } -} - func testAccCheckBucketAddObjects(n string, keys ...string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] @@ -1145,6 +1275,53 @@ resource "aws_s3_bucket" "bucket" { `, bucketName) } +func testAccBucketConfig_withACL(bucketName, acl string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "bucket" { + bucket = %[1]q + acl = %[2]q +} +`, bucketName, acl) +} + +func testAccBucketConfig_withGrants(bucketName string) string { + return fmt.Sprintf(` +data "aws_canonical_user_id" "current" {} + +resource "aws_s3_bucket" "bucket" { + bucket = %[1]q + + grant { + id = data.aws_canonical_user_id.current.id + type = "CanonicalUser" + permissions = ["FULL_CONTROL", "WRITE"] + } +} +`, bucketName) +} + +func testAccBucketConfig_withUpdatedGrants(bucketName string) string { + 
return fmt.Sprintf(` +data "aws_canonical_user_id" "current" {} + +resource "aws_s3_bucket" "bucket" { + bucket = %[1]q + + grant { + id = data.aws_canonical_user_id.current.id + type = "CanonicalUser" + permissions = ["READ"] + } + + grant { + type = "Group" + permissions = ["READ_ACP"] + uri = "http://acs.amazonaws.com/groups/s3/LogDelivery" + } +} +`, bucketName) +} + func testAccBucketConfig_withNoTags(bucketName string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "bucket" { @@ -1294,19 +1471,6 @@ resource "aws_s3_bucket_acl" "test6" { `, randInt) } -func testAccBucketDestroyedConfig(bucketName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { - bucket = %[1]q -} - -resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id - acl = "private" -} -`, bucketName) -} - func testAccObjectLockEnabledNoDefaultRetention(bucketName string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { diff --git a/internal/service/s3/object_copy.go b/internal/service/s3/object_copy.go index 6be56c43ad7..1fa6faf1164 100644 --- a/internal/service/s3/object_copy.go +++ b/internal/service/s3/object_copy.go @@ -1,6 +1,7 @@ package s3 import ( + "bytes" "fmt" "log" "net/http" @@ -14,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -749,3 +751,26 @@ func expandS3Grants(tfList []interface{}) *s3Grants { return apiObjects } + +func grantHash(v interface{}) int { + var buf bytes.Buffer + m, ok := v.(map[string]interface{}) + + if !ok { + return 0 + } + + if v, ok := m["id"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if v, ok := m["uri"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + if p, ok := m["permissions"]; ok { + buf.WriteString(fmt.Sprintf("%v-", p.(*schema.Set).List())) + } + return create.StringHashcode(buf.String()) +} diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 71148bbc9fa..c94583a7dca 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -12,6 +12,14 @@ Provides a S3 bucket resource. -> This functionality is for managing S3 in an AWS Partition. To manage [S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html), see the [`aws_s3control_bucket`](/docs/providers/aws/r/s3control_bucket.html) resource. +~> **NOTE on S3 Bucket canned ACL Configuration:** S3 Bucket canned ACL can be configured in either the standalone resource [`aws_s3_bucket_acl`](s3_bucket_acl.html.markdown) +or with the deprecated parameter `acl` in the resource `aws_s3_bucket`. +Configuring with both will cause inconsistencies and may overwrite configuration. + +~> **NOTE on S3 Bucket ACL Grants Configuration:** S3 Bucket grants can be configured in either the standalone resource [`aws_s3_bucket_acl`](s3_bucket_acl.html.markdown) +or with the deprecated parameter `grant` in the resource `aws_s3_bucket`. +Configuring with both will cause inconsistencies and may overwrite configuration. 
+ ## Example Usage ### Private Bucket w/ Tags @@ -76,8 +84,28 @@ See the [`aws_s3_bucket_server_side_encryption_configuration` resource](s3_bucke ### Using ACL policy grants -The `acl` and `grant` arguments are read-only as of version 4.0 of the Terraform AWS Provider. -See the [`aws_s3_bucket_acl` resource](s3_bucket_acl.html.markdown) for configuration details. +-> **NOTE:** The parameters `acl` and `grant` are deprecated. +Use the resource [`aws_s3_bucket_acl`](s3_bucket_acl.html.markdown) instead. + +```terraform +data "aws_canonical_user_id" "current_user" {} + +resource "aws_s3_bucket" "bucket" { + bucket = "mybucket" + + grant { + id = data.aws_canonical_user_id.current_user.id + type = "CanonicalUser" + permissions = ["FULL_CONTROL"] + } + + grant { + type = "Group" + permissions = ["READ_ACP", "WRITE"] + uri = "http://acs.amazonaws.com/groups/s3/LogDelivery" + } +} +``` ## Argument Reference @@ -85,11 +113,24 @@ The following arguments are supported: * `bucket` - (Optional, Forces new resource) The name of the bucket. If omitted, Terraform will assign a random, unique name. Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). * `bucket_prefix` - (Optional, Forces new resource) Creates a unique bucket name beginning with the specified prefix. Conflicts with `bucket`. Must be lowercase and less than or equal to 37 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). +* `acl` - (Optional, **Deprecated**) The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_acl`](s3_bucket_acl.html.markdown) instead. +* `grant` - (Optional, **Deprecated**) An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl). See [Grant](#grant) below for details. Conflicts with `acl`. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_acl`](s3_bucket_acl.html.markdown) instead. * `force_destroy` - (Optional, Default:`false`) A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable. * `object_lock_enabled` - (Optional, Default:`false`, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled. * `object_lock_configuration` - (Optional) A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See [Object Lock Configuration](#object-lock-configuration) below. * `tags` - (Optional) A map of tags to assign to the bucket. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
+### Grant + +~> **NOTE:** Currently, changes to the `grant` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes of ACL grants to an S3 bucket, use the `aws_s3_bucket_acl` resource instead. If you use `grant` on an `aws_s3_bucket`, Terraform will assume management over the full set of ACL grants for the S3 bucket, treating additional ACL grants as drift. For this reason, `grant` cannot be mixed with the external `aws_s3_bucket_acl` resource for a given S3 bucket. + +The `grant` configuration block supports the following arguments: + +* `id` - (optional) Canonical user id to grant for. Used only when `type` is `CanonicalUser`. +* `type` - (required) - Type of grantee to apply for. Valid values are `CanonicalUser` and `Group`. `AmazonCustomerByEmail` is not supported. +* `permissions` - (required) List of permissions to apply for grantee. Valid values are `READ`, `WRITE`, `READ_ACP`, `WRITE_ACP`, `FULL_CONTROL`. +* `uri` - (optional) Uri address to grant for. Used only when `type` is `Group`. + ### Object Lock Configuration ~> **NOTE:** You can only **enable** S3 Object Lock for **new** buckets. If you need to **enable** S3 Object Lock for an **existing** bucket, please contact AWS Support. @@ -107,7 +148,6 @@ In addition to all arguments above, the following attributes are exported: * `id` - The name of the bucket. * `acceleration_status` - (Optional) The accelerate configuration status of the bucket. Not available in `cn-north-1` or `us-gov-west-1`. -* `acl` - The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) applied to the bucket. * `arn` - The ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`. * `bucket_domain_name` - The bucket domain name. Will be of format `bucketname.s3.amazonaws.com`. * `bucket_regional_domain_name` - The bucket region-specific domain name. The bucket domain name including the region name, please refer [here](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent [redirect issues](https://forums.aws.amazon.com/thread.jspa?threadID=216814) from CloudFront to S3 Origin URL. @@ -117,11 +157,6 @@ In addition to all arguments above, the following attributes are exported: * `allowed_origins` - Set of origins customers are able to access the bucket from. * `expose_headers` - Set of headers in the response that customers are able to access from their applications. * `max_age_seconds` The time in seconds that browser can cache the response for a preflight request. -* `grant` - The set of [ACL policy grants](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl). - * `id` - Canonical user id of the grantee. - * `type` - Type of grantee. - * `permissions` - List of permissions given to the grantee. - * `uri` - URI of the grantee group. * `hosted_zone_id` - The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. * `lifecycle_rule` - A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). * `id` - Unique identifier for the rule. 
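[Editor's note] For readers following the `acl` and `grant` change in the patch above, here is a minimal sketch of the two equivalent configurations the updated documentation describes: the deprecated inline `acl` argument next to the standalone `aws_s3_bucket_acl` resource. The bucket names and resource labels are hypothetical; the argument names and the `private` canned ACL value come from the patch itself.

```terraform
# Deprecated inline form, configurable again as of this patch:
resource "aws_s3_bucket" "inline" {
  bucket = "my-tf-example-bucket" # hypothetical name
  acl    = "private"
}

# Preferred standalone form from the updated documentation:
resource "aws_s3_bucket" "standalone" {
  bucket = "my-tf-example-bucket-2" # hypothetical name
}

resource "aws_s3_bucket_acl" "standalone" {
  bucket = aws_s3_bucket.standalone.id
  acl    = "private"
}
```

Per the argument documentation added in this patch, Terraform only performs drift detection on `acl` and `grant` when a configuration value is provided, so existing configurations continue to work until they are migrated to the standalone resource.
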
From 32b6a318ee67793d34c524489418f8b134c497ce Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 22 Mar 2022 20:25:04 -0400 Subject: [PATCH 02/42] Update CHANGELOG for #23798 --- .changelog/23798.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/23798.txt diff --git a/.changelog/23798.txt b/.changelog/23798.txt new file mode 100644 index 00000000000..1e9a0306c33 --- /dev/null +++ b/.changelog/23798.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket: Update `acl` and `grant` parameters to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring these parameters with the standalone `aws_s3_bucket_acl` resource. +``` \ No newline at end of file From 1bbeb42ee2095fc1228624623b598f77140eedcf Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 31 Mar 2022 16:37:31 -0400 Subject: [PATCH 03/42] Reformat arg details --- website/docs/r/s3_bucket.html.markdown | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index c94583a7dca..db6066aa6c1 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -126,10 +126,10 @@ The following arguments are supported: The `grant` configuration block supports the following arguments: -* `id` - (optional) Canonical user id to grant for. Used only when `type` is `CanonicalUser`. -* `type` - (required) - Type of grantee to apply for. Valid values are `CanonicalUser` and `Group`. `AmazonCustomerByEmail` is not supported. -* `permissions` - (required) List of permissions to apply for grantee. Valid values are `READ`, `WRITE`, `READ_ACP`, `WRITE_ACP`, `FULL_CONTROL`. -* `uri` - (optional) Uri address to grant for. Used only when `type` is `Group`. +* `id` - (Optional) Canonical user id to grant for. Used only when `type` is `CanonicalUser`. +* `type` - (Required) Type of grantee to apply for. Valid values are `CanonicalUser` and `Group`. `AmazonCustomerByEmail` is not supported. +* `permissions` - (Required) List of permissions to apply for grantee. Valid values are `READ`, `WRITE`, `READ_ACP`, `WRITE_ACP`, `FULL_CONTROL`. +* `uri` - (Optional) Uri address to grant for. Used only when `type` is `Group`. ### Object Lock Configuration From 3ab5e359264c37285b18f3356a5a11e672ec878a Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 31 Mar 2022 20:40:04 +0000 Subject: [PATCH 04/42] Update CHANGELOG.md for #23798 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1743bebce68..4b602f1c543 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ ENHANCEMENTS: * resource/aws_iot_authorizer: Add `enable_caching_for_http` argument ([#23993](https://github.com/hashicorp/terraform-provider-aws/issues/23993)) * resource/aws_lambda_permission: Add `principal_org_id` argument. ([#24001](https://github.com/hashicorp/terraform-provider-aws/issues/24001)) * resource/aws_mq_broker: Add validation to `broker_name` and `security_groups` arguments ([#18088](https://github.com/hashicorp/terraform-provider-aws/issues/18088)) +* resource/aws_s3_bucket: Update `acl` and `grant` parameters to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring these parameters with the standalone `aws_s3_bucket_acl` resource. 
([#23798](https://github.com/hashicorp/terraform-provider-aws/issues/23798)) * resource/aws_route: Add `core_network_arn` argument ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) * resource/aws_route_table: Add `core_network_arn` argument to the `route` configuration block ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) * resource/aws_vpc_ipam: add `cascade` argument ([#23973](https://github.com/hashicorp/terraform-provider-aws/issues/23973)) From e156790477ba4a8a5a360c24b8213ae139a9b99e Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 22 Mar 2022 21:04:00 -0400 Subject: [PATCH 05/42] r/s3_bucket: make 'acceleration_status' configurable --- internal/service/s3/bucket.go | 31 ++++++++-- .../bucket_accelerate_configuration_test.go | 60 +++++++++++++++++++ internal/service/s3/bucket_test.go | 47 +++++++++++++++ website/docs/r/s3_bucket.html.markdown | 7 ++- 4 files changed, 140 insertions(+), 5 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 837e6c5eb2e..92399b50b96 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -378,9 +378,11 @@ func ResourceBucket() *schema.Resource { }, "acceleration_status": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_accelerate_configuration resource instead", + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: "Use the aws_s3_bucket_accelerate_configuration resource instead", + ValidateFunc: validation.StringInSlice(s3.BucketAccelerateStatus_Values(), false), }, "request_payer": { @@ -762,6 +764,12 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("acceleration_status") { + if err := resourceBucketInternalAccelerationUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Acceleration Status: %w", d.Id(), err) + } + } + if d.HasChange("acl") && !d.IsNewResource() { if err := resourceBucketInternalACLUpdate(conn, d); err != nil { return fmt.Errorf("error updating S3 Bucket (%s) ACL: %w", d.Id(), err) @@ -1000,7 +1008,7 @@ func resourceBucketRead(d *schema.ResourceData, meta interface{}) error { // Amazon S3 Transfer Acceleration might not be supported in the region if err != nil && !tfawserr.ErrCodeEquals(err, ErrCodeMethodNotAllowed, ErrCodeUnsupportedArgument, ErrCodeNotImplemented) { - return fmt.Errorf("error getting S3 Bucket acceleration configuration: %w", err) + return fmt.Errorf("error getting S3 Bucket (%s) accelerate configuration: %w", d.Id(), err) } if accelerate, ok := accelerateResponse.(*s3.GetBucketAccelerateConfigurationOutput); ok { @@ -1482,6 +1490,21 @@ func normalizeRegion(region string) string { ////////////////////////////////////////// Argument-Specific Update Functions ////////////////////////////////////////// +func resourceBucketInternalAccelerationUpdate(conn *s3.S3, d *schema.ResourceData) error { + input := &s3.PutBucketAccelerateConfigurationInput{ + Bucket: aws.String(d.Id()), + AccelerateConfiguration: &s3.AccelerateConfiguration{ + Status: aws.String(d.Get("acceleration_status").(string)), + }, + } + + _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.PutBucketAccelerateConfiguration(input) + }) + + return err +} + func resourceBucketInternalACLUpdate(conn *s3.S3, d *schema.ResourceData) error { acl := d.Get("acl").(string) if acl == "" { diff --git 
a/internal/service/s3/bucket_accelerate_configuration_test.go b/internal/service/s3/bucket_accelerate_configuration_test.go index aacc2f8c345..223c3c5047d 100644 --- a/internal/service/s3/bucket_accelerate_configuration_test.go +++ b/internal/service/s3/bucket_accelerate_configuration_test.go @@ -109,6 +109,66 @@ func TestAccS3BucketAccelerateConfiguration_disappears(t *testing.T) { }) } +func TestAccS3BucketAccelerateConfiguration_migrate_noChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_accelerate_configuration.test" + bucketResourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketAccelerateConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withAcceleration(rName, s3.BucketAccelerateStatusEnabled), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "acceleration_status", s3.BucketAccelerateStatusEnabled), + ), + }, + { + Config: testAccBucketAccelerateConfigurationBasicConfig(rName, s3.BucketAccelerateStatusEnabled), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketAccelerateConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "bucket", bucketResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "status", s3.BucketAccelerateStatusEnabled), + ), + }, + }, + }) +} + +func TestAccS3BucketAccelerateConfiguration_migrate_withChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_accelerate_configuration.test" + bucketResourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketAccelerateConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withAcceleration(rName, s3.BucketAccelerateStatusEnabled), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "acceleration_status", s3.BucketAccelerateStatusEnabled), + ), + }, + { + Config: testAccBucketAccelerateConfigurationBasicConfig(rName, s3.BucketAccelerateStatusSuspended), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketAccelerateConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "bucket", bucketResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "status", s3.BucketAccelerateStatusSuspended), + ), + }, + }, + }) +} + func testAccCheckBucketAccelerateConfigurationDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index b49919a8e8d..db39cc31a79 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -12,6 +12,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/cloudformation" + "github.com/aws/aws-sdk-go/service/cloudfront" "github.com/aws/aws-sdk-go/service/s3" 
"github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" @@ -225,6 +226,43 @@ func TestAccS3Bucket_Basic_forceDestroyWithObjectLockEnabled(t *testing.T) { }) } +func TestAccS3Bucket_Basic_acceleration(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(t) + acctest.PreCheckPartitionHasService(cloudfront.EndpointsID, t) + }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withAcceleration(bucketName, s3.BucketAccelerateStatusEnabled), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "acceleration_status", s3.BucketAccelerateStatusEnabled), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccBucketConfig_withAcceleration(bucketName, s3.BucketAccelerateStatusSuspended), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "acceleration_status", s3.BucketAccelerateStatusSuspended), + ), + }, + }, + }) +} + // Test TestAccS3Bucket_disappears is designed to fail with a "plan // not empty" error in Terraform, to check against regressions. // See https://github.com/hashicorp/terraform/pull/2925 @@ -1275,6 +1313,15 @@ resource "aws_s3_bucket" "bucket" { `, bucketName) } +func testAccBucketConfig_withAcceleration(bucketName, acceleration string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acceleration_status = %[2]q +} +`, bucketName, acceleration) +} + func testAccBucketConfig_withACL(bucketName, acl string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "bucket" { diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index db6066aa6c1..4048ae5b871 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -12,6 +12,10 @@ Provides a S3 bucket resource. -> This functionality is for managing S3 in an AWS Partition. To manage [S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html), see the [`aws_s3control_bucket`](/docs/providers/aws/r/s3control_bucket.html) resource. +~> **NOTE on S3 Bucket Accelerate Configuration:** S3 Bucket Accelerate can be configured in either the standalone resource [`aws_s3_bucket_accelerate_configuration`](s3_bucket_accelerate_configuration.html) +or with the deprecated parameter `acceleration_status` in the resource `aws_s3_bucket`. +Configuring with both will cause inconsistencies and may overwrite configuration. + ~> **NOTE on S3 Bucket canned ACL Configuration:** S3 Bucket canned ACL can be configured in either the standalone resource [`aws_s3_bucket_acl`](s3_bucket_acl.html.markdown) or with the deprecated parameter `acl` in the resource `aws_s3_bucket`. Configuring with both will cause inconsistencies and may overwrite configuration. @@ -113,6 +117,8 @@ The following arguments are supported: * `bucket` - (Optional, Forces new resource) The name of the bucket. If omitted, Terraform will assign a random, unique name. 
Must be lowercase and less than or equal to 63 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). * `bucket_prefix` - (Optional, Forces new resource) Creates a unique bucket name beginning with the specified prefix. Conflicts with `bucket`. Must be lowercase and less than or equal to 37 characters in length. A full list of bucket naming rules [may be found here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html). +* `acceleration_status` - (Optional, **Deprecated**) Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`. Cannot be used in `cn-north-1` or `us-gov-west-1`. Terraform will only perform drift detection if a configuration value is provided. + Use the resource [`aws_s3_bucket_accelerate_configuration`](s3_bucket_accelerate_configuration.html) instead. * `acl` - (Optional, **Deprecated**) The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_acl`](s3_bucket_acl.html.markdown) instead. * `grant` - (Optional, **Deprecated**) An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl). See [Grant](#grant) below for details. Conflicts with `acl`. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_acl`](s3_bucket_acl.html.markdown) instead. * `force_destroy` - (Optional, Default:`false`) A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable. @@ -147,7 +153,6 @@ The `object_lock_configuration` configuration block supports the following argum In addition to all arguments above, the following attributes are exported: * `id` - The name of the bucket. -* `acceleration_status` - (Optional) The accelerate configuration status of the bucket. Not available in `cn-north-1` or `us-gov-west-1`. * `arn` - The ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`. * `bucket_domain_name` - The bucket domain name. Will be of format `bucketname.s3.amazonaws.com`. * `bucket_regional_domain_name` - The bucket region-specific domain name. The bucket domain name including the region name, please refer [here](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent [redirect issues](https://forums.aws.amazon.com/thread.jspa?threadID=216814) from CloudFront to S3 Origin URL. 
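[Editor's note] As with `acl` above, a minimal sketch of the migration path for `acceleration_status`, assuming hypothetical bucket names and resource labels; the `Enabled`/`Suspended` values and the `bucket` and `status` arguments of `aws_s3_bucket_accelerate_configuration` are taken from the acceptance tests in this patch.

```terraform
# Deprecated inline form made configurable by this patch:
resource "aws_s3_bucket" "inline" {
  bucket              = "my-tf-accelerated-bucket" # hypothetical name
  acceleration_status = "Enabled"
}

# Equivalent standalone form:
resource "aws_s3_bucket" "standalone" {
  bucket = "my-tf-accelerated-bucket-2" # hypothetical name
}

resource "aws_s3_bucket_accelerate_configuration" "standalone" {
  bucket = aws_s3_bucket.standalone.id
  status = "Enabled"
}
```

Note that, per the argument documentation above, Transfer Acceleration cannot be used in `cn-north-1` or `us-gov-west-1`.
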
From d4025da83a65ed8877d55b2d6f1ee7afd632f619 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 22 Mar 2022 21:06:37 -0400 Subject: [PATCH 06/42] Update CHANGELOG for #23816 --- .changelog/23816.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/23816.txt diff --git a/.changelog/23816.txt b/.changelog/23816.txt new file mode 100644 index 00000000000..ccec53a7cd9 --- /dev/null +++ b/.changelog/23816.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket: Update `acceleration_status` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_accelerate_configuration` resource. +``` \ No newline at end of file From 1fdfe2e8b7c2db427f7a761a2c73cfc3d7686c2e Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 31 Mar 2022 21:10:03 +0000 Subject: [PATCH 07/42] Update CHANGELOG.md for #23816 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b602f1c543..74921e46767 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ ENHANCEMENTS: * resource/aws_iot_authorizer: Add `enable_caching_for_http` argument ([#23993](https://github.com/hashicorp/terraform-provider-aws/issues/23993)) * resource/aws_lambda_permission: Add `principal_org_id` argument. ([#24001](https://github.com/hashicorp/terraform-provider-aws/issues/24001)) * resource/aws_mq_broker: Add validation to `broker_name` and `security_groups` arguments ([#18088](https://github.com/hashicorp/terraform-provider-aws/issues/18088)) +* resource/aws_s3_bucket: Update `acceleration_status` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_accelerate_configuration` resource. ([#23816](https://github.com/hashicorp/terraform-provider-aws/issues/23816)) * resource/aws_s3_bucket: Update `acl` and `grant` parameters to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring these parameters with the standalone `aws_s3_bucket_acl` resource. 
([#23798](https://github.com/hashicorp/terraform-provider-aws/issues/23798)) * resource/aws_route: Add `core_network_arn` argument ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) * resource/aws_route_table: Add `core_network_arn` argument to the `route` configuration block ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) From 6ffc28d22358fe572a9779fd68ad5980f5e9b166 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 22 Mar 2022 21:46:06 -0400 Subject: [PATCH 08/42] r/s3_bucket: make 'cors_rule' configurable --- internal/service/s3/bucket.go | 106 ++++++-- .../s3/bucket_cors_configuration_test.go | 120 +++++++++ internal/service/s3/bucket_test.go | 234 ++++++++++++++++++ website/docs/r/s3_bucket.html.markdown | 42 +++- 4 files changed, 475 insertions(+), 27 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 92399b50b96..d0c9fb82917 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -128,38 +128,34 @@ func ResourceBucket() *schema.Resource { "cors_rule": { Type: schema.TypeList, + Optional: true, Computed: true, Deprecated: "Use the aws_s3_bucket_cors_configuration resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "allowed_headers": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_cors_configuration resource instead", - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "allowed_methods": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_cors_configuration resource instead", - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "allowed_origins": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_cors_configuration resource instead", - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "expose_headers": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_cors_configuration resource instead", - Elem: &schema.Schema{Type: schema.TypeString}, + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, "max_age_seconds": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_cors_configuration resource instead", + Type: schema.TypeInt, + Optional: true, }, }, }, @@ -776,6 +772,12 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("cors_rule") { + if err := resourceBucketInternalCorsUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) CORS Rules: %w", d.Id(), err) + } + } + if d.HasChange("grant") { if err := resourceBucketInternalGrantsUpdate(conn, d); err != nil { return fmt.Errorf("error updating S3 Bucket (%s) Grants: %w", d.Id(), err) @@ -1524,6 +1526,72 @@ func resourceBucketInternalACLUpdate(conn *s3.S3, d *schema.ResourceData) error return err } +func resourceBucketInternalCorsUpdate(conn *s3.S3, d *schema.ResourceData) error { + rawCors := d.Get("cors_rule").([]interface{}) + + if len(rawCors) == 0 { + // Delete CORS + _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{ + Bucket: aws.String(d.Id()), + }) + }) + 
+ if err != nil { + return fmt.Errorf("error deleting S3 Bucket (%s) CORS: %w", d.Id(), err) + } + + return nil + } + // Put CORS + rules := make([]*s3.CORSRule, 0, len(rawCors)) + for _, cors := range rawCors { + // Prevent panic + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/7546 + corsMap, ok := cors.(map[string]interface{}) + if !ok { + continue + } + r := &s3.CORSRule{} + for k, v := range corsMap { + if k == "max_age_seconds" { + r.MaxAgeSeconds = aws.Int64(int64(v.(int))) + } else { + vMap := make([]*string, len(v.([]interface{}))) + for i, vv := range v.([]interface{}) { + if str, ok := vv.(string); ok { + vMap[i] = aws.String(str) + } + } + switch k { + case "allowed_headers": + r.AllowedHeaders = vMap + case "allowed_methods": + r.AllowedMethods = vMap + case "allowed_origins": + r.AllowedOrigins = vMap + case "expose_headers": + r.ExposeHeaders = vMap + } + } + } + rules = append(rules, r) + } + + input := &s3.PutBucketCorsInput{ + Bucket: aws.String(d.Id()), + CORSConfiguration: &s3.CORSConfiguration{ + CORSRules: rules, + }, + } + + _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.PutBucketCors(input) + }) + + return err +} + func resourceBucketInternalGrantsUpdate(conn *s3.S3, d *schema.ResourceData) error { grants := d.Get("grant").(*schema.Set) diff --git a/internal/service/s3/bucket_cors_configuration_test.go b/internal/service/s3/bucket_cors_configuration_test.go index 5e94468b56c..0143c8771e4 100644 --- a/internal/service/s3/bucket_cors_configuration_test.go +++ b/internal/service/s3/bucket_cors_configuration_test.go @@ -219,6 +219,89 @@ func TestAccS3BucketCorsConfiguration_MultipleRules(t *testing.T) { }) } +func TestAccS3BucketCorsConfiguration_migrate_corsRuleNoChange(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.test" + resourceName := "aws_s3_bucket_cors_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withCORS(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "cors_rule.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "cors_rule.0.allowed_headers.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "cors_rule.0.allowed_methods.#", "2"), + resource.TestCheckResourceAttr(bucketResourceName, "cors_rule.0.allowed_origins.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "cors_rule.0.expose_headers.#", "2"), + resource.TestCheckResourceAttr(bucketResourceName, "cors_rule.0.max_age_seconds", "3000"), + ), + }, + { + Config: testAccBucketCorsConfigurationConfig_Migrate_CorsRuleNoChange(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketCorsConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "bucket", bucketResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "cors_rule.*", map[string]string{ + "allowed_headers.#": "1", + "allowed_methods.#": "2", + "allowed_origins.#": "1", + "expose_headers.#": "2", + "max_age_seconds": "3000", + }), + ), + }, + }, + }) +} + +func 
TestAccS3BucketCorsConfiguration_migrate_corsRuleWithChange(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.test" + resourceName := "aws_s3_bucket_cors_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withCORS(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "cors_rule.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "cors_rule.0.allowed_headers.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "cors_rule.0.allowed_methods.#", "2"), + resource.TestCheckResourceAttr(bucketResourceName, "cors_rule.0.allowed_origins.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "cors_rule.0.expose_headers.#", "2"), + resource.TestCheckResourceAttr(bucketResourceName, "cors_rule.0.max_age_seconds", "3000"), + ), + }, + { + Config: testAccBucketCorsConfigurationConfig_Migrate_CorsRuleWithChange(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketCorsConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "bucket", bucketResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "cors_rule.*", map[string]string{ + "allowed_methods.#": "1", + "allowed_origins.#": "1", + }), + resource.TestCheckTypeSetElemAttr(resourceName, "cors_rule.*.allowed_methods.*", "PUT"), + resource.TestCheckTypeSetElemAttr(resourceName, "cors_rule.*.allowed_origins.*", "https://www.example.com"), + ), + }, + }, + }) +} + func testAccCheckBucketCorsConfigurationDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn @@ -360,3 +443,40 @@ resource "aws_s3_bucket_cors_configuration" "test" { } `, rName) } + +func testAccBucketCorsConfigurationConfig_Migrate_CorsRuleNoChange(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_cors_configuration" "test" { + bucket = aws_s3_bucket.test.id + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT", "POST"] + allowed_origins = ["https://www.example.com"] + expose_headers = ["x-amz-server-side-encryption", "ETag"] + max_age_seconds = 3000 + } +} +`, rName) +} + +func testAccBucketCorsConfigurationConfig_Migrate_CorsRuleWithChange(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_cors_configuration" "test" { + bucket = aws_s3_bucket.test.id + + cors_rule { + allowed_methods = ["PUT"] + allowed_origins = ["https://www.example.com"] + } +} +`, rName) +} diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index db39cc31a79..7c9ca3171e4 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -798,6 +798,195 @@ func TestAccS3Bucket_Security_grantToACL(t *testing.T) { }) } +func TestAccS3Bucket_Security_corsUpdate(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + updateBucketCors := func(n string) resource.TestCheckFunc { + return func(s 
*terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn + _, err := conn.PutBucketCors(&s3.PutBucketCorsInput{ + Bucket: aws.String(rs.Primary.ID), + CORSConfiguration: &s3.CORSConfiguration{ + CORSRules: []*s3.CORSRule{ + { + AllowedHeaders: []*string{aws.String("*")}, + AllowedMethods: []*string{aws.String("GET")}, + AllowedOrigins: []*string{aws.String("https://www.example.com")}, + }, + }, + }, + }) + if err != nil && !tfawserr.ErrCodeEquals(err, tfs3.ErrCodeNoSuchCORSConfiguration) { + return err + } + return nil + } + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withCORS(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_headers.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_headers.0", "*"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.#", "2"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.0", "PUT"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.1", "POST"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_origins.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_origins.0", "https://www.example.com"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.#", "2"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.0", "x-amz-server-side-encryption"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.1", "ETag"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.max_age_seconds", "3000"), + updateBucketCors(resourceName), + ), + ExpectNonEmptyPlan: true, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + { + Config: testAccBucketConfig_withCORS(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_headers.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_headers.0", "*"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.#", "2"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.0", "PUT"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.1", "POST"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_origins.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_origins.0", "https://www.example.com"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.#", "2"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.0", "x-amz-server-side-encryption"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.1", "ETag"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.max_age_seconds", "3000"), + ), + }, + 
}, + }) +} + +func TestAccS3Bucket_Security_corsDelete(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + deleteBucketCors := func(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn + _, err := conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{ + Bucket: aws.String(rs.Primary.ID), + }) + if err != nil && !tfawserr.ErrCodeEquals(err, tfs3.ErrCodeNoSuchCORSConfiguration) { + return err + } + return nil + } + } + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withCORS(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + deleteBucketCors(resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccS3Bucket_Security_corsEmptyOrigin(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withCORSEmptyOrigin(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_headers.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_headers.0", "*"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.#", "2"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.0", "PUT"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.1", "POST"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_origins.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_origins.0", ""), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.#", "2"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.0", "x-amz-server-side-encryption"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.1", "ETag"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.max_age_seconds", "3000"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccS3Bucket_Security_corsSingleMethodAndEmptyOrigin(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withCORSSingleMethodAndEmptyOrigin(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), + }, + { + ResourceName: 
resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + func TestBucketName(t *testing.T) { validDnsNames := []string{ "foobar", @@ -1331,6 +1520,51 @@ resource "aws_s3_bucket" "bucket" { `, bucketName, acl) } +func testAccBucketConfig_withCORS(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT", "POST"] + allowed_origins = ["https://www.example.com"] + expose_headers = ["x-amz-server-side-encryption", "ETag"] + max_age_seconds = 3000 + } +} +`, bucketName) +} + +func testAccBucketConfig_withCORSSingleMethodAndEmptyOrigin(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + cors_rule { + allowed_methods = ["GET"] + allowed_origins = [""] + } +} +`, bucketName) +} + +func testAccBucketConfig_withCORSEmptyOrigin(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT", "POST"] + allowed_origins = [""] + expose_headers = ["x-amz-server-side-encryption", "ETag"] + max_age_seconds = 3000 + } +} +`, bucketName) +} + func testAccBucketConfig_withGrants(bucketName string) string { return fmt.Sprintf(` data "aws_canonical_user_id" "current" {} diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 4048ae5b871..f896105f4ce 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -24,6 +24,10 @@ Configuring with both will cause inconsistencies and may overwrite configuration or with the deprecated parameter `grant` in the resource `aws_s3_bucket`. Configuring with both will cause inconsistencies and may overwrite configuration. +~> **NOTE on S3 Bucket CORS Configuration:** S3 Bucket CORS can be configured in either the standalone resource [`aws_s3_bucket_cors_configuration`](s3_bucket_cors_configuration.html.markdown) +or with the deprecated parameter `cors_rule` in the resource `aws_s3_bucket`. +Configuring with both will cause inconsistencies and may overwrite configuration. + ## Example Usage ### Private Bucket w/ Tags @@ -51,8 +55,23 @@ See the [`aws_s3_bucket_website_configuration` resource](s3_bucket_website_confi ### Using CORS -The `cors_rule` argument is read-only as of version 4.0 of the Terraform AWS Provider. -See the [`aws_s3_bucket_cors_configuration` resource](s3_bucket_cors_configuration.html.markdown) for configuration details. +-> **NOTE:** The parameter `cors_rule` is deprecated. +Use the resource [`aws_s3_bucket_cors_configuration`](s3_bucket_cors_configuration.html.markdown) instead. + +```terraform +resource "aws_s3_bucket" "b" { + bucket = "s3-website-test.hashicorp.com" + acl = "public-read" + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT", "POST"] + allowed_origins = ["https://s3-website-test.hashicorp.com"] + expose_headers = ["ETag"] + max_age_seconds = 3000 + } +} +``` ### Using versioning @@ -121,11 +140,24 @@ The following arguments are supported: Use the resource [`aws_s3_bucket_accelerate_configuration`](s3_bucket_accelerate_configuration.html) instead. * `acl` - (Optional, **Deprecated**) The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. 
Valid values are `private`, `public-read`, `public-read-write`, `aws-exec-read`, `authenticated-read`, and `log-delivery-write`. Defaults to `private`. Conflicts with `grant`. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_acl`](s3_bucket_acl.html.markdown) instead.
* `grant` - (Optional, **Deprecated**) An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl). See [Grant](#grant) below for details. Conflicts with `acl`. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_acl`](s3_bucket_acl.html.markdown) instead.
+* `cors_rule` - (Optional, **Deprecated**) A rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). See [CORS rule](#cors-rule) below for details. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_cors_configuration`](s3_bucket_cors_configuration.html.markdown) instead.
* `force_destroy` - (Optional, Default:`false`) A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable.
* `object_lock_enabled` - (Optional, Default:`false`, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled.
* `object_lock_configuration` - (Optional) A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See [Object Lock Configuration](#object-lock-configuration) below.
* `tags` - (Optional) A map of tags to assign to the bucket. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.

+### CORS Rule
+
+~> **NOTE:** Currently, changes to the `cors_rule` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes of CORS rules to an S3 bucket, use the `aws_s3_bucket_cors_configuration` resource instead. If you use `cors_rule` on an `aws_s3_bucket`, Terraform will assume management over the full set of CORS rules for the S3 bucket, treating additional CORS rules as drift. For this reason, `cors_rule` cannot be mixed with the external `aws_s3_bucket_cors_configuration` resource for a given S3 bucket.
+
+The `cors_rule` configuration block supports the following arguments:
+
+* `allowed_headers` - (Optional) List of headers allowed.
+* `allowed_methods` - (Required) One or more HTTP methods that you allow the origin to execute. Can be `GET`, `PUT`, `POST`, `DELETE`, or `HEAD`.
+* `allowed_origins` - (Required) One or more origins you want customers to be able to access the bucket from.
+* `expose_headers` - (Optional) One or more headers in the response that you want customers to be able to access from their applications (for example, from a JavaScript `XMLHttpRequest` object).
+* `max_age_seconds` - (Optional) Specifies the time in seconds that the browser can cache the response for a preflight request.
+
### Grant

~> **NOTE:** Currently, changes to the `grant` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes of ACL grants to an S3 bucket, use the `aws_s3_bucket_acl` resource instead.
If you use `grant` on an `aws_s3_bucket`, Terraform will assume management over the full set of ACL grants for the S3 bucket, treating additional ACL grants as drift. For this reason, `grant` cannot be mixed with the external `aws_s3_bucket_acl` resource for a given S3 bucket. @@ -156,12 +188,6 @@ In addition to all arguments above, the following attributes are exported: * `arn` - The ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`. * `bucket_domain_name` - The bucket domain name. Will be of format `bucketname.s3.amazonaws.com`. * `bucket_regional_domain_name` - The bucket region-specific domain name. The bucket domain name including the region name, please refer [here](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent [redirect issues](https://forums.aws.amazon.com/thread.jspa?threadID=216814) from CloudFront to S3 Origin URL. -* `cors_rule` - Set of origins and methods ([cross-origin](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) access allowed). - * `allowed_headers` - Set of headers that are specified in the Access-Control-Request-Headers header. - * `allowed_methods` - Set of HTTP methods that the origin is allowed to execute. - * `allowed_origins` - Set of origins customers are able to access the bucket from. - * `expose_headers` - Set of headers in the response that customers are able to access from their applications. - * `max_age_seconds` The time in seconds that browser can cache the response for a preflight request. * `hosted_zone_id` - The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. * `lifecycle_rule` - A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). * `id` - Unique identifier for the rule. From 2e172197f567036d202772bc52049c076672044f Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 22 Mar 2022 21:51:28 -0400 Subject: [PATCH 09/42] Update CHANGELOG for #23817 --- .changelog/23817.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/23817.txt diff --git a/.changelog/23817.txt b/.changelog/23817.txt new file mode 100644 index 00000000000..9fe2bc08546 --- /dev/null +++ b/.changelog/23817.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket: Update `cors_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_cors_configuration` resource. +``` \ No newline at end of file From 764b18d2458dd39672104a113cb0f5bf4b5641ca Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 31 Mar 2022 21:51:51 +0000 Subject: [PATCH 10/42] Update CHANGELOG.md for #23817 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 74921e46767..0250b830320 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -35,6 +35,7 @@ ENHANCEMENTS: * resource/aws_mq_broker: Add validation to `broker_name` and `security_groups` arguments ([#18088](https://github.com/hashicorp/terraform-provider-aws/issues/18088)) * resource/aws_s3_bucket: Update `acceleration_status` parameter to be configurable. 
Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_accelerate_configuration` resource. ([#23816](https://github.com/hashicorp/terraform-provider-aws/issues/23816)) * resource/aws_s3_bucket: Update `acl` and `grant` parameters to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring these parameters with the standalone `aws_s3_bucket_acl` resource. ([#23798](https://github.com/hashicorp/terraform-provider-aws/issues/23798)) +* resource/aws_s3_bucket: Update `cors_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_cors_configuration` resource. ([#23817](https://github.com/hashicorp/terraform-provider-aws/issues/23817)) * resource/aws_route: Add `core_network_arn` argument ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) * resource/aws_route_table: Add `core_network_arn` argument to the `route` configuration block ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) * resource/aws_vpc_ipam: add `cascade` argument ([#23973](https://github.com/hashicorp/terraform-provider-aws/issues/23973)) From 7f08096739a049415612f330f45a9e4dee60b491 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 22 Mar 2022 23:01:21 -0400 Subject: [PATCH 11/42] r/s3_bucket: make 'lifecycle_rule' configurable --- internal/service/s3/bucket.go | 297 +++++++++---- internal/service/s3/bucket_acl_test.go | 20 +- .../s3/bucket_lifecycle_configuration_test.go | 137 ++++++ internal/service/s3/bucket_test.go | 416 ++++++++++++++++-- internal/service/s3/validate.go | 55 +++ website/docs/r/s3_bucket.html.markdown | 149 ++++++- 6 files changed, 929 insertions(+), 145 deletions(-) create mode 100644 internal/service/s3/validate.go diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index d0c9fb82917..231d4d46400 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -7,7 +7,6 @@ import ( "log" "net/http" "net/url" - "regexp" "strings" "time" @@ -257,108 +256,104 @@ func ResourceBucket() *schema.Resource { "lifecycle_rule": { Type: schema.TypeList, + Optional: true, Computed: true, Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "id": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringLenBetween(0, 255), }, "prefix": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeString, + Optional: true, }, - "tags": tftags.TagsSchemaComputedDeprecated("Use the aws_s3_bucket_lifecycle_configuration resource instead"), + "tags": tftags.TagsSchema(), "enabled": { - Type: schema.TypeBool, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeBool, + Required: true, }, "abort_incomplete_multipart_upload_days": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeInt, + Optional: true, }, "expiration": { - Type: schema.TypeList, - 
Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "date": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validBucketLifecycleTimestamp, }, "days": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), }, "expired_object_delete_marker": { - Type: schema.TypeBool, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeBool, + Optional: true, }, }, }, }, "noncurrent_version_expiration": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeList, + MaxItems: 1, + Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "days": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), }, }, }, }, "transition": { - Type: schema.TypeSet, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeSet, + Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "date": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validBucketLifecycleTimestamp, }, "days": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), }, "storage_class": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.TransitionStorageClass_Values(), false), }, }, }, }, "noncurrent_version_transition": { - Type: schema.TypeSet, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeSet, + Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "days": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), }, "storage_class": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.TransitionStorageClass_Values(), false), }, }, }, @@ -784,6 +779,12 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("lifecycle_rule") { + if err := resourceBucketInternalLifecycleUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Lifecycle Rules: %w", d.Id(), err) + } + } + if d.HasChange("object_lock_configuration") { if err := resourceBucketInternalObjectLockConfigurationUpdate(conn, d); err != nil { 
return fmt.Errorf("error updating S3 Bucket (%s) Object Lock configuration: %w", d.Id(), err) @@ -1375,40 +1376,6 @@ func BucketRegionalDomainName(bucket string, region string) (string, error) { return fmt.Sprintf("%s.%s", bucket, strings.TrimPrefix(endpoint.URL, "https://")), nil } -// ValidBucketName validates any S3 bucket name that is not inside the us-east-1 region. -// Buckets outside of this region have to be DNS-compliant. After the same restrictions are -// applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc -func ValidBucketName(value string, region string) error { - if region != endpoints.UsEast1RegionID { - if (len(value) < 3) || (len(value) > 63) { - return fmt.Errorf("%q must contain from 3 to 63 characters", value) - } - if !regexp.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) { - return fmt.Errorf("only lowercase alphanumeric characters and hyphens allowed in %q", value) - } - if regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) { - return fmt.Errorf("%q must not be formatted as an IP address", value) - } - if strings.HasPrefix(value, `.`) { - return fmt.Errorf("%q cannot start with a period", value) - } - if strings.HasSuffix(value, `.`) { - return fmt.Errorf("%q cannot end with a period", value) - } - if strings.Contains(value, `..`) { - return fmt.Errorf("%q can be only one period between labels", value) - } - } else { - if len(value) > 255 { - return fmt.Errorf("%q must contain less than 256 characters", value) - } - if !regexp.MustCompile(`^[0-9a-zA-Z-._]+$`).MatchString(value) { - return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value) - } - } - return nil -} - type S3Website struct { Endpoint, Domain string } @@ -1636,6 +1603,162 @@ func resourceBucketInternalGrantsUpdate(conn *s3.S3, d *schema.ResourceData) err return err } +func resourceBucketInternalLifecycleUpdate(conn *s3.S3, d *schema.ResourceData) error { + lifecycleRules := d.Get("lifecycle_rule").([]interface{}) + + if len(lifecycleRules) == 0 || lifecycleRules[0] == nil { + input := &s3.DeleteBucketLifecycleInput{ + Bucket: aws.String(d.Id()), + } + + _, err := conn.DeleteBucketLifecycle(input) + + if err != nil { + return fmt.Errorf("error removing S3 Bucket (%s) lifecycle: %w", d.Id(), err) + } + + return nil + } + + rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules)) + + for i, lifecycleRule := range lifecycleRules { + r := lifecycleRule.(map[string]interface{}) + + rule := &s3.LifecycleRule{} + + // Filter + tags := Tags(tftags.New(r["tags"]).IgnoreAWS()) + filter := &s3.LifecycleRuleFilter{} + if len(tags) > 0 { + lifecycleRuleAndOp := &s3.LifecycleRuleAndOperator{} + lifecycleRuleAndOp.SetPrefix(r["prefix"].(string)) + lifecycleRuleAndOp.SetTags(tags) + filter.SetAnd(lifecycleRuleAndOp) + } else { + filter.SetPrefix(r["prefix"].(string)) + } + rule.SetFilter(filter) + + // ID + if val, ok := r["id"].(string); ok && val != "" { + rule.ID = aws.String(val) + } else { + rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-")) + } + + // Enabled + if val, ok := r["enabled"].(bool); ok && val { + rule.Status = aws.String(s3.ExpirationStatusEnabled) + } else { + rule.Status = aws.String(s3.ExpirationStatusDisabled) + } + + // AbortIncompleteMultipartUpload + if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 { + rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{ + DaysAfterInitiation: aws.Int64(int64(val)), + } + } + + 
		// Expiration
+		expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).([]interface{})
+		if len(expiration) > 0 && expiration[0] != nil {
+			e := expiration[0].(map[string]interface{})
+			i := &s3.LifecycleExpiration{}
+			if val, ok := e["date"].(string); ok && val != "" {
+				t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
+				if err != nil {
+					return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error())
+				}
+				i.Date = aws.Time(t)
+			} else if val, ok := e["days"].(int); ok && val > 0 {
+				i.Days = aws.Int64(int64(val))
+			} else if val, ok := e["expired_object_delete_marker"].(bool); ok {
+				i.ExpiredObjectDeleteMarker = aws.Bool(val)
+			}
+			rule.Expiration = i
+		}
+
+		// NoncurrentVersionExpiration
+		nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).([]interface{})
+		if len(nc_expiration) > 0 && nc_expiration[0] != nil {
+			e := nc_expiration[0].(map[string]interface{})
+
+			if val, ok := e["days"].(int); ok && val > 0 {
+				rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{
+					NoncurrentDays: aws.Int64(int64(val)),
+				}
+			}
+		}
+
+		// Transitions
+		transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List()
+		if len(transitions) > 0 {
+			rule.Transitions = make([]*s3.Transition, 0, len(transitions))
+			for _, transition := range transitions {
+				transition := transition.(map[string]interface{})
+				i := &s3.Transition{}
+				if val, ok := transition["date"].(string); ok && val != "" {
+					t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val))
+					if err != nil {
+						return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Transition Date: %s", err.Error())
+					}
+					i.Date = aws.Time(t)
+				} else if val, ok := transition["days"].(int); ok && val >= 0 {
+					i.Days = aws.Int64(int64(val))
+				}
+				if val, ok := transition["storage_class"].(string); ok && val != "" {
+					i.StorageClass = aws.String(val)
+				}
+
+				rule.Transitions = append(rule.Transitions, i)
+			}
+		}
+		// NoncurrentVersionTransitions
+		nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List()
+		if len(nc_transitions) > 0 {
+			rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions))
+			for _, transition := range nc_transitions {
+				transition := transition.(map[string]interface{})
+				i := &s3.NoncurrentVersionTransition{}
+				if val, ok := transition["days"].(int); ok && val >= 0 {
+					i.NoncurrentDays = aws.Int64(int64(val))
+				}
+				if val, ok := transition["storage_class"].(string); ok && val != "" {
+					i.StorageClass = aws.String(val)
+				}
+
+				rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i)
+			}
+		}
+
+		// As a lifecycle rule requires 1 or more transition/expiration actions,
+		// we explicitly pass a default ExpiredObjectDeleteMarker value to be able to create
+		// the rule while keeping the policy unaffected if the conditions are not met.
+ if rule.Expiration == nil && rule.NoncurrentVersionExpiration == nil && + rule.Transitions == nil && rule.NoncurrentVersionTransitions == nil && + rule.AbortIncompleteMultipartUpload == nil { + rule.Expiration = &s3.LifecycleExpiration{ExpiredObjectDeleteMarker: aws.Bool(false)} + } + + rules = append(rules, rule) + } + + input := &s3.PutBucketLifecycleConfigurationInput{ + Bucket: aws.String(d.Id()), + LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ + Rules: rules, + }, + } + + _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.PutBucketLifecycleConfiguration(input) + }) + + return err +} + func resourceBucketInternalObjectLockConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { // S3 Object Lock configuration cannot be deleted, only updated. req := &s3.PutObjectLockConfigurationInput{ diff --git a/internal/service/s3/bucket_acl_test.go b/internal/service/s3/bucket_acl_test.go index 2a07172d9a5..e3825bceebd 100644 --- a/internal/service/s3/bucket_acl_test.go +++ b/internal/service/s3/bucket_acl_test.go @@ -305,7 +305,7 @@ func TestAccS3BucketAcl_disappears(t *testing.T) { func TestAccS3BucketAcl_migrate_aclNoChange(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - bucketResourceName := "aws_s3_bucket.bucket" + bucketResourceName := "aws_s3_bucket.test" resourceName := "aws_s3_bucket_acl.test" resource.ParallelTest(t, resource.TestCase{ @@ -334,7 +334,7 @@ func TestAccS3BucketAcl_migrate_aclNoChange(t *testing.T) { func TestAccS3BucketAcl_migrate_aclWithChange(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - bucketResourceName := "aws_s3_bucket.bucket" + bucketResourceName := "aws_s3_bucket.test" resourceName := "aws_s3_bucket_acl.test" resource.ParallelTest(t, resource.TestCase{ @@ -363,7 +363,7 @@ func TestAccS3BucketAcl_migrate_aclWithChange(t *testing.T) { func TestAccS3BucketAcl_migrate_grantsNoChange(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - bucketResourceName := "aws_s3_bucket.bucket" + bucketResourceName := "aws_s3_bucket.test" resourceName := "aws_s3_bucket_acl.test" resource.ParallelTest(t, resource.TestCase{ @@ -412,7 +412,7 @@ func TestAccS3BucketAcl_migrate_grantsNoChange(t *testing.T) { func TestAccS3BucketAcl_migrate_grantsWithChange(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - bucketResourceName := "aws_s3_bucket.bucket" + bucketResourceName := "aws_s3_bucket.test" resourceName := "aws_s3_bucket_acl.test" resource.ParallelTest(t, resource.TestCase{ @@ -755,12 +755,12 @@ resource "aws_s3_bucket_acl" "test" { func testAccBucketAcl_Migrate_AclConfig(rName, acl string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id acl = %[2]q } `, rName, acl) @@ -770,12 +770,12 @@ func testAccBucketAcl_Migrate_GrantsNoChangeConfig(rName string) string { return fmt.Sprintf(` data "aws_canonical_user_id" "current" {} -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id access_control_policy { grant { grantee { @@ -807,12 +807,12 @@ data "aws_canonical_user_id" "current" {} data "aws_partition" "current" {} -resource "aws_s3_bucket" "bucket" { +resource 
"aws_s3_bucket" "test" { bucket = %[1]q } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id access_control_policy { grant { grantee { diff --git a/internal/service/s3/bucket_lifecycle_configuration_test.go b/internal/service/s3/bucket_lifecycle_configuration_test.go index c7633061ce2..1d75237e10a 100644 --- a/internal/service/s3/bucket_lifecycle_configuration_test.go +++ b/internal/service/s3/bucket_lifecycle_configuration_test.go @@ -842,6 +842,89 @@ func TestAccS3BucketLifecycleConfiguration_EmptyFilter_NonCurrentVersions(t *tes }, }) } +func TestAccS3BucketLifecycleConfiguration_migrate_noChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_lifecycle_configuration.test" + bucketResourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLifecycleExpireMarker(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.id", "id1"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.enabled", "true"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.prefix", "path1/"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.expiration.0.days", "0"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.expiration.0.date", ""), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.expiration.0.expired_object_delete_marker", "true"), + ), + }, + { + Config: testAccBucketLifecycleConfiguration_Migrate_NoChangeConfig(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketLifecycleConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "bucket", bucketResourceName, "bucket"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.id", "id1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.status", "Enabled"), + resource.TestCheckResourceAttr(resourceName, "rule.0.prefix", "path1/"), + resource.TestCheckResourceAttr(resourceName, "rule.0.expiration.0.days", "0"), + resource.TestCheckResourceAttr(resourceName, "rule.0.expiration.0.date", ""), + resource.TestCheckResourceAttr(resourceName, "rule.0.expiration.0.expired_object_delete_marker", "true"), + ), + }, + }, + }) +} + +func TestAccS3BucketLifecycleConfiguration_migrate_withChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_lifecycle_configuration.test" + bucketResourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLifecycleExpireMarker(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + 
resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.id", "id1"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.enabled", "true"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.prefix", "path1/"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.expiration.0.days", "0"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.expiration.0.date", ""), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.expiration.0.expired_object_delete_marker", "true"), + ), + }, + { + Config: testAccBucketLifecycleConfiguration_Migrate_WithChangeConfig(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketLifecycleConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "bucket", bucketResourceName, "bucket"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.id", "id1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.status", "Disabled"), + resource.TestCheckResourceAttr(resourceName, "rule.0.prefix", "path1/"), + resource.TestCheckResourceAttr(resourceName, "rule.0.expiration.0.days", "0"), + resource.TestCheckResourceAttr(resourceName, "rule.0.expiration.0.date", ""), + resource.TestCheckResourceAttr(resourceName, "rule.0.expiration.0.expired_object_delete_marker", "false"), + ), + }, + }, + }) +} // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/23884 func TestAccS3BucketLifecycleConfiguration_Update_filterWithAndToFilterWithPrefix(t *testing.T) { @@ -1683,3 +1766,57 @@ resource "aws_s3_bucket_lifecycle_configuration" "test" { } }`, rName, prefix) } + +func testAccBucketLifecycleConfiguration_Migrate_NoChangeConfig(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_acl" "test" { + bucket = aws_s3_bucket.test.id + acl = "private" +} + +resource "aws_s3_bucket_lifecycle_configuration" "test" { + bucket = aws_s3_bucket.test.bucket + + rule { + id = "id1" + prefix = "path1/" + status = "Enabled" + + expiration { + expired_object_delete_marker = true + } + } +} +`, rName) +} + +func testAccBucketLifecycleConfiguration_Migrate_WithChangeConfig(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_acl" "test" { + bucket = aws_s3_bucket.test.id + acl = "private" +} + +resource "aws_s3_bucket_lifecycle_configuration" "test" { + bucket = aws_s3_bucket.test.bucket + + rule { + id = "id1" + prefix = "path1/" + status = "Disabled" + + expiration { + expired_object_delete_marker = false + } + } +} +`, rName) +} diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 7c9ca3171e4..e8703a97a24 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -43,7 +43,7 @@ func TestAccS3Bucket_Basic_basic(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") region := acctest.Region() hostedZoneID, _ := tfs3.HostedZoneIDForRegion(region) - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -159,7 +159,7 @@ func TestAccS3Bucket_Basic_namePrefix(t *testing.T) { } func TestAccS3Bucket_Basic_forceDestroy(t *testing.T) { - 
resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resource.ParallelTest(t, resource.TestCase{ @@ -185,7 +185,7 @@ func TestAccS3Bucket_Basic_forceDestroy(t *testing.T) { // to not contain these extra slashes, out-of-band handling and other AWS // services may create keys with extra slashes (empty "directory" prefixes). func TestAccS3Bucket_Basic_forceDestroyWithEmptyPrefixes(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resource.ParallelTest(t, resource.TestCase{ @@ -206,7 +206,7 @@ func TestAccS3Bucket_Basic_forceDestroyWithEmptyPrefixes(t *testing.T) { } func TestAccS3Bucket_Basic_forceDestroyWithObjectLockEnabled(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resource.ParallelTest(t, resource.TestCase{ @@ -268,7 +268,7 @@ func TestAccS3Bucket_Basic_acceleration(t *testing.T) { // See https://github.com/hashicorp/terraform/pull/2925 func TestAccS3Bucket_disappears(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -312,7 +312,7 @@ func TestAccS3Bucket_Tags_basic(t *testing.T) { } func TestAccS3Bucket_Tags_withNoSystemTags(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resource.ParallelTest(t, resource.TestCase{ @@ -371,7 +371,7 @@ func TestAccS3Bucket_Tags_withNoSystemTags(t *testing.T) { } func TestAccS3Bucket_Tags_withSystemTags(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") var stackID string @@ -456,7 +456,7 @@ func TestAccS3Bucket_Tags_withSystemTags(t *testing.T) { } func TestAccS3Bucket_Tags_ignoreTags(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ @@ -500,6 +500,211 @@ func TestAccS3Bucket_Tags_ignoreTags(t *testing.T) { }) } +func TestAccS3Bucket_Manage_lifecycleBasic(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLifecycle(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.#", "6"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.id", "id1"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.prefix", "path1/"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.days", "365"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.date", ""), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.expired_object_delete_marker", "false"), + 
resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ + "date": "", + "days": "30", + "storage_class": "STANDARD_IA", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ + "date": "", + "days": "60", + "storage_class": "INTELLIGENT_TIERING", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ + "date": "", + "days": "90", + "storage_class": "ONEZONE_IA", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ + "date": "", + "days": "120", + "storage_class": "GLACIER", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ + "date": "", + "days": "210", + "storage_class": "DEEP_ARCHIVE", + }), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.id", "id2"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.prefix", "path2/"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.expiration.0.date", "2016-01-12"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.expiration.0.days", "0"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.expiration.0.expired_object_delete_marker", "false"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.2.id", "id3"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.2.prefix", "path3/"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.2.transition.*", map[string]string{ + "days": "0", + }), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.id", "id4"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.prefix", "path4/"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.tags.tagKey", "tagValue"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.tags.terraform", "hashicorp"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.4.id", "id5"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.4.tags.tagKey", "tagValue"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.4.tags.terraform", "hashicorp"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.4.transition.*", map[string]string{ + "days": "0", + "storage_class": "GLACIER", + }), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.5.id", "id6"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.5.tags.tagKey", "tagValue"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.5.transition.*", map[string]string{ + "days": "0", + "storage_class": "GLACIER", + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + { + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), + }, + }, + }) +} + +func TestAccS3Bucket_Manage_lifecycleExpireMarkerOnly(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccBucketConfig_withLifecycleExpireMarker(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.id", "id1"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.prefix", "path1/"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.days", "0"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.date", ""), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.expired_object_delete_marker", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + { + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), + }, + }, + }) +} + +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/11420 +func TestAccS3Bucket_Manage_lifecycleRuleExpirationEmptyBlock(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLifecycleRuleExpirationEmptyConfigurationBlock(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), + }, + }, + }) +} + +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/15138 +func TestAccS3Bucket_Manage_lifecycleRuleAbortIncompleteMultipartUploadDaysNoExpiration(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLifecycleRuleAbortIncompleteMultipartUploadDays(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccS3Bucket_Manage_lifecycleRemove(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLifecycle(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.#", "6"), + ), + }, + { + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + // As Lifecycle Rule is a Computed field, removing them from terraform will not + // trigger an update to remove them from the S3 bucket. 
+ resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.#", "6"), + ), + }, + }, + }) +} + func TestAccS3Bucket_Manage_objectLock(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resourceName := "aws_s3_bucket.test" @@ -647,7 +852,7 @@ func TestAccS3Bucket_Manage_objectLockWithVersioning_deprecatedEnabled(t *testin func TestAccS3Bucket_Security_updateACL(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -681,7 +886,7 @@ func TestAccS3Bucket_Security_updateACL(t *testing.T) { func TestAccS3Bucket_Security_updateGrant(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -741,7 +946,7 @@ func TestAccS3Bucket_Security_updateGrant(t *testing.T) { func TestAccS3Bucket_Security_aclToGrant(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -771,7 +976,7 @@ func TestAccS3Bucket_Security_aclToGrant(t *testing.T) { func TestAccS3Bucket_Security_grantToACL(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -1496,7 +1701,7 @@ func testAccCheckBucketCheckTags(n string, expectedTags map[string]string) resou func testAccBucketConfig_Basic(bucketName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q } `, bucketName) @@ -1513,7 +1718,7 @@ resource "aws_s3_bucket" "test" { func testAccBucketConfig_withACL(bucketName, acl string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q acl = %[2]q } @@ -1569,7 +1774,7 @@ func testAccBucketConfig_withGrants(bucketName string) string { return fmt.Sprintf(` data "aws_canonical_user_id" "current" {} -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q grant { @@ -1585,7 +1790,7 @@ func testAccBucketConfig_withUpdatedGrants(bucketName string) string { return fmt.Sprintf(` data "aws_canonical_user_id" "current" {} -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q grant { @@ -1603,15 +1808,172 @@ resource "aws_s3_bucket" "bucket" { `, bucketName) } +func testAccBucketConfig_withLifecycle(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + + lifecycle_rule { + id = "id1" + prefix = "path1/" + enabled = true + + expiration { + days = 365 + } + + transition { + days = 30 + storage_class = "STANDARD_IA" + } + + transition { + days = 60 + storage_class = "INTELLIGENT_TIERING" + } + + transition { + days = 90 + storage_class = "ONEZONE_IA" + } + + transition { + days = 120 + storage_class = "GLACIER" + } + + transition { + days = 210 + storage_class = "DEEP_ARCHIVE" + } + } + + lifecycle_rule { + id = "id2" + prefix = "path2/" + enabled = true + + expiration { + date = "2016-01-12" + } + } + + 
lifecycle_rule { + id = "id3" + prefix = "path3/" + enabled = true + + transition { + days = 0 + storage_class = "GLACIER" + } + } + + lifecycle_rule { + id = "id4" + prefix = "path4/" + enabled = true + + tags = { + "tagKey" = "tagValue" + "terraform" = "hashicorp" + } + + expiration { + date = "2016-01-12" + } + } + + lifecycle_rule { + id = "id5" + enabled = true + + tags = { + "tagKey" = "tagValue" + "terraform" = "hashicorp" + } + + transition { + days = 0 + storage_class = "GLACIER" + } + } + + lifecycle_rule { + id = "id6" + enabled = true + + tags = { + "tagKey" = "tagValue" + } + + transition { + days = 0 + storage_class = "GLACIER" + } + } +} +`, bucketName) +} + +func testAccBucketConfig_withLifecycleExpireMarker(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + + lifecycle_rule { + id = "id1" + prefix = "path1/" + enabled = true + + expiration { + expired_object_delete_marker = "true" + } + } +} +`, bucketName) +} + +func testAccBucketConfig_withLifecycleRuleExpirationEmptyConfigurationBlock(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + lifecycle_rule { + enabled = true + id = "id1" + + expiration {} + } +} +`, rName) +} + +func testAccBucketConfig_withLifecycleRuleAbortIncompleteMultipartUploadDays(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + lifecycle_rule { + abort_incomplete_multipart_upload_days = 7 + enabled = true + id = "id1" + } +} +`, rName) +} + func testAccBucketConfig_withNoTags(bucketName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q force_destroy = false } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id acl = "private" } `, bucketName) @@ -1619,7 +1981,7 @@ resource "aws_s3_bucket_acl" "test" { func testAccBucketConfig_withTags(bucketName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q force_destroy = false @@ -1631,7 +1993,7 @@ resource "aws_s3_bucket" "bucket" { } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id acl = "private" } `, bucketName) @@ -1639,7 +2001,7 @@ resource "aws_s3_bucket_acl" "test" { func testAccBucketConfig_withUpdatedTags(bucketName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q force_destroy = false @@ -1652,7 +2014,7 @@ resource "aws_s3_bucket" "bucket" { } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id acl = "private" } `, bucketName) @@ -1824,13 +2186,13 @@ resource "aws_s3_bucket_versioning" "test" { func testAccBucketConfig_forceDestroy(bucketName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = "%s" force_destroy = true } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id acl = "private" } `, bucketName) @@ -1838,7 +2200,7 @@ resource "aws_s3_bucket_acl" "test" { func testAccBucketConfig_forceDestroyWithObjectLockEnabled(bucketName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = "%s" force_destroy = true @@ -1846,12 +2208,12 @@ resource "aws_s3_bucket" 
"bucket" { } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id acl = "private" } resource "aws_s3_bucket_versioning" "bucket" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id versioning_configuration { status = "Enabled" } diff --git a/internal/service/s3/validate.go b/internal/service/s3/validate.go new file mode 100644 index 00000000000..0c5ce27b36a --- /dev/null +++ b/internal/service/s3/validate.go @@ -0,0 +1,55 @@ +package s3 + +import ( + "fmt" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// ValidBucketName validates any S3 bucket name that is not inside the us-east-1 region. +// Buckets outside of this region have to be DNS-compliant. After the same restrictions are +// applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc +func ValidBucketName(value string, region string) error { + if region != endpoints.UsEast1RegionID { + if (len(value) < 3) || (len(value) > 63) { + return fmt.Errorf("%q must contain from 3 to 63 characters", value) + } + if !regexp.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) { + return fmt.Errorf("only lowercase alphanumeric characters and hyphens allowed in %q", value) + } + if regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) { + return fmt.Errorf("%q must not be formatted as an IP address", value) + } + if strings.HasPrefix(value, `.`) { + return fmt.Errorf("%q cannot start with a period", value) + } + if strings.HasSuffix(value, `.`) { + return fmt.Errorf("%q cannot end with a period", value) + } + if strings.Contains(value, `..`) { + return fmt.Errorf("%q can be only one period between labels", value) + } + } else { + if len(value) > 255 { + return fmt.Errorf("%q must contain less than 256 characters", value) + } + if !regexp.MustCompile(`^[0-9a-zA-Z-._]+$`).MatchString(value) { + return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value) + } + } + return nil +} + +func validBucketLifecycleTimestamp(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", value)) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q cannot be parsed as RFC3339 Timestamp Format", value)) + } + + return +} diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index f896105f4ce..3d0c3d5b2f1 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -28,6 +28,10 @@ Configuring with both will cause inconsistencies and may overwrite configuration or with the deprecated parameter `cors_rule` in the resource `aws_s3_bucket`. Configuring with both will cause inconsistencies and may overwrite configuration. +~> **NOTE on S3 Bucket Lifecycle Configuration:** S3 Bucket Lifecycle can be configured in either the standalone resource [`aws_s3_bucket_lifecycle_configuration`](s3_bucket_lifecycle_configuration.html) +or with the deprecated parameter `lifecycle_rule` in the resource `aws_s3_bucket`. +Configuring with both will cause inconsistencies and may overwrite configuration. + ## Example Usage ### Private Bucket w/ Tags @@ -85,8 +89,81 @@ See the [`aws_s3_bucket_logging` resource](s3_bucket_logging.html.markdown) for ### Using object lifecycle -The `lifecycle_rule` argument is read-only as of version 4.0 of the Terraform AWS Provider. 
-See the [`aws_s3_bucket_lifecycle_configuration` resource](s3_bucket_lifecycle_configuration.html.markdown) for configuration details.
+-> **NOTE:** The parameter `lifecycle_rule` is deprecated.
+Use the resource [`aws_s3_bucket_lifecycle_configuration`](s3_bucket_lifecycle_configuration.html) instead.
+
+```terraform
+resource "aws_s3_bucket" "bucket" {
+  bucket = "my-bucket"
+  acl    = "private"
+
+  lifecycle_rule {
+    id      = "log"
+    enabled = true
+
+    prefix = "log/"
+
+    tags = {
+      rule      = "log"
+      autoclean = "true"
+    }
+
+    transition {
+      days          = 30
+      storage_class = "STANDARD_IA" # or "ONEZONE_IA"
+    }
+
+    transition {
+      days          = 60
+      storage_class = "GLACIER"
+    }
+
+    expiration {
+      days = 90
+    }
+  }
+
+  lifecycle_rule {
+    id      = "tmp"
+    prefix  = "tmp/"
+    enabled = true
+
+    expiration {
+      date = "2016-01-12"
+    }
+  }
+}
+
+resource "aws_s3_bucket" "versioning_bucket" {
+  bucket = "my-versioning-bucket"
+  acl    = "private"
+
+  versioning {
+    enabled = true
+  }
+
+  lifecycle_rule {
+    prefix  = "config/"
+    enabled = true
+
+    noncurrent_version_transition {
+      days          = 30
+      storage_class = "STANDARD_IA"
+    }
+
+    noncurrent_version_transition {
+      days          = 60
+      storage_class = "GLACIER"
+    }
+
+    noncurrent_version_expiration {
+      days = 90
+    }
+  }
+}
+```

### Using object lock configuration

@@ -142,6 +219,8 @@ The following arguments are supported:
* `grant` - (Optional, **Deprecated**) An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl). See [Grant](#grant) below for details. Conflicts with `acl`. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_acl`](s3_bucket_acl.html.markdown) instead.
* `cors_rule` - (Optional, **Deprecated**) A rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). See [CORS rule](#cors-rule) below for details. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_cors_configuration`](s3_bucket_cors_configuration.html.markdown) instead.
* `force_destroy` - (Optional, Default:`false`) A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable.
+* `lifecycle_rule` - (Optional, **Deprecated**) A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). See [Lifecycle Rule](#lifecycle-rule) below for details. Terraform will only perform drift detection if a configuration value is provided.
+  Use the resource [`aws_s3_bucket_lifecycle_configuration`](s3_bucket_lifecycle_configuration.html) instead.
* `object_lock_enabled` - (Optional, Default:`false`, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled.
* `object_lock_configuration` - (Optional) A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See [Object Lock Configuration](#object-lock-configuration) below.
* `tags` - (Optional) A map of tags to assign to the bucket. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
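For comparison, the `log` rule from the deprecated example above maps onto the standalone resource roughly as follows. This is a minimal sketch modeled on the migration test configurations in this change; the inline `enabled = true` flag becomes an explicit `status`, and the bucket and rule names are illustrative:

```terraform
resource "aws_s3_bucket" "bucket" {
  bucket = "my-bucket"
}

resource "aws_s3_bucket_lifecycle_configuration" "bucket" {
  bucket = aws_s3_bucket.bucket.id

  rule {
    id     = "log"
    prefix = "log/"
    status = "Enabled" # replaces `enabled = true`

    transition {
      days          = 30
      storage_class = "STANDARD_IA"
    }

    transition {
      days          = 60
      storage_class = "GLACIER"
    }

    expiration {
      days = 90
    }
  }
}
```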
@@ -169,6 +248,53 @@ The `grant` configuration block supports the following arguments:
 * `permissions` - (Required) List of permissions to apply for grantee. Valid values are `READ`, `WRITE`, `READ_ACP`, `WRITE_ACP`, `FULL_CONTROL`.
 * `uri` - (Optional) Uri address to grant for. Used only when `type` is `Group`.
 
+### Lifecycle Rule
+
+~> **NOTE:** Currently, changes to the `lifecycle_rule` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes to an S3 bucket's Lifecycle rules, use the `aws_s3_bucket_lifecycle_configuration` resource instead. If you use `lifecycle_rule` on an `aws_s3_bucket`, Terraform will assume management over the full set of Lifecycle rules for the S3 bucket, treating additional Lifecycle rules as drift. For this reason, `lifecycle_rule` cannot be mixed with the external `aws_s3_bucket_lifecycle_configuration` resource for a given S3 bucket.
+
+~> **NOTE:** At least one of `abort_incomplete_multipart_upload_days`, `expiration`, `transition`, `noncurrent_version_expiration`, `noncurrent_version_transition` must be specified.
+
+The `lifecycle_rule` configuration block supports the following arguments:
+
+* `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length.
+* `prefix` - (Optional) Object key prefix identifying one or more objects to which the rule applies.
+* `tags` - (Optional) Specifies object tags key and value.
+* `enabled` - (Required) Specifies lifecycle rule status.
+* `abort_incomplete_multipart_upload_days` - (Optional) Specifies the number of days after initiating a multipart upload when the multipart upload must be completed.
+* `expiration` - (Optional) Specifies when the object expires. See [Expiration](#expiration) below for details.
+* `transition` - (Optional) Specifies when the object transitions to a different storage class. See [Transition](#transition) below for details.
+* `noncurrent_version_expiration` - (Optional) Specifies when noncurrent object versions expire. See [Noncurrent Version Expiration](#noncurrent-version-expiration) below for details.
+* `noncurrent_version_transition` - (Optional) Specifies when noncurrent object versions transition. See [Noncurrent Version Transition](#noncurrent-version-transition) below for details.
+
+### Expiration
+
+The `expiration` configuration block supports the following arguments:
+
+* `date` - (Optional) Specifies the date after which you want the corresponding action to take effect.
+* `days` - (Optional) Specifies the number of days after object creation when the specific rule action takes effect.
+* `expired_object_delete_marker` - (Optional) On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Amazon S3 to delete expired object delete markers. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.
+
+### Transition
+
+The `transition` configuration block supports the following arguments:
+
+* `date` - (Optional) Specifies the date after which you want the corresponding action to take effect.
+* `days` - (Optional) Specifies the number of days after object creation when the specific rule action takes effect.
+* `storage_class` - (Required) Specifies the Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) to which you want the object to transition. 
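+
+-> For illustration only: the `log` rule from the lifecycle example above, expressed with the recommended standalone resource, might look roughly like the following sketch. The resource label is a placeholder, the bucket reference assumes the `aws_s3_bucket.bucket` resource from that example, and the rule is abbreviated to a single transition and expiration.
+
+```terraform
+resource "aws_s3_bucket_lifecycle_configuration" "example" {
+  bucket = aws_s3_bucket.bucket.id
+
+  rule {
+    id     = "log"
+    status = "Enabled"
+
+    filter {
+      prefix = "log/"
+    }
+
+    transition {
+      days          = 30
+      storage_class = "STANDARD_IA"
+    }
+
+    expiration {
+      days = 90
+    }
+  }
+}
+```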
+ +### Noncurrent Version Expiration + +The `noncurrent_version_expiration` configuration block supports the following arguments: + +* `days` - (Required) Specifies the number of days noncurrent object versions expire. + +### Noncurrent Version Transition + +The `noncurrent_version_transition` configuration supports the following arguments: + +* `days` - (Required) Specifies the number of days noncurrent object versions transition. +* `storage_class` - (Required) Specifies the Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) to which you want the object to transition. + ### Object Lock Configuration ~> **NOTE:** You can only **enable** S3 Object Lock for **new** buckets. If you need to **enable** S3 Object Lock for an **existing** bucket, please contact AWS Support. @@ -189,25 +315,6 @@ In addition to all arguments above, the following attributes are exported: * `bucket_domain_name` - The bucket domain name. Will be of format `bucketname.s3.amazonaws.com`. * `bucket_regional_domain_name` - The bucket region-specific domain name. The bucket domain name including the region name, please refer [here](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent [redirect issues](https://forums.aws.amazon.com/thread.jspa?threadID=216814) from CloudFront to S3 Origin URL. * `hosted_zone_id` - The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. -* `lifecycle_rule` - A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). - * `id` - Unique identifier for the rule. - * `prefix` - Object key prefix identifying one or more objects to which the rule applies. - * `tags` - Object tags key and value. - * `enabled` - Lifecycle rule status. - * `abort_incomplete_multipart_upload_days` - Number of days after initiating a multipart upload when the multipart upload must be completed. - * `expiration` - The expiration for the lifecycle of the object in the form of date, days and, whether the object has a delete marker. - * `date` - Indicates at what date the object is to be moved or deleted. - * `days` - Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer. - * `expired_object_delete_marker` - Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. - * `transition` - Specifies when an Amazon S3 object transitions to a specified storage class. - * `date` - The date after which you want the corresponding action to take effect. - * `days` - The number of days after object creation when the specific rule action takes effect. - * `storage_class` - The Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) an object will transition to. - * `noncurrent_version_expiration` - When noncurrent object versions expire. - * `days` - The number of days noncurrent object versions expire. - * `noncurrent_version_transition` - When noncurrent object versions transition. - * `days` - The number of days noncurrent object versions transition. 
- * `storage_class` - The Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) an object will transition to. * `logging` - The [logging parameters](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) for the bucket. * `target_bucket` - The name of the bucket that receives the log objects. * `target_prefix` - The prefix for all log object keys/ From 6fc046347d1e395491921c65c794759b620cfe8b Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Tue, 22 Mar 2022 23:01:28 -0400 Subject: [PATCH 12/42] Update CHANGELOG for #23818 --- .changelog/23818.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/23818.txt diff --git a/.changelog/23818.txt b/.changelog/23818.txt new file mode 100644 index 00000000000..63e822ed575 --- /dev/null +++ b/.changelog/23818.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket: Update `lifecycle_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_lifecycle_configuration` resource. +``` \ No newline at end of file From 121463332b4cada6066882a1b49d946a03c9150a Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 31 Mar 2022 23:01:26 +0000 Subject: [PATCH 13/42] Update CHANGELOG.md for #23818 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0250b830320..e2af326df5d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -36,6 +36,7 @@ ENHANCEMENTS: * resource/aws_s3_bucket: Update `acceleration_status` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_accelerate_configuration` resource. ([#23816](https://github.com/hashicorp/terraform-provider-aws/issues/23816)) * resource/aws_s3_bucket: Update `acl` and `grant` parameters to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring these parameters with the standalone `aws_s3_bucket_acl` resource. ([#23798](https://github.com/hashicorp/terraform-provider-aws/issues/23798)) * resource/aws_s3_bucket: Update `cors_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_cors_configuration` resource. ([#23817](https://github.com/hashicorp/terraform-provider-aws/issues/23817)) +* resource/aws_s3_bucket: Update `lifecycle_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_lifecycle_configuration` resource. 
([#23818](https://github.com/hashicorp/terraform-provider-aws/issues/23818)) * resource/aws_route: Add `core_network_arn` argument ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) * resource/aws_route_table: Add `core_network_arn` argument to the `route` configuration block ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) * resource/aws_vpc_ipam: add `cascade` argument ([#23973](https://github.com/hashicorp/terraform-provider-aws/issues/23973)) From f00ccb9200152259721122bd424276c5f2237fdc Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 31 Mar 2022 19:12:14 -0400 Subject: [PATCH 14/42] r/s3_bucket: make 'logging' configurable --- internal/service/s3/bucket.go | 50 ++++++++++-- internal/service/s3/bucket_logging_test.go | 93 ++++++++++++++++++++++ internal/service/s3/bucket_test.go | 48 +++++++++++ website/docs/r/s3_bucket.html.markdown | 39 +++++++-- 4 files changed, 217 insertions(+), 13 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 231d4d46400..f597462022e 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -235,20 +235,20 @@ func ResourceBucket() *schema.Resource { }, "logging": { - Type: schema.TypeSet, + Type: schema.TypeList, + Optional: true, Computed: true, + MaxItems: 1, Deprecated: "Use the aws_s3_bucket_logging resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "target_bucket": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_logging resource instead", + Type: schema.TypeString, + Required: true, }, "target_prefix": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_logging resource instead", + Type: schema.TypeString, + Optional: true, }, }, }, @@ -785,6 +785,12 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("logging") { + if err := resourceBucketInternalLoggingUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Logging: %w", d.Id(), err) + } + } + if d.HasChange("object_lock_configuration") { if err := resourceBucketInternalObjectLockConfigurationUpdate(conn, d); err != nil { return fmt.Errorf("error updating S3 Bucket (%s) Object Lock configuration: %w", d.Id(), err) @@ -1759,6 +1765,36 @@ func resourceBucketInternalLifecycleUpdate(conn *s3.S3, d *schema.ResourceData) return err } +func resourceBucketInternalLoggingUpdate(conn *s3.S3, d *schema.ResourceData) error { + logging := d.Get("logging").([]interface{}) + loggingStatus := &s3.BucketLoggingStatus{} + + if len(logging) > 0 { + c := logging[0].(map[string]interface{}) + + loggingEnabled := &s3.LoggingEnabled{} + if val, ok := c["target_bucket"].(string); ok { + loggingEnabled.TargetBucket = aws.String(val) + } + if val, ok := c["target_prefix"].(string); ok { + loggingEnabled.TargetPrefix = aws.String(val) + } + + loggingStatus.LoggingEnabled = loggingEnabled + } + + input := &s3.PutBucketLoggingInput{ + Bucket: aws.String(d.Id()), + BucketLoggingStatus: loggingStatus, + } + + _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.PutBucketLogging(input) + }) + + return err +} + func resourceBucketInternalObjectLockConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { // S3 Object Lock configuration cannot be deleted, only updated. 
req := &s3.PutObjectLockConfigurationInput{ diff --git a/internal/service/s3/bucket_logging_test.go b/internal/service/s3/bucket_logging_test.go index f59581a6802..25ca6487892 100644 --- a/internal/service/s3/bucket_logging_test.go +++ b/internal/service/s3/bucket_logging_test.go @@ -298,6 +298,70 @@ func TestAccS3BucketLogging_TargetGrantByGroup(t *testing.T) { }) } +func TestAccS3BucketLogging_migrate_loggingNoChange(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.test" + resourceName := "aws_s3_bucket_logging.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLogging(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "logging.#", "1"), + resource.TestCheckResourceAttrPair(bucketResourceName, "logging.0.target_bucket", "aws_s3_bucket.log_bucket", "id"), + resource.TestCheckResourceAttr(bucketResourceName, "logging.0.target_prefix", "log/"), + ), + }, + { + Config: testAccBucketLogging_Migrate_LoggingConfig(bucketName, "log/"), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "target_bucket", "aws_s3_bucket.log_bucket", "id"), + resource.TestCheckResourceAttr(resourceName, "target_prefix", "log/"), + ), + }, + }, + }) +} + +func TestAccS3BucketLogging_migrate_loggingWithChange(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.test" + resourceName := "aws_s3_bucket_logging.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLogging(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "logging.#", "1"), + resource.TestCheckResourceAttrPair(bucketResourceName, "logging.0.target_bucket", "aws_s3_bucket.log_bucket", "id"), + resource.TestCheckResourceAttr(bucketResourceName, "logging.0.target_prefix", "log/"), + ), + }, + { + Config: testAccBucketLogging_Migrate_LoggingConfig(bucketName, "tmp/"), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "target_bucket", "aws_s3_bucket.log_bucket", "id"), + resource.TestCheckResourceAttr(resourceName, "target_prefix", "tmp/"), + ), + }, + }, + }) +} + func testAccCheckBucketLoggingDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn @@ -561,3 +625,32 @@ resource "aws_s3_bucket_logging" "test" { } `, rName, permission) } + +func testAccBucketLogging_Migrate_LoggingConfig(rName, targetPrefix string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "log_bucket" { + bucket = "%[1]s-log" +} + +resource "aws_s3_bucket_acl" "log_bucket_acl" { + bucket = aws_s3_bucket.log_bucket.id + acl = "log-delivery-write" +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource 
"aws_s3_bucket_acl" "test" { + bucket = aws_s3_bucket.test.id + acl = "private" +} + +resource "aws_s3_bucket_logging" "test" { + bucket = aws_s3_bucket.test.id + + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = %[2]q +} +`, rName, targetPrefix) +} diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index e8703a97a24..4d5f393c74b 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -1192,6 +1192,35 @@ func TestAccS3Bucket_Security_corsSingleMethodAndEmptyOrigin(t *testing.T) { }) } +func TestAccS3Bucket_Security_logging(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLogging(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "logging.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "logging.0.target_bucket", "aws_s3_bucket.log_bucket", "id"), + resource.TestCheckResourceAttr(resourceName, "logging.0.target_prefix", "log/"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + func TestBucketName(t *testing.T) { validDnsNames := []string{ "foobar", @@ -1965,6 +1994,25 @@ resource "aws_s3_bucket" "test" { `, rName) } +func testAccBucketConfig_withLogging(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "log_bucket" { + bucket = "%[1]s-log" + acl = "log-delivery-write" +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + + logging { + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" + } +} +`, bucketName) +} + func testAccBucketConfig_withNoTags(bucketName string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 3d0c3d5b2f1..27c0a7cff9f 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -32,6 +32,10 @@ Configuring with both will cause inconsistencies and may overwrite configuration or with the deprecated parameter `lifecycle_rule` in the resource `aws_s3_bucket`. Configuring with both will cause inconsistencies and may overwrite configuration. +~> **NOTE on S3 Bucket Logging Configuration:** S3 Bucket logging can be configured in either the standalone resource [`aws_s3_bucket_logging`](s3_bucket_logging.html.markdown) +or with the deprecated parameter `logging` in the resource `aws_s3_bucket`. +Configuring with both will cause inconsistencies and may overwrite configuration. + ## Example Usage ### Private Bucket w/ Tags @@ -84,10 +88,25 @@ See the [`aws_s3_bucket_versioning` resource](s3_bucket_versioning.html.markdown ### Enable Logging -The `logging` argument is read-only as of version 4.0 of the Terraform AWS Provider. -See the [`aws_s3_bucket_logging` resource](s3_bucket_logging.html.markdown) for configuration details. +-> **NOTE:** The parameter `logging` is deprecated. +Use the resource [`aws_s3_bucket_logging`](s3_bucket_logging.html.markdown) instead. 
-### Using object lifecycle
+```terraform
+resource "aws_s3_bucket" "log_bucket" {
+  bucket = "my-tf-log-bucket"
+  acl    = "log-delivery-write"
+}
+
+resource "aws_s3_bucket" "b" {
+  bucket = "my-tf-test-bucket"
+  acl    = "private"
+
+  logging {
+    target_bucket = aws_s3_bucket.log_bucket.id
+    target_prefix = "log/"
+  }
+}
+```
 
 ### Using object lifecycle
 
@@ -221,6 +240,8 @@ The following arguments are supported:
 * `force_destroy` - (Optional, Default:`false`) A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable.
 * `lifecycle_rule` - (Optional, **Deprecated**) A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). See [Lifecycle Rule](#lifecycle-rule) below for details. Terraform will only perform drift detection if a configuration value is provided.
   Use the resource [`aws_s3_bucket_lifecycle_configuration`](s3_bucket_lifecycle_configuration.html) instead.
+* `logging` - (Optional, **Deprecated**) A configuration of [S3 bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) parameters. See [Logging](#logging) below for details. Terraform will only perform drift detection if a configuration value is provided.
+  Use the resource [`aws_s3_bucket_logging`](s3_bucket_logging.html.markdown) instead.
 * `object_lock_enabled` - (Optional, Default:`false`, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled.
 * `object_lock_configuration` - (Optional) A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See [Object Lock Configuration](#object-lock-configuration) below.
 * `tags` - (Optional) A map of tags to assign to the bucket. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
@@ -295,6 +316,15 @@ The `noncurrent_version_transition` configuration supports the following argumen
 * `days` - (Required) Specifies the number of days noncurrent object versions transition.
 * `storage_class` - (Required) Specifies the Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) to which you want the object to transition.
 
+### Logging
+
+~> **NOTE:** Currently, changes to the `logging` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes to an S3 bucket's logging parameters, use the `aws_s3_bucket_logging` resource instead. If you use `logging` on an `aws_s3_bucket`, Terraform will assume management over the full set of logging parameters for the S3 bucket, treating additional logging parameters as drift. For this reason, `logging` cannot be mixed with the external `aws_s3_bucket_logging` resource for a given S3 bucket.
+
+The `logging` configuration block supports the following arguments:
+
+* `target_bucket` - (Required) The name of the bucket that will receive the log objects.
+* `target_prefix` - (Optional) A key prefix for log objects.
+
 ### Object Lock Configuration
 
 ~> **NOTE:** You can only **enable** S3 Object Lock for **new** buckets. 
If you need to **enable** S3 Object Lock for an **existing** bucket, please contact AWS Support. @@ -315,9 +345,6 @@ In addition to all arguments above, the following attributes are exported: * `bucket_domain_name` - The bucket domain name. Will be of format `bucketname.s3.amazonaws.com`. * `bucket_regional_domain_name` - The bucket region-specific domain name. The bucket domain name including the region name, please refer [here](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent [redirect issues](https://forums.aws.amazon.com/thread.jspa?threadID=216814) from CloudFront to S3 Origin URL. * `hosted_zone_id` - The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. -* `logging` - The [logging parameters](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) for the bucket. - * `target_bucket` - The name of the bucket that receives the log objects. - * `target_prefix` - The prefix for all log object keys/ * `object_lock_configuration` - The [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) configuration. * `rule` - The Object Lock rule in place for this bucket. * `default_retention` - The default retention period applied to new objects placed in this bucket. From 2cf464c772c921c9ba93699a1bf77dd2d27546f4 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 31 Mar 2022 19:12:26 -0400 Subject: [PATCH 15/42] Update CHANGELOG for #23819 --- .changelog/23819.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/23819.txt diff --git a/.changelog/23819.txt b/.changelog/23819.txt new file mode 100644 index 00000000000..c60b34caefa --- /dev/null +++ b/.changelog/23819.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket: Update `logging` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_logging` resource. +``` \ No newline at end of file From ed0c5f349d6c6154498054c0cf911ab84a629eb0 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 31 Mar 2022 23:31:14 +0000 Subject: [PATCH 16/42] Update CHANGELOG.md for #23819 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e2af326df5d..2a4bb23b5e6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -37,6 +37,7 @@ ENHANCEMENTS: * resource/aws_s3_bucket: Update `acl` and `grant` parameters to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring these parameters with the standalone `aws_s3_bucket_acl` resource. ([#23798](https://github.com/hashicorp/terraform-provider-aws/issues/23798)) * resource/aws_s3_bucket: Update `cors_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_cors_configuration` resource. ([#23817](https://github.com/hashicorp/terraform-provider-aws/issues/23817)) * resource/aws_s3_bucket: Update `lifecycle_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_lifecycle_configuration` resource. 
([#23818](https://github.com/hashicorp/terraform-provider-aws/issues/23818)) +* resource/aws_s3_bucket: Update `logging` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_logging` resource. ([#23819](https://github.com/hashicorp/terraform-provider-aws/issues/23819)) * resource/aws_route: Add `core_network_arn` argument ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) * resource/aws_route_table: Add `core_network_arn` argument to the `route` configuration block ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) * resource/aws_vpc_ipam: add `cascade` argument ([#23973](https://github.com/hashicorp/terraform-provider-aws/issues/23973)) From 8acc6bc1bff13c4baba91c4d9c411d020f7f95ac Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 31 Mar 2022 19:37:43 -0400 Subject: [PATCH 17/42] r/s3_bucket: make 'versioning' configurable --- internal/service/s3/bucket.go | 109 +++++++++- internal/service/s3/bucket_test.go | 170 +++++++++++++++- internal/service/s3/bucket_versioning_test.go | 188 ++++++++++++++++++ website/docs/r/s3_bucket.html.markdown | 32 ++- 4 files changed, 487 insertions(+), 12 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index f597462022e..6e9254f33f0 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -216,19 +216,21 @@ func ResourceBucket() *schema.Resource { "versioning": { Type: schema.TypeList, + Optional: true, Computed: true, + MaxItems: 1, Deprecated: "Use the aws_s3_bucket_versioning resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { - Type: schema.TypeBool, - Computed: true, - Deprecated: "Use the aws_s3_bucket_versioning resource instead", + Type: schema.TypeBool, + Optional: true, + Default: false, }, "mfa_delete": { - Type: schema.TypeBool, - Computed: true, - Deprecated: "Use the aws_s3_bucket_versioning resource instead", + Type: schema.TypeBool, + Optional: true, + Default: false, }, }, }, @@ -797,6 +799,23 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("versioning") { + v := d.Get("versioning").([]interface{}) + + if d.IsNewResource() { + if versioning := expandVersioningWhenIsNewResource(v); versioning != nil { + err := resourceBucketInternalVersioningUpdate(conn, d.Id(), versioning) + if err != nil { + return fmt.Errorf("error updating (new) S3 Bucket (%s) Versioning: %w", d.Id(), err) + } + } + } else { + if err := resourceBucketInternalVersioningUpdate(conn, d.Id(), expandVersioning(v)); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Versioning: %w", d.Id(), err) + } + } + } + return resourceBucketRead(d, meta) } @@ -1809,6 +1828,19 @@ func resourceBucketInternalObjectLockConfigurationUpdate(conn *s3.S3, d *schema. 
return err } +func resourceBucketInternalVersioningUpdate(conn *s3.S3, bucket string, versioningConfig *s3.VersioningConfiguration) error { + input := &s3.PutBucketVersioningInput{ + Bucket: aws.String(bucket), + VersioningConfiguration: versioningConfig, + } + + _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.PutBucketVersioning(input) + }) + + return err +} + ///////////////////////////////////////////// Expand and Flatten functions ///////////////////////////////////////////// // Cors Rule functions @@ -2376,6 +2408,71 @@ func flattenServerSideEncryptionConfigurationRules(rules []*s3.ServerSideEncrypt // Versioning functions +func expandVersioning(l []interface{}) *s3.VersioningConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + output := &s3.VersioningConfiguration{} + + if v, ok := tfMap["enabled"].(bool); ok { + if v { + output.Status = aws.String(s3.BucketVersioningStatusEnabled) + } else { + output.Status = aws.String(s3.BucketVersioningStatusSuspended) + } + } + + if v, ok := tfMap["mfa_delete"].(bool); ok { + if v { + output.MFADelete = aws.String(s3.MFADeleteEnabled) + } else { + output.MFADelete = aws.String(s3.MFADeleteDisabled) + } + } + + return output +} + +func expandVersioningWhenIsNewResource(l []interface{}) *s3.VersioningConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + + if !ok { + return nil + } + + output := &s3.VersioningConfiguration{} + + // Only set and return a non-nil VersioningConfiguration with at least one of + // MFADelete or Status enabled as the PutBucketVersioning API request + // does not need to be made for new buckets that don't require versioning. 
+ // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/4494 + + if v, ok := tfMap["enabled"].(bool); ok && v { + output.Status = aws.String(s3.BucketVersioningStatusEnabled) + } + + if v, ok := tfMap["mfa_delete"].(bool); ok && v { + output.MFADelete = aws.String(s3.MFADeleteEnabled) + } + + if output.MFADelete == nil && output.Status == nil { + return nil + } + + return output +} + func flattenVersioning(versioning *s3.GetBucketVersioningOutput) []interface{} { if versioning == nil { return []interface{}{} diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 4d5f393c74b..21bec5f38cd 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -850,8 +850,139 @@ func TestAccS3Bucket_Manage_objectLockWithVersioning_deprecatedEnabled(t *testin }) } +func TestAccS3Bucket_Manage_versioning(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withVersioning(bucketName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "versioning.#", "1"), + resource.TestCheckResourceAttr(resourceName, "versioning.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "versioning.0.mfa_delete", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + { + Config: testAccBucketConfig_withVersioning(bucketName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "versioning.#", "1"), + resource.TestCheckResourceAttr(resourceName, "versioning.0.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "versioning.0.mfa_delete", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccS3Bucket_Manage_versioningDisabled(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withVersioning(bucketName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "versioning.#", "1"), + resource.TestCheckResourceAttr(resourceName, "versioning.0.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "versioning.0.mfa_delete", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccS3Bucket_Manage_MfaDeleteDisabled(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ 
+ PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withVersioningMfaDelete(bucketName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "versioning.#", "1"), + resource.TestCheckResourceAttr(resourceName, "versioning.0.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "versioning.0.mfa_delete", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccS3Bucket_Manage_versioningAndMfaDeleteDisabled(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withVersioningDisabledAndMfaDelete(bucketName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "versioning.#", "1"), + resource.TestCheckResourceAttr(resourceName, "versioning.0.enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "versioning.0.mfa_delete", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + func TestAccS3Bucket_Security_updateACL(t *testing.T) { - bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ @@ -2013,6 +2144,43 @@ resource "aws_s3_bucket" "test" { `, bucketName) } +func testAccBucketConfig_withVersioning(bucketName string, enabled bool) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + versioning { + enabled = %[2]t + } +} +`, bucketName, enabled) +} + +func testAccBucketConfig_withVersioningMfaDelete(bucketName string, mfaDelete bool) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + versioning { + mfa_delete = %[2]t + } +} +`, bucketName, mfaDelete) +} + +func testAccBucketConfig_withVersioningDisabledAndMfaDelete(bucketName string, mfaDelete bool) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + versioning { + enabled = false + mfa_delete = %[2]t + } +} +`, bucketName, mfaDelete) +} + func testAccBucketConfig_withNoTags(bucketName string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { diff --git a/internal/service/s3/bucket_versioning_test.go b/internal/service/s3/bucket_versioning_test.go index 09bb6c3029d..67e1f407831 100644 --- a/internal/service/s3/bucket_versioning_test.go +++ b/internal/service/s3/bucket_versioning_test.go @@ -135,6 +135,163 @@ func TestAccS3BucketVersioning_MFADelete(t *testing.T) { }) } +func TestAccS3BucketVersioning_migrate_versioningDisabledNoChange(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.test" + resourceName := 
"aws_s3_bucket_versioning.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withVersioning(bucketName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "versioning.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "versioning.0.enabled", "false"), + ), + }, + { + Config: testAccBucketVersioning_Migrate_VersioningEnabledConfig(bucketName, tfs3.BucketVersioningStatusDisabled), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketVersioningExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "versioning_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "versioning_configuration.0.status", tfs3.BucketVersioningStatusDisabled), + ), + }, + }, + }) +} + +func TestAccS3BucketVersioning_migrate_versioningDisabledWithChange(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.test" + resourceName := "aws_s3_bucket_versioning.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withVersioning(bucketName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "versioning.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "versioning.0.enabled", "false"), + ), + }, + { + Config: testAccBucketVersioning_Migrate_VersioningEnabledConfig(bucketName, s3.BucketVersioningStatusEnabled), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketVersioningExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "versioning_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "versioning_configuration.0.status", s3.BucketVersioningStatusEnabled), + ), + }, + }, + }) +} + +func TestAccS3BucketVersioning_migrate_versioningEnabledNoChange(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.test" + resourceName := "aws_s3_bucket_versioning.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withVersioning(bucketName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "versioning.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "versioning.0.enabled", "true"), + ), + }, + { + Config: testAccBucketVersioning_Migrate_VersioningEnabledConfig(bucketName, s3.BucketVersioningStatusEnabled), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketVersioningExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "versioning_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"versioning_configuration.0.status", s3.BucketVersioningStatusEnabled), + ), + }, + }, + }) +} + +func TestAccS3BucketVersioning_migrate_versioningEnabledWithChange(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.test" + resourceName := "aws_s3_bucket_versioning.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withVersioning(bucketName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "versioning.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "versioning.0.enabled", "true"), + ), + }, + { + Config: testAccBucketVersioning_Migrate_VersioningEnabledConfig(bucketName, s3.BucketVersioningStatusSuspended), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketVersioningExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "versioning_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "versioning_configuration.0.status", s3.BucketVersioningStatusSuspended), + ), + }, + }, + }) +} + +// TestAccS3BucketVersioning_migrate_mfaDeleteNoChange can only test for a "Disabled" +// mfa_delete configuration as the "mfa" argument is required if it's enabled +func TestAccS3BucketVersioning_migrate_mfaDeleteNoChange(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.test" + resourceName := "aws_s3_bucket_versioning.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withVersioningMfaDelete(bucketName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "versioning.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "versioning.0.mfa_delete", "false"), + ), + }, + { + Config: testAccBucketVersioning_Migrate_MfaDeleteConfig(bucketName, s3.MFADeleteDisabled), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketVersioningExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "versioning_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "versioning_configuration.0.mfa_delete", s3.MFADeleteDisabled), + ), + }, + }, + }) +} + func TestAccS3BucketVersioning_Status_disabled(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_s3_bucket_versioning.test" @@ -385,3 +542,34 @@ resource "aws_s3_bucket_versioning" "test" { } `, rName, mfaDelete) } + +func testAccBucketVersioning_Migrate_VersioningEnabledConfig(rName, status string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_versioning" "test" { + bucket = aws_s3_bucket.test.id + versioning_configuration { + status = %[2]q + } +} +`, rName, status) +} + +func testAccBucketVersioning_Migrate_MfaDeleteConfig(rName, mfaDelete string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} 
+
+resource "aws_s3_bucket_versioning" "test" {
+  bucket = aws_s3_bucket.test.id
+  versioning_configuration {
+    mfa_delete = %[2]q
+    status     = "Enabled"
+  }
+}
+`, rName, mfaDelete)
+}
diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown
index 27c0a7cff9f..41383de7aee 100644
--- a/website/docs/r/s3_bucket.html.markdown
+++ b/website/docs/r/s3_bucket.html.markdown
@@ -36,6 +36,10 @@ Configuring with both will cause inconsistencies and may overwrite configuration
 or with the deprecated parameter `logging` in the resource `aws_s3_bucket`.
 Configuring with both will cause inconsistencies and may overwrite configuration.
 
+~> **NOTE on S3 Bucket Versioning Configuration:** S3 Bucket versioning can be configured in either the standalone resource [`aws_s3_bucket_versioning`](s3_bucket_versioning.html.markdown)
+or with the deprecated parameter `versioning` in the resource `aws_s3_bucket`.
+Configuring with both will cause inconsistencies and may overwrite configuration.
+
 ## Example Usage
 
 ### Private Bucket w/ Tags
@@ -83,8 +87,19 @@ resource "aws_s3_bucket" "b" {
 
 ### Using versioning
 
-The `versioning` argument is read-only as of version 4.0 of the Terraform AWS Provider.
-See the [`aws_s3_bucket_versioning` resource](s3_bucket_versioning.html.markdown) for configuration details.
+-> **NOTE:** The parameter `versioning` is deprecated.
+Use the resource [`aws_s3_bucket_versioning`](s3_bucket_versioning.html.markdown) instead.
+
+```terraform
+resource "aws_s3_bucket" "b" {
+  bucket = "my-tf-test-bucket"
+  acl    = "private"
+
+  versioning {
+    enabled = true
+  }
+}
+```
 
 ### Enable Logging
 
@@ -244,6 +259,7 @@ The following arguments are supported:
   Use the resource [`aws_s3_bucket_logging`](s3_bucket_logging.html.markdown) instead.
 * `object_lock_enabled` - (Optional, Default:`false`, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled.
 * `object_lock_configuration` - (Optional) A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See [Object Lock Configuration](#object-lock-configuration) below.
+* `versioning` - (Optional, **Deprecated**) A configuration of the [S3 bucket versioning state](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). See [Versioning](#versioning) below for details. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_versioning`](s3_bucket_versioning.html.markdown) instead.
 * `tags` - (Optional) A map of tags to assign to the bucket. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
 
 ### CORS Rule
@@ -336,6 +352,15 @@ The `object_lock_configuration` configuration block supports the following argum
 
 * `object_lock_enabled` - (Optional, **Deprecated**) Indicates whether this bucket has an Object Lock configuration enabled. Valid value is `Enabled`. Use the top-level argument `object_lock_enabled` instead.
 
+### Versioning
+
+~> **NOTE:** Currently, changes to the `versioning` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes to an S3 bucket's versioning state, use the `aws_s3_bucket_versioning` resource instead. If you use `versioning` on an `aws_s3_bucket`, Terraform will assume management over the versioning state of the S3 bucket, treating additional versioning state changes as drift. For this reason, `versioning` cannot be mixed with the external `aws_s3_bucket_versioning` resource for a given S3 bucket.
+
+The `versioning` configuration block supports the following arguments:
+
+* `enabled` - (Optional) Enable versioning. Once you version-enable a bucket, it can never return to an unversioned state. You can, however, suspend versioning on that bucket.
+* `mfa_delete` - (Optional) Enable MFA delete for either `Change the versioning state of your bucket` or `Permanently delete an object version`. Default is `false`. This cannot be used to toggle the setting; it is available so that buckets managed by Terraform can reflect the MFA delete state configured in AWS.
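+
+For illustration, the deprecated inline `versioning` block from the example earlier on this page might translate to the standalone resource roughly as follows. This is a sketch only; the resource label is a placeholder, and the bucket reference assumes the `b` bucket from that example.
+
+```terraform
+resource "aws_s3_bucket_versioning" "example" {
+  bucket = aws_s3_bucket.b.id
+
+  versioning_configuration {
+    status = "Enabled"
+  }
+}
+```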
+
 ## Attributes Reference
 
 In addition to all arguments above, the following attributes are exported:
@@ -388,9 +413,6 @@ In addition to all arguments above, the following attributes are exported:
     * `sse_algorithm` - (required) The server-side encryption algorithm used.
     * `bucket_key_enabled` - (Optional) Whether an [Amazon S3 Bucket Key](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) is used for SSE-KMS.
 * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block).
-* `versioning` - The [versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) state of the bucket.
-    * `enabled` - Whether versioning is enabled.
-    * `mfa_delete` - Whether MFA delete is enabled.
 * `website` - The website configuration, if configured.
     * `error_document` - The name of the error document for the website.
     * `index_document` - The name of the index document for the website.

From ba25d0f48efa107b496d93012eeeee8fe42d3c13 Mon Sep 17 00:00:00 2001
From: Angie Pinilla
Date: Thu, 31 Mar 2022 19:37:47 -0400
Subject: [PATCH 18/42] Update CHANGELOG for #23820

---
 .changelog/23820.txt | 3 +++
 1 file changed, 3 insertions(+)
 create mode 100644 .changelog/23820.txt

diff --git a/.changelog/23820.txt b/.changelog/23820.txt
new file mode 100644
index 00000000000..e53b49cf787
--- /dev/null
+++ b/.changelog/23820.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/aws_s3_bucket: Update `versioning` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_versioning` resource.
+```
\ No newline at end of file

From d8893eab438ebac86c9c140cbffa704f5f970d5f Mon Sep 17 00:00:00 2001
From: changelogbot
Date: Thu, 31 Mar 2022 23:58:18 +0000
Subject: [PATCH 19/42] Update CHANGELOG.md for #23820

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2a4bb23b5e6..4646747c977 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -38,6 +38,7 @@ ENHANCEMENTS:
 * resource/aws_s3_bucket: Update `cors_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_cors_configuration` resource. ([#23817](https://github.com/hashicorp/terraform-provider-aws/issues/23817))
 * resource/aws_s3_bucket: Update `lifecycle_rule` parameter to be configurable. 
Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_lifecycle_configuration` resource. ([#23818](https://github.com/hashicorp/terraform-provider-aws/issues/23818)) * resource/aws_s3_bucket: Update `logging` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_logging` resource. ([#23819](https://github.com/hashicorp/terraform-provider-aws/issues/23819)) +* resource/aws_s3_bucket: Update `versioning` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_versioning` resource. ([#23820](https://github.com/hashicorp/terraform-provider-aws/issues/23820)) * resource/aws_route: Add `core_network_arn` argument ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) * resource/aws_route_table: Add `core_network_arn` argument to the `route` configuration block ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) * resource/aws_vpc_ipam: add `cascade` argument ([#23973](https://github.com/hashicorp/terraform-provider-aws/issues/23973)) From e9bbebc15f4c244b196e9aa2200a38696e41580c Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 31 Mar 2022 20:08:29 -0400 Subject: [PATCH 20/42] r/s3_bucket: make 'website' configurable --- internal/service/s3/bucket.go | 146 ++++++++++- internal/service/s3/bucket_test.go | 237 ++++++++++++++++++ .../s3/bucket_website_configuration_test.go | 233 +++++++++++++++++ website/docs/r/s3_bucket.html.markdown | 51 +++- 4 files changed, 647 insertions(+), 20 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 6e9254f33f0..e7b985231d7 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -1,6 +1,7 @@ package s3 import ( + "bytes" "context" "encoding/json" "fmt" @@ -20,6 +21,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/flex" @@ -162,32 +164,47 @@ func ResourceBucket() *schema.Resource { "website": { Type: schema.TypeList, + Optional: true, Computed: true, - Deprecated: "Use the aws_s3_bucket_website_configuration resource", + MaxItems: 1, + Deprecated: "Use the aws_s3_bucket_website_configuration resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "index_document": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_website_configuration resource", + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: []string{ + "website.0.index_document", + "website.0.redirect_all_requests_to", + }, }, "error_document": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_website_configuration resource", + Type: schema.TypeString, + Optional: true, }, "redirect_all_requests_to": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_website_configuration resource", + Type: schema.TypeString, + 
ExactlyOneOf: []string{ + "website.0.index_document", + "website.0.redirect_all_requests_to", + }, + ConflictsWith: []string{ + "website.0.error_document", + "website.0.routing_rules", + }, + Optional: true, }, "routing_rules": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_website_configuration resource", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + StateFunc: func(v interface{}) string { + json, _ := structure.NormalizeJsonString(v) + return json + }, }, }, }, @@ -816,6 +833,12 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("website") { + if err := resourceBucketInternalWebsiteUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Website: %w", d.Id(), err) + } + } + return resourceBucketRead(d, meta) } @@ -1841,6 +1864,45 @@ func resourceBucketInternalVersioningUpdate(conn *s3.S3, bucket string, versioni return err } +func resourceBucketInternalWebsiteUpdate(conn *s3.S3, d *schema.ResourceData) error { + ws := d.Get("website").([]interface{}) + + if len(ws) == 0 { + input := &s3.DeleteBucketWebsiteInput{ + Bucket: aws.String(d.Id()), + } + + _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.DeleteBucketWebsite(input) + }) + + if err != nil { + return fmt.Errorf("error deleting S3 Bucket (%s) Website: %w", d.Id(), err) + } + + d.Set("website_endpoint", "") + d.Set("website_domain", "") + + return nil + } + + websiteConfig, err := expandWebsiteConfiguration(ws) + if err != nil { + return fmt.Errorf("error expanding S3 Bucket (%s) website configuration: %w", d.Id(), err) + } + + input := &s3.PutBucketWebsiteInput{ + Bucket: aws.String(d.Id()), + WebsiteConfiguration: websiteConfig, + } + + _, err = verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.PutBucketWebsite(input) + }) + + return err +} + ///////////////////////////////////////////// Expand and Flatten functions ///////////////////////////////////////////// // Cors Rule functions @@ -2497,6 +2559,64 @@ func flattenVersioning(versioning *s3.GetBucketVersioningOutput) []interface{} { // Website functions +func expandWebsiteConfiguration(l []interface{}) (*s3.WebsiteConfiguration, error) { + if len(l) == 0 || l[0] == nil { + return nil, nil + } + + website, ok := l[0].(map[string]interface{}) + if !ok { + return nil, nil + } + + websiteConfiguration := &s3.WebsiteConfiguration{} + + if v, ok := website["index_document"].(string); ok && v != "" { + websiteConfiguration.IndexDocument = &s3.IndexDocument{ + Suffix: aws.String(v), + } + } + + if v, ok := website["error_document"].(string); ok && v != "" { + websiteConfiguration.ErrorDocument = &s3.ErrorDocument{ + Key: aws.String(v), + } + } + + if v, ok := website["redirect_all_requests_to"].(string); ok && v != "" { + redirect, err := url.Parse(v) + if err == nil && redirect.Scheme != "" { + var redirectHostBuf bytes.Buffer + redirectHostBuf.WriteString(redirect.Host) + if redirect.Path != "" { + redirectHostBuf.WriteString(redirect.Path) + } + if redirect.RawQuery != "" { + redirectHostBuf.WriteString("?") + redirectHostBuf.WriteString(redirect.RawQuery) + } + websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{ + HostName: aws.String(redirectHostBuf.String()), + Protocol: aws.String(redirect.Scheme), + } + } else { + websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{ + HostName: aws.String(v), + } + } 
+ } + + if v, ok := website["routing_rules"].(string); ok && v != "" { + var unmarshaledRules []*s3.RoutingRule + if err := json.Unmarshal([]byte(v), &unmarshaledRules); err != nil { + return nil, err + } + websiteConfiguration.RoutingRules = unmarshaledRules + } + + return websiteConfiguration, nil +} + func flattenBucketWebsite(ws *s3.GetBucketWebsiteOutput) ([]interface{}, error) { if ws == nil { return []interface{}{}, nil diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 21bec5f38cd..10f89f58665 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -1352,6 +1352,153 @@ func TestAccS3Bucket_Security_logging(t *testing.T) { }) } +func TestAccS3Bucket_Web_simple(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + region := acctest.Region() + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withWebsite(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "grant"}, + }, + { + Config: testAccBucketConfig_withWebsiteAndError(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + resource.TestCheckResourceAttr(resourceName, "website.0.error_document", "error.html"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + { + // As Website is a Computed field, removing them from terraform will not + // trigger an update to remove them from the S3 bucket. 
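+				// The checks below therefore assert that the previously applied
+				// website settings are still present in state after this config.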
+ Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + resource.TestCheckResourceAttr(resourceName, "website.0.error_document", "error.html"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + }, + }) +} + +func TestAccS3Bucket_Web_redirect(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + region := acctest.Region() + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withWebsiteAndRedirect(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.redirect_all_requests_to", "hashicorp.com?my=query"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "grant"}, + }, + { + Config: testAccBucketConfig_withWebsiteAndHTTPSRedirect(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.redirect_all_requests_to", "https://hashicorp.com?my=query"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + { + // As Website is a Computed field, removing them from terraform will not + // trigger an update to remove them from the S3 bucket. 
+ Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.redirect_all_requests_to", "https://hashicorp.com?my=query"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + }, + }) +} + +func TestAccS3Bucket_Web_routingRules(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + region := acctest.Region() + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withWebsiteAndRoutingRules(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.error_document", "error.html"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + resource.TestCheckResourceAttrSet(resourceName, "website.0.routing_rules"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "grant"}, + }, + { + // As Website is a Computed field, removing them from terraform will not + // trigger an update to remove them from the S3 bucket. + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.error_document", "error.html"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + resource.TestCheckResourceAttrSet(resourceName, "website.0.routing_rules"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + }, + }) +} + func TestBucketName(t *testing.T) { validDnsNames := []string{ "foobar", @@ -1831,6 +1978,15 @@ func testAccBucketRegionalDomainName(bucket, region string) string { return regionalEndpoint } +func testAccCheckS3BucketWebsiteEndpoint(resourceName string, attributeName string, bucketName string, region string) resource.TestCheckFunc { + return func(s *terraform.State) error { + website := tfs3.WebsiteEndpoint(acctest.Provider.Meta().(*conns.AWSClient), bucketName, region) + expectedValue := website.Endpoint + + return resource.TestCheckResourceAttr(resourceName, attributeName, expectedValue)(s) + } +} + func testAccCheckBucketUpdateTags(n string, oldTags, newTags map[string]string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] @@ -2181,6 +2337,87 @@ resource "aws_s3_bucket" "test" { `, bucketName, mfaDelete) } +func testAccBucketConfig_withWebsite(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "public-read" + + website { + index_document = "index.html" + } +} +`, bucketName) +} + +func testAccBucketConfig_withWebsiteAndError(bucketName string) string { + return fmt.Sprintf(` +resource 
"aws_s3_bucket" "test" { + bucket = %[1]q + acl = "public-read" + + website { + index_document = "index.html" + error_document = "error.html" + } +} +`, bucketName) +} + +func testAccBucketConfig_withWebsiteAndRedirect(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "public-read" + + website { + redirect_all_requests_to = "hashicorp.com?my=query" + } +} +`, bucketName) +} + +func testAccBucketConfig_withWebsiteAndHTTPSRedirect(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "public-read" + + website { + redirect_all_requests_to = "https://hashicorp.com?my=query" + } +} +`, bucketName) +} + +func testAccBucketConfig_withWebsiteAndRoutingRules(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "public-read" + + website { + index_document = "index.html" + error_document = "error.html" + + routing_rules = < **NOTE on S3 Bucket Website Configuration:** S3 Bucket Website can be configured in either the standalone resource [`aws_s3_bucket_website_configuration`](s3_bucket_website_configuration.html.markdown) +or with the deprecated parameter `website` in the resource `aws_s3_bucket`. +Configuring with both will cause inconsistencies and may overwrite configuration. + ## Example Usage ### Private Bucket w/ Tags @@ -62,8 +66,32 @@ resource "aws_s3_bucket_acl" "example" { ### Static Website Hosting -The `website` argument is read-only as of version 4.0 of the Terraform AWS Provider. -See the [`aws_s3_bucket_website_configuration` resource](s3_bucket_website_configuration.html.markdown) for configuration details. +-> **NOTE:** The parameter `website` is deprecated. +Use the resource [`aws_s3_bucket_website_configuration`](s3_bucket_website_configuration.html.markdown) instead. + +```terraform +resource "aws_s3_bucket" "b" { + bucket = "s3-website-test.hashicorp.com" + acl = "public-read" + policy = file("policy.json") + + website { + index_document = "index.html" + error_document = "error.html" + + routing_rules = < **NOTE:** Currently, changes to the `website` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes to the website configuration of an S3 bucket, use the `aws_s3_bucket_website_configuration` resource instead. If you use `website` on an `aws_s3_bucket`, Terraform will assume management over the configuration of the website of the S3 bucket, treating additional website configuration changes as drift. For this reason, `website` cannot be mixed with the external `aws_s3_bucket_website_configuration` resource for a given S3 bucket. + +The `website` configuration block supports the following arguments: + +* `index_document` - (Required, unless using `redirect_all_requests_to`) Amazon S3 returns this index document when requests are made to the root domain or any of the subfolders. +* `error_document` - (Optional) An absolute path to the document to return in case of a 4XX error. +* `redirect_all_requests_to` - (Optional) A hostname to redirect all website requests for this bucket to. Hostname can optionally be prefixed with a protocol (`http://` or `https://`) to use when redirecting requests. The default is the protocol that is used in the original request. 
+* `routing_rules` - (Optional) A json array containing [routing rules](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules.html) + describing redirect behavior and when redirects are applied. + ## Attributes Reference In addition to all arguments above, the following attributes are exported: @@ -413,11 +455,6 @@ In addition to all arguments above, the following attributes are exported: * `sse_algorithm` - (required) The server-side encryption algorithm used. * `bucket_key_enabled` - (Optional) Whether an [Amazon S3 Bucket Key](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) is used for SSE-KMS. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). -* `website` - The website configuration, if configured. - * `error_document` - The name of the error document for the website. - * `index_document` - The name of the index document for the website. - * `redirect_all_requests_to` - The redirect behavior for every request to this bucket's website endpoint. - * `routing_rules` - (Optional) The rules that define when a redirect is applied and the redirect behavior. * `website_endpoint` - The website endpoint, if the bucket is configured with a website. If not, this will be an empty string. * `website_domain` - The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records. From 0d502fdb05e184d17b6fbcdb796cc31d06d1d22e Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 31 Mar 2022 20:08:38 -0400 Subject: [PATCH 21/42] Update CHANGELOG for #23821 --- .changelog/23821.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/23821.txt diff --git a/.changelog/23821.txt b/.changelog/23821.txt new file mode 100644 index 00000000000..561b71ed534 --- /dev/null +++ b/.changelog/23821.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket: Update `website` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_website_configuration` resource. +``` \ No newline at end of file From 3638a90ed44e951960c00d29cb943b2aee6a9338 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 1 Apr 2022 00:38:58 +0000 Subject: [PATCH 22/42] Update CHANGELOG.md for #23821 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4646747c977..9caecd24ffb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ ENHANCEMENTS: * resource/aws_s3_bucket: Update `lifecycle_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_lifecycle_configuration` resource. ([#23818](https://github.com/hashicorp/terraform-provider-aws/issues/23818)) * resource/aws_s3_bucket: Update `logging` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_logging` resource. ([#23819](https://github.com/hashicorp/terraform-provider-aws/issues/23819)) * resource/aws_s3_bucket: Update `versioning` parameter to be configurable. 
Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_versioning` resource. ([#23820](https://github.com/hashicorp/terraform-provider-aws/issues/23820)) +* resource/aws_s3_bucket: Update `website` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_website_configuration` resource. ([#23821](https://github.com/hashicorp/terraform-provider-aws/issues/23821)) * resource/aws_route: Add `core_network_arn` argument ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) * resource/aws_route_table: Add `core_network_arn` argument to the `route` configuration block ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) * resource/aws_vpc_ipam: add `cascade` argument ([#23973](https://github.com/hashicorp/terraform-provider-aws/issues/23973)) From 60f0140abb5234cdf536baeb3b36d5c4bd7e8898 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 23 Mar 2022 01:59:32 -0400 Subject: [PATCH 23/42] r/s3_bucket: make 'server_side_encryption_configuration' configurable --- internal/service/s3/bucket.go | 100 +++++++-- ...rver_side_encryption_configuration_test.go | 92 +++++++++ internal/service/s3/bucket_test.go | 189 ++++++++++++++++++ website/docs/r/s3_bucket.html.markdown | 55 ++++- 4 files changed, 413 insertions(+), 23 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index e7b985231d7..ee28aa9ff68 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -571,39 +571,39 @@ func ResourceBucket() *schema.Resource { "server_side_encryption_configuration": { Type: schema.TypeList, + MaxItems: 1, + Optional: true, Computed: true, Deprecated: "Use the aws_s3_bucket_server_side_encryption_configuration resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "rule": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_server_side_encryption_configuration resource instead", + Type: schema.TypeList, + MaxItems: 1, + Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "apply_server_side_encryption_by_default": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_server_side_encryption_configuration resource instead", + Type: schema.TypeList, + MaxItems: 1, + Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "kms_master_key_id": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_server_side_encryption_configuration resource instead", + Type: schema.TypeString, + Optional: true, }, "sse_algorithm": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_server_side_encryption_configuration resource instead", + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.ServerSideEncryption_Values(), false), }, }, }, }, "bucket_key_enabled": { - Type: schema.TypeBool, - Computed: true, - Deprecated: "Use the aws_s3_bucket_server_side_encryption_configuration resource instead", + Type: schema.TypeBool, + Optional: true, }, }, }, @@ -816,6 +816,12 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("server_side_encryption_configuration") { + if err := resourceBucketInternalServerSideEncryptionConfigurationUpdate(conn, d); 
err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Server-side Encryption configuration: %w", d.Id(), err) + } + } + if d.HasChange("versioning") { v := d.Get("versioning").([]interface{}) @@ -1851,6 +1857,70 @@ func resourceBucketInternalObjectLockConfigurationUpdate(conn *s3.S3, d *schema. return err } +func resourceBucketInternalServerSideEncryptionConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { + serverSideEncryptionConfiguration := d.Get("server_side_encryption_configuration").([]interface{}) + + if len(serverSideEncryptionConfiguration) == 0 { + input := &s3.DeleteBucketEncryptionInput{ + Bucket: aws.String(d.Id()), + } + + _, err := conn.DeleteBucketEncryption(input) + + if err != nil { + return fmt.Errorf("error removing S3 Bucket (%s) Server-side Encryption: %w", d.Id(), err) + } + + return nil + } + + c := serverSideEncryptionConfiguration[0].(map[string]interface{}) + + rc := &s3.ServerSideEncryptionConfiguration{} + + rcRules := c["rule"].([]interface{}) + var rules []*s3.ServerSideEncryptionRule + for _, v := range rcRules { + rr := v.(map[string]interface{}) + rrDefault := rr["apply_server_side_encryption_by_default"].([]interface{}) + sseAlgorithm := rrDefault[0].(map[string]interface{})["sse_algorithm"].(string) + kmsMasterKeyId := rrDefault[0].(map[string]interface{})["kms_master_key_id"].(string) + rcDefaultRule := &s3.ServerSideEncryptionByDefault{ + SSEAlgorithm: aws.String(sseAlgorithm), + } + if kmsMasterKeyId != "" { + rcDefaultRule.KMSMasterKeyID = aws.String(kmsMasterKeyId) + } + rcRule := &s3.ServerSideEncryptionRule{ + ApplyServerSideEncryptionByDefault: rcDefaultRule, + } + + if val, ok := rr["bucket_key_enabled"].(bool); ok { + rcRule.BucketKeyEnabled = aws.Bool(val) + } + + rules = append(rules, rcRule) + } + + rc.Rules = rules + + input := &s3.PutBucketEncryptionInput{ + Bucket: aws.String(d.Id()), + ServerSideEncryptionConfiguration: rc, + } + + _, err := tfresource.RetryWhenAWSErrCodeEquals( + propagationTimeout, + func() (interface{}, error) { + return conn.PutBucketEncryption(input) + }, + s3.ErrCodeNoSuchBucket, + ErrCodeOperationAborted, + ) + + return err +} + func resourceBucketInternalVersioningUpdate(conn *s3.S3, bucket string, versioningConfig *s3.VersioningConfiguration) error { input := &s3.PutBucketVersioningInput{ Bucket: aws.String(bucket), diff --git a/internal/service/s3/bucket_server_side_encryption_configuration_test.go b/internal/service/s3/bucket_server_side_encryption_configuration_test.go index a3636e4472b..a0fb844f8e1 100644 --- a/internal/service/s3/bucket_server_side_encryption_configuration_test.go +++ b/internal/service/s3/bucket_server_side_encryption_configuration_test.go @@ -314,6 +314,80 @@ func TestAccS3BucketServerSideEncryptionConfiguration_ApplySSEByDefault_BucketKe }) } +func TestAccS3BucketServerSideEncryptionConfiguration_migrate_noChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_server_side_encryption_configuration.test" + bucketResourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketServerSideEncryptionConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withDefaultEncryption_defaultKey(rName, s3.ServerSideEncryptionAwsKms), + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.0.bucket_key_enabled", "false"), + ), + }, + { + Config: testAccBucketServerSideEncryptionConfigurationConfig_migrate_noChange(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketServerSideEncryptionConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "bucket", bucketResourceName, "bucket"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestCheckNoResourceAttr(resourceName, "rule.0.bucket_key_enabled"), + ), + }, + }, + }) +} + +func TestAccS3BucketServerSideEncryptionConfiguration_migrate_withChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_server_side_encryption_configuration.test" + bucketResourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketServerSideEncryptionConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withDefaultEncryption_defaultKey(rName, s3.ServerSideEncryptionAwsKms), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.0.rule.0.bucket_key_enabled", "false"), + ), + }, + { + Config: testAccBucketServerSideEncryptionConfigurationConfig_ApplySSEByDefault_SSEAlgorithm(rName, s3.ServerSideEncryptionAes256), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketServerSideEncryptionConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "bucket", bucketResourceName, "bucket"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.apply_server_side_encryption_by_default.#", "1"), + resource.TestCheckResourceAttr(resourceName, 
"rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAes256), + resource.TestCheckNoResourceAttr(resourceName, "rule.0.bucket_key_enabled"), + ), + }, + }, + }) +} + func testAccCheckBucketServerSideEncryptionConfigurationDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn @@ -513,3 +587,21 @@ resource "aws_s3_bucket_server_side_encryption_configuration" "test" { } `, rName, enabled) } + +func testAccBucketServerSideEncryptionConfigurationConfig_migrate_noChange(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "test" { + bucket = aws_s3_bucket.test.id + + rule { + apply_server_side_encryption_by_default { + sse_algorithm = "aws:kms" + } + } +} +`, rName) +} diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 10f89f58665..28d0bb9127b 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -263,6 +263,38 @@ func TestAccS3Bucket_Basic_acceleration(t *testing.T) { }) } +func TestAccS3Bucket_Basic_keyEnabled(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withDefaultEncryptionAndBucketKeyEnabled_KmsMasterKey(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", "aws:kms"), + resource.TestMatchResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", regexp.MustCompile("^arn")), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.bucket_key_enabled", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + // Test TestAccS3Bucket_disappears is designed to fail with a "plan // not empty" error in Terraform, to check against regressions. 
// See https://github.com/hashicorp/terraform/pull/2925 @@ -1351,6 +1383,102 @@ func TestAccS3Bucket_Security_logging(t *testing.T) { }, }) } +func TestAccS3Bucket_Security_enableDefaultEncryptionWhenTypical(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withDefaultEncryption_KmsMasterKey(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestMatchResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", regexp.MustCompile("^arn")), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccS3Bucket_Security_enableDefaultEncryptionWhenAES256IsUsed(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withDefaultEncryption_defaultKey(bucketName, s3.ServerSideEncryptionAes256), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAes256), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + }, + }) +} + +func TestAccS3Bucket_Security_disableDefaultEncryptionWhenDefaultEncryptionIsEnabled(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: 
[]resource.TestStep{ + { + Config: testAccBucketConfig_withDefaultEncryption_defaultKey(bucketName, s3.ServerSideEncryptionAwsKms), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + { + // As ServerSide Encryption Configuration is a Computed field, removing them from terraform will not + // trigger an update to remove it from the S3 bucket. + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "1"), + ), + }, + }, + }) +} func TestAccS3Bucket_Web_simple(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -2086,6 +2214,67 @@ resource "aws_s3_bucket" "test" { `, bucketName) } +func testAccBucketConfig_withDefaultEncryption_defaultKey(bucketName, sseAlgorithm string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + server_side_encryption_configuration { + rule { + apply_server_side_encryption_by_default { + sse_algorithm = %[2]q + } + } + } +} +`, bucketName, sseAlgorithm) +} + +func testAccBucketConfig_withDefaultEncryption_KmsMasterKey(bucketName string) string { + return fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = "KMS Key for Bucket %[1]s" + deletion_window_in_days = 10 +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + server_side_encryption_configuration { + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.test.arn + sse_algorithm = "aws:kms" + } + } + } +} +`, bucketName) +} + +func testAccBucketConfig_withDefaultEncryptionAndBucketKeyEnabled_KmsMasterKey(bucketName string) string { + return fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = "KMS Key for Bucket %[1]s" + deletion_window_in_days = 7 +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + server_side_encryption_configuration { + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.test.arn + sse_algorithm = "aws:kms" + } + bucket_key_enabled = true + } + } +} +`, bucketName) +} + func testAccBucketConfig_withGrants(bucketName string) string { return fmt.Sprintf(` data "aws_canonical_user_id" "current" {} diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 540d361a760..98fba183ff5 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -36,6 +36,10 @@ Configuring with both will cause inconsistencies and may overwrite configuration or with the deprecated parameter `logging` in the resource `aws_s3_bucket`. Configuring with both will cause inconsistencies and may overwrite configuration. +~> **NOTE on S3 Bucket Server Side Encryption Configuration:** S3 Bucket Server Side Encryption can be configured in either the standalone resource [`aws_s3_bucket_server_side_encryption_configuration`](s3_bucket_server_side_encryption_configuration.html) +or with the deprecated parameter `server_side_encryption_configuration` in the resource `aws_s3_bucket`. +Configuring with both will cause inconsistencies and may overwrite configuration. 
+ ~> **NOTE on S3 Bucket Versioning Configuration:** S3 Bucket versioning can be configured in either the standalone resource [`aws_s3_bucket_versioning`](s3_bucket_versioning.html.markdown) or with the deprecated parameter `versioning` in the resource `aws_s3_bucket`. Configuring with both will cause inconsistencies and may overwrite configuration. @@ -241,8 +245,28 @@ See the [`aws_s3_bucket_replication_configuration` resource](s3_bucket_replicati ### Enable Default Server Side Encryption -The `server_side_encryption_configuration` argument is read-only as of version 4.0 of the Terraform AWS Provider. -See the [`aws_s3_bucket_server_side_encryption_configuration` resource](s3_bucket_server_side_encryption_configuration.html.markdown) for configuration details. +-> **NOTE:** The parameter `server_side_encryption_configuration` is deprecated. +Use the resource [`aws_s3_bucket_server_side_encryption_configuration`](s3_bucket_server_side_encryption_configuration.html) instead. + +```terraform +resource "aws_kms_key" "mykey" { + description = "This key is used to encrypt bucket objects" + deletion_window_in_days = 10 +} + +resource "aws_s3_bucket" "mybucket" { + bucket = "mybucket" + + server_side_encryption_configuration { + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.mykey.arn + sse_algorithm = "aws:kms" + } + } + } +} +``` ### Using ACL policy grants @@ -287,6 +311,9 @@ The following arguments are supported: Use the resource [`aws_s3_bucket_logging`](s3_bucket_logging.html.markdown) instead. * `object_lock_enabled` - (Optional, Default:`false`, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled. * `object_lock_configuration` - (Optional) A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See [Object Lock Configuration](#object-lock-configuration) below. +* `server_side_encryption_configuration` - (Optional, **Deprecated**) A configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). See [Server Side Encryption Configuration](#server-side-encryption-configuration) below for details. + Terraform will only perform drift detection if a configuration value is provided. + Use the resource [`aws_s3_bucket_server_side_encryption_configuration`](s3_bucket_server_side_encryption_configuration.html) instead. * `versioning` - (Optional, **Deprecated**) A configuration of the [S3 bucket versioning state](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). See [Versioning](#versioning) below for details. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_versioning`](s3_bucket_versioning.html.markdown) instead. * `website` - (Optional, **Deprecated**) A configuration of the [S3 bucket website](https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteHosting.html). See [Website](#website) below for details. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_website_configuration`](s3_bucket_website_configuration.html.markdown) instead. @@ -382,6 +409,24 @@ The `object_lock_configuration` configuration block supports the following argum * `object_lock_enabled` - (Optional, **Deprecated**) Indicates whether this bucket has an Object Lock configuration enabled. Valid value is `Enabled`. Use the top-level argument `object_lock_enabled` instead. 
+### Server Side Encryption Configuration + +~> **NOTE:** Currently, changes to the `server_side_encryption_configuration` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes in encryption of an S3 bucket, use the `aws_s3_bucket_server_side_encryption_configuration` resource instead. If you use `server_side_encryption_configuration` on an `aws_s3_bucket`, Terraform will assume management over the encryption configuration for the S3 bucket, treating additional encryption changes as drift. For this reason, `server_side_encryption_configuration` cannot be mixed with the external `aws_s3_bucket_server_side_encryption_configuration` resource for a given S3 bucket. + +The `server_side_encryption_configuration` configuration block supports the following argument: + +* `rule` - (Required) A single object for server-side encryption by default configuration. (documented below) + +The `rule` configuration block supports the following arguments: + +* `apply_server_side_encryption_by_default` - (Required) A single object for setting server-side encryption by default. (documented below) +* `bucket_key_enabled` - (Optional) Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS. + +The `apply_server_side_encryption_by_default` configuration block supports the following arguments: + +* `sse_algorithm` - (Required) The server-side encryption algorithm to use. Valid values are `AES256` and `aws:kms` +* `kms_master_key_id` - (Optional) The AWS KMS master key ID used for the SSE-KMS encryption. This can only be used when you set the value of `sse_algorithm` as `aws:kms`. The default `aws/s3` AWS KMS master key is used if this element is absent while the `sse_algorithm` is `aws:kms`. + ### Versioning ~> **NOTE:** Currently, changes to the `versioning` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes of versioning state to an S3 bucket, use the `aws_s3_bucket_versioning` resource instead. If you use `versioning` on an `aws_s3_bucket`, Terraform will assume management over the versioning state of the S3 bucket, treating additional versioning state changes as drift. For this reason, `versioning` cannot be mixed with the external `aws_s3_bucket_versioning` resource for a given S3 bucket. @@ -448,12 +493,6 @@ In addition to all arguments above, the following attributes are exported: * `enabled` - Whether this criteria is enabled. * `status` - The status of the rule. * `request_payer` - Either `BucketOwner` or `Requester` that pays for the download and request fees. -* `server_side_encryption_configuration` - The [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). - * `rule` - (required) Information about a particular server-side encryption configuration rule. - * `apply_server_side_encryption_by_default` - The default server-side encryption applied to new objects in the bucket. - * `kms_master_key_id` - (optional) The AWS KMS master key ID used for the SSE-KMS encryption. - * `sse_algorithm` - (required) The server-side encryption algorithm used. - * `bucket_key_enabled` - (Optional) Whether an [Amazon S3 Bucket Key](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) is used for SSE-KMS. 
* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). * `website_endpoint` - The website endpoint, if the bucket is configured with a website. If not, this will be an empty string. * `website_domain` - The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records. From b4c08d338d74149646ae8a5577f7353c65bfceed Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 23 Mar 2022 02:01:08 -0400 Subject: [PATCH 24/42] Update CHANGELOG for #23822 --- .changelog/23822.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/23822.txt diff --git a/.changelog/23822.txt b/.changelog/23822.txt new file mode 100644 index 00000000000..aeadeacdf08 --- /dev/null +++ b/.changelog/23822.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket: Update `server_side_encryption_configuration` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_server_side_encryption_configuration` resource. +``` \ No newline at end of file From e1cfa23de81d81b0cd3b2e73a24bf1ef5c448ee0 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 1 Apr 2022 01:02:51 +0000 Subject: [PATCH 25/42] Update CHANGELOG.md for #23822 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9caecd24ffb..c046360feb1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ ENHANCEMENTS: * resource/aws_s3_bucket: Update `cors_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_cors_configuration` resource. ([#23817](https://github.com/hashicorp/terraform-provider-aws/issues/23817)) * resource/aws_s3_bucket: Update `lifecycle_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_lifecycle_configuration` resource. ([#23818](https://github.com/hashicorp/terraform-provider-aws/issues/23818)) * resource/aws_s3_bucket: Update `logging` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_logging` resource. ([#23819](https://github.com/hashicorp/terraform-provider-aws/issues/23819)) +* resource/aws_s3_bucket: Update `server_side_encryption_configuration` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_server_side_encryption_configuration` resource. ([#23822](https://github.com/hashicorp/terraform-provider-aws/issues/23822)) * resource/aws_s3_bucket: Update `versioning` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_versioning` resource. ([#23820](https://github.com/hashicorp/terraform-provider-aws/issues/23820)) * resource/aws_s3_bucket: Update `website` parameter to be configurable. 
Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_website_configuration` resource. ([#23821](https://github.com/hashicorp/terraform-provider-aws/issues/23821)) * resource/aws_route: Add `core_network_arn` argument ([#24024](https://github.com/hashicorp/terraform-provider-aws/issues/24024)) From d825c9ec20bfb88d7b6baad04905e985608ac32a Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 31 Mar 2022 21:20:01 -0400 Subject: [PATCH 26/42] r/s3_bucket: make 'replication_configuration' configurable --- internal/service/s3/bucket.go | 469 ++- .../bucket_replication_configuration_test.go | 216 + internal/service/s3/bucket_test.go | 3472 ++++++++++++----- internal/service/s3/errors.go | 1 + website/docs/r/s3_bucket.html.markdown | 236 +- 5 files changed, 3287 insertions(+), 1107 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index ee28aa9ff68..155e81ef959 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -403,100 +403,105 @@ func ResourceBucket() *schema.Resource { "replication_configuration": { Type: schema.TypeList, + Optional: true, Computed: true, + MaxItems: 1, Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "role": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Required: true, }, "rules": { - Type: schema.TypeSet, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeSet, + Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "id": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 255), }, "destination": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeList, + MaxItems: 1, + MinItems: 1, + Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "account_id": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidAccountID, }, "bucket": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, }, "storage_class": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(s3.StorageClass_Values(), false), }, "replica_kms_key_id": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, }, "access_control_translation": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "owner": { - Type: 
schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.OwnerOverride_Values(), false), }, }, }, }, "replication_time": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "minutes": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeInt, + Optional: true, + Default: 15, + ValidateFunc: validation.IntBetween(15, 15), }, "status": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + Default: s3.ReplicationTimeStatusEnabled, + ValidateFunc: validation.StringInSlice(s3.ReplicationTimeStatus_Values(), false), }, }, }, }, "metrics": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "minutes": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeInt, + Optional: true, + Default: 15, + ValidateFunc: validation.IntBetween(10, 15), }, "status": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + Default: s3.MetricsStatusEnabled, + ValidateFunc: validation.StringInSlice(s3.MetricsStatus_Values(), false), }, }, }, @@ -505,21 +510,22 @@ func ResourceBucket() *schema.Resource { }, }, "source_selection_criteria": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "sse_kms_encrypted_objects": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "enabled": { - Type: schema.TypeBool, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeBool, + Required: true, }, }, }, @@ -528,39 +534,39 @@ func ResourceBucket() *schema.Resource { }, }, "prefix": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), }, "status": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.ReplicationRuleStatus_Values(), false), }, "priority": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeInt, + Optional: true, }, "filter": { - Type: schema.TypeList, - Computed: true, - 
Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeList, + Optional: true, + MinItems: 1, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "prefix": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), }, - "tags": tftags.TagsSchemaComputedDeprecated("Use the aws_s3_bucket_replication_configuration resource instead"), + "tags": tftags.TagsSchema(), }, }, }, "delete_marker_replication_status": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_replication_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{s3.DeleteMarkerReplicationStatusEnabled}, false), }, }, }, @@ -774,21 +780,40 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } - if d.HasChange("acceleration_status") { - if err := resourceBucketInternalAccelerationUpdate(conn, d); err != nil { - return fmt.Errorf("error updating S3 Bucket (%s) Acceleration Status: %w", d.Id(), err) + // Note: Order of argument updates below is important + + if d.HasChange("cors_rule") { + if err := resourceBucketInternalCorsUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) CORS Rules: %w", d.Id(), err) } } - if d.HasChange("acl") && !d.IsNewResource() { - if err := resourceBucketInternalACLUpdate(conn, d); err != nil { - return fmt.Errorf("error updating S3 Bucket (%s) ACL: %w", d.Id(), err) + if d.HasChange("website") { + if err := resourceBucketInternalWebsiteUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Website: %w", d.Id(), err) } } - if d.HasChange("cors_rule") { - if err := resourceBucketInternalCorsUpdate(conn, d); err != nil { - return fmt.Errorf("error updating S3 Bucket (%s) CORS Rules: %w", d.Id(), err) + if d.HasChange("versioning") { + v := d.Get("versioning").([]interface{}) + + if d.IsNewResource() { + if versioning := expandVersioningWhenIsNewResource(v); versioning != nil { + err := resourceBucketInternalVersioningUpdate(conn, d.Id(), versioning) + if err != nil { + return fmt.Errorf("error updating (new) S3 Bucket (%s) Versioning: %w", d.Id(), err) + } + } + } else { + if err := resourceBucketInternalVersioningUpdate(conn, d.Id(), expandVersioning(v)); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Versioning: %w", d.Id(), err) + } + } + } + + if d.HasChange("acl") && !d.IsNewResource() { + if err := resourceBucketInternalACLUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) ACL: %w", d.Id(), err) } } @@ -798,21 +823,27 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("logging") { + if err := resourceBucketInternalLoggingUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Logging: %w", d.Id(), err) + } + } + if d.HasChange("lifecycle_rule") { if err := resourceBucketInternalLifecycleUpdate(conn, d); err != nil { return fmt.Errorf("error updating S3 Bucket (%s) Lifecycle Rules: %w", d.Id(), err) } } - if d.HasChange("logging") { - if err := resourceBucketInternalLoggingUpdate(conn, d); err != nil { - return fmt.Errorf("error updating S3 Bucket (%s) Logging: %w", d.Id(), err) + if d.HasChange("acceleration_status") { + if err := 
resourceBucketInternalAccelerationUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Acceleration Status: %w", d.Id(), err) } } - if d.HasChange("object_lock_configuration") { - if err := resourceBucketInternalObjectLockConfigurationUpdate(conn, d); err != nil { - return fmt.Errorf("error updating S3 Bucket (%s) Object Lock configuration: %w", d.Id(), err) + if d.HasChange("replication_configuration") { + if err := resourceBucketInternalReplicationConfigurationUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Replication configuration: %w", d.Id(), err) } } @@ -822,26 +853,9 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } - if d.HasChange("versioning") { - v := d.Get("versioning").([]interface{}) - - if d.IsNewResource() { - if versioning := expandVersioningWhenIsNewResource(v); versioning != nil { - err := resourceBucketInternalVersioningUpdate(conn, d.Id(), versioning) - if err != nil { - return fmt.Errorf("error updating (new) S3 Bucket (%s) Versioning: %w", d.Id(), err) - } - } - } else { - if err := resourceBucketInternalVersioningUpdate(conn, d.Id(), expandVersioning(v)); err != nil { - return fmt.Errorf("error updating S3 Bucket (%s) Versioning: %w", d.Id(), err) - } - } - } - - if d.HasChange("website") { - if err := resourceBucketInternalWebsiteUpdate(conn, d); err != nil { - return fmt.Errorf("error updating S3 Bucket (%s) Website: %w", d.Id(), err) + if d.HasChange("object_lock_configuration") { + if err := resourceBucketInternalObjectLockConfigurationUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Object Lock configuration: %w", d.Id(), err) } } @@ -1857,6 +1871,60 @@ func resourceBucketInternalObjectLockConfigurationUpdate(conn *s3.S3, d *schema. 
return err } +func resourceBucketInternalReplicationConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { + replicationConfiguration := d.Get("replication_configuration").([]interface{}) + + if len(replicationConfiguration) == 0 { + input := &s3.DeleteBucketReplicationInput{ + Bucket: aws.String(d.Id()), + } + + _, err := conn.DeleteBucketReplication(input) + + if err != nil { + return fmt.Errorf("error removing S3 Bucket (%s) Replication: %w", d.Id(), err) + } + + return nil + } + + hasVersioning := false + // Validate that bucket versioning is enabled + if versioning, ok := d.GetOk("versioning"); ok { + v := versioning.([]interface{}) + + if v[0].(map[string]interface{})["enabled"].(bool) { + hasVersioning = true + } + } + + if !hasVersioning { + return fmt.Errorf("versioning must be enabled to allow S3 bucket replication") + } + + input := &s3.PutBucketReplicationInput{ + Bucket: aws.String(d.Id()), + ReplicationConfiguration: expandBucketReplicationConfiguration(replicationConfiguration), + } + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + _, err := conn.PutBucketReplication(input) + if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, ErrCodeInvalidRequest, "Versioning must be 'Enabled' on the bucket") { + return resource.RetryableError(err) + } + if err != nil { + return resource.NonRetryableError(err) + } + return nil + }) + + if tfresource.TimedOut(err) { + _, err = conn.PutBucketReplication(input) + } + + return err +} + func resourceBucketInternalServerSideEncryptionConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { serverSideEncryptionConfiguration := d.Get("server_side_encryption_configuration").([]interface{}) @@ -2325,6 +2393,185 @@ func flattenS3ObjectLockConfiguration(conf *s3.ObjectLockConfiguration) []interf // Replication Configuration functions +func expandBucketReplicationConfiguration(l []interface{}) *s3.ReplicationConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } + + rc := &s3.ReplicationConfiguration{} + + if val, ok := tfMap["role"].(string); ok { + rc.Role = aws.String(val) + } + + if v, ok := tfMap["rules"].(*schema.Set); ok && v.Len() > 0 { + rc.Rules = expandBucketReplicationConfigurationRules(v.List()) + } + + return rc +} + +func expandBucketReplicationConfigurationRules(l []interface{}) []*s3.ReplicationRule { + var rules []*s3.ReplicationRule + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + + rcRule := &s3.ReplicationRule{} + + if status, ok := tfMap["status"].(string); ok && status != "" { + rcRule.Status = aws.String(status) + } else { + continue + } + + if v, ok := tfMap["id"].(string); ok && v != "" { + rcRule.ID = aws.String(v) + } + + if v, ok := tfMap["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rcRule.Destination = expandBucketReplicationConfigurationRulesDestination(v) + } else { + rcRule.Destination = &s3.Destination{} + } + + if v, ok := tfMap["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rcRule.SourceSelectionCriteria = expandBucketReplicationConfigurationRulesSourceSelectionCriteria(v) + } + + if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + // XML schema V2. 
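+			// A filter-based (V2) rule also carries an explicit Priority and an
+			// explicit DeleteMarkerReplication status; when tags are configured,
+			// the prefix and tags must be combined under a single And operator.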
+ rcRule.Priority = aws.Int64(int64(tfMap["priority"].(int))) + + rcRule.Filter = &s3.ReplicationRuleFilter{} + + filter := v[0].(map[string]interface{}) + tags := Tags(tftags.New(filter["tags"]).IgnoreAWS()) + + if len(tags) > 0 { + rcRule.Filter.And = &s3.ReplicationRuleAndOperator{ + Prefix: aws.String(filter["prefix"].(string)), + Tags: tags, + } + } else { + rcRule.Filter.Prefix = aws.String(filter["prefix"].(string)) + } + + if dmr, ok := tfMap["delete_marker_replication_status"].(string); ok && dmr != "" { + rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ + Status: aws.String(dmr), + } + } else { + rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ + Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + } + } + } else { + // XML schema V1. + rcRule.Prefix = aws.String(tfMap["prefix"].(string)) + } + + rules = append(rules, rcRule) + } + + return rules +} + +func expandBucketReplicationConfigurationRulesDestination(l []interface{}) *s3.Destination { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } + + ruleDestination := &s3.Destination{} + + if v, ok := tfMap["bucket"].(string); ok { + ruleDestination.Bucket = aws.String(v) + } + + if v, ok := tfMap["storage_class"].(string); ok && v != "" { + ruleDestination.StorageClass = aws.String(v) + } + + if v, ok := tfMap["replica_kms_key_id"].(string); ok && v != "" { + ruleDestination.EncryptionConfiguration = &s3.EncryptionConfiguration{ + ReplicaKmsKeyID: aws.String(v), + } + } + + if v, ok := tfMap["account_id"].(string); ok && v != "" { + ruleDestination.Account = aws.String(v) + } + + if v, ok := tfMap["access_control_translation"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + aclTranslationValues := v[0].(map[string]interface{}) + ruleAclTranslation := &s3.AccessControlTranslation{} + ruleAclTranslation.Owner = aws.String(aclTranslationValues["owner"].(string)) + ruleDestination.AccessControlTranslation = ruleAclTranslation + } + + // replication metrics (required for RTC) + if v, ok := tfMap["metrics"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + metricsConfig := &s3.Metrics{} + metricsValues := v[0].(map[string]interface{}) + metricsConfig.EventThreshold = &s3.ReplicationTimeValue{} + metricsConfig.Status = aws.String(metricsValues["status"].(string)) + metricsConfig.EventThreshold.Minutes = aws.Int64(int64(metricsValues["minutes"].(int))) + ruleDestination.Metrics = metricsConfig + } + + // replication time control (RTC) + if v, ok := tfMap["replication_time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rtcValues := v[0].(map[string]interface{}) + rtcConfig := &s3.ReplicationTime{} + rtcConfig.Status = aws.String(rtcValues["status"].(string)) + rtcConfig.Time = &s3.ReplicationTimeValue{} + rtcConfig.Time.Minutes = aws.Int64(int64(rtcValues["minutes"].(int))) + ruleDestination.ReplicationTime = rtcConfig + } + + return ruleDestination +} + +func expandBucketReplicationConfigurationRulesSourceSelectionCriteria(l []interface{}) *s3.SourceSelectionCriteria { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } + + ruleSsc := &s3.SourceSelectionCriteria{} + + if v, ok := tfMap["sse_kms_encrypted_objects"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + sseKmsValues := v[0].(map[string]interface{}) + sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{} + + if sseKmsValues["enabled"].(bool) { + sseKmsEncryptedObjects.Status = 
aws.String(s3.SseKmsEncryptedObjectsStatusEnabled) + } else { + sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusDisabled) + } + ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects + } + + return ruleSsc +} + func flattenBucketReplicationConfiguration(r *s3.ReplicationConfiguration) []interface{} { if r == nil { return []interface{}{} diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index 07648700864..6c45bb01cf5 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -1073,6 +1073,88 @@ func TestAccS3BucketReplicationConfiguration_withoutPrefix(t *testing.T) { }) } +func TestAccS3BucketReplicationConfiguration_migrate_noChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_replication_configuration.test" + bucketResourceName := "aws_s3_bucket.source" + region := acctest.Region() + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroy, &providers), + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withReplicationV2_PrefixAndTags(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExistsWithProvider(bucketResourceName, acctest.RegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(bucketResourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(bucketResourceName, "replication_configuration.0.rules.*", map[string]string{ + "filter.#": "1", + "filter.0.prefix": "foo", + "filter.0.tags.%": "2", + }), + ), + }, + { + Config: testAccBucketReplicationConfiguration_Migrate_NoChangeConfig(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.filter.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.filter.0.and.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.filter.0.and.0.prefix", "foo"), + resource.TestCheckResourceAttr(resourceName, "rule.0.filter.0.and.0.tags.%", "2"), + ), + }, + }, + }) +} + +func TestAccS3BucketReplicationConfiguration_migrate_withChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_replication_configuration.test" + bucketResourceName := "aws_s3_bucket.source" + region := acctest.Region() + + // record the initialized providers so that we can use them to check for the instances in each region + var providers []*schema.Provider + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.FactoriesAlternate(&providers), + CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroy, &providers), + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withReplicationV2_PrefixAndTags(rName), + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExistsWithProvider(bucketResourceName, acctest.RegionProviderFunc(region, &providers)), + resource.TestCheckResourceAttr(bucketResourceName, "replication_configuration.0.rules.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(bucketResourceName, "replication_configuration.0.rules.*", map[string]string{ + "filter.#": "1", + "filter.0.prefix": "foo", + "filter.0.tags.%": "2", + }), + ), + }, + { + Config: testAccBucketReplicationConfiguration_Migrate_WithChangeConfig(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketReplicationConfigurationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.filter.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.filter.0.prefix", "bar"), + ), + }, + }, + }) +} + func testAccCheckBucketReplicationConfigurationDestroy(s *terraform.State, provider *schema.Provider) error { conn := provider.Meta().(*conns.AWSClient).S3Conn @@ -2185,3 +2267,137 @@ resource "aws_s3_bucket_replication_configuration" "test" { } }`) } + +func testAccBucketReplicationConfigurationMigrationBase(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "role" { + name = %[1]q + + assume_role_policy = < 0 && err == nil { - t.Fatalf("expected %q to trigger an error", tc.Region) - } - if output != tc.ExpectedOutput { - t.Fatalf("expected %q, received %q", tc.ExpectedOutput, output) - } - } + }) } -func TestWebsiteEndpoint(t *testing.T) { - // https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html - testCases := []struct { - TestingClient *conns.AWSClient - LocationConstraint string - Expected string - }{ - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.UsEast1RegionID, +func TestAccS3Bucket_Security_corsEmptyOrigin(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withCORSEmptyOrigin(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_headers.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_headers.0", "*"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.#", "2"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.0", "PUT"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_methods.1", "POST"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_origins.#", "1"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.allowed_origins.0", ""), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.#", "2"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.0", "x-amz-server-side-encryption"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.expose_headers.1", "ETag"), + resource.TestCheckResourceAttr(resourceName, "cors_rule.0.max_age_seconds", "3000"), + ), }, - LocationConstraint: "", - Expected: 
fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsEast1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.UsWest2RegionID, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, }, - LocationConstraint: endpoints.UsWest2RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsWest2RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.UsWest1RegionID, - }, - LocationConstraint: endpoints.UsWest1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsWest1RegionID, acctest.PartitionDNSSuffix()), }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.EuWest1RegionID, + }) +} + +func TestAccS3Bucket_Security_corsSingleMethodAndEmptyOrigin(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withCORSSingleMethodAndEmptyOrigin(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), }, - LocationConstraint: endpoints.EuWest1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.EuWest1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.EuWest3RegionID, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, }, - LocationConstraint: endpoints.EuWest3RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.EuWest3RegionID, acctest.PartitionDNSSuffix()), }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.EuCentral1RegionID, + }) +} + +func TestAccS3Bucket_Security_logging(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLogging(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "logging.#", "1"), + resource.TestCheckResourceAttrPair(resourceName, "logging.0.target_bucket", "aws_s3_bucket.log_bucket", "id"), + resource.TestCheckResourceAttr(resourceName, "logging.0.target_prefix", "log/"), + ), }, - LocationConstraint: endpoints.EuCentral1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.EuCentral1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApSouth1RegionID, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, }, - LocationConstraint: endpoints.ApSouth1RegionID, - Expected: 
fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.ApSouth1RegionID, acctest.PartitionDNSSuffix()), }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApSoutheast1RegionID, + }) +} +func TestAccS3Bucket_Security_enableDefaultEncryptionWhenTypical(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withDefaultEncryption_KmsMasterKey(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestMatchResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", regexp.MustCompile("^arn")), + ), }, - LocationConstraint: endpoints.ApSoutheast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApSoutheast1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApNortheast1RegionID, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, }, - LocationConstraint: endpoints.ApNortheast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApNortheast1RegionID, acctest.PartitionDNSSuffix()), }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApSoutheast2RegionID, + }) +} + +func TestAccS3Bucket_Security_enableDefaultEncryptionWhenAES256IsUsed(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withDefaultEncryption_defaultKey(bucketName, s3.ServerSideEncryptionAes256), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAes256), + resource.TestCheckResourceAttr(resourceName, 
"server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", ""), + ), }, - LocationConstraint: endpoints.ApSoutheast2RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApSoutheast2RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApNortheast2RegionID, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, }, - LocationConstraint: endpoints.ApNortheast2RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.ApNortheast2RegionID, acctest.PartitionDNSSuffix()), }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.SaEast1RegionID, + }) +} + +func TestAccS3Bucket_Security_disableDefaultEncryptionWhenDefaultEncryptionIsEnabled(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withDefaultEncryption_defaultKey(bucketName, s3.ServerSideEncryptionAwsKms), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), }, - LocationConstraint: endpoints.SaEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.SaEast1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.UsGovEast1RegionID, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, }, - LocationConstraint: endpoints.UsGovEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.UsGovEast1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.UsGovWest1RegionID, + { + // As ServerSide Encryption Configuration is a Computed field, removing them from terraform will not + // trigger an update to remove it from the S3 bucket. 
+ Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "1"), + ), }, - LocationConstraint: endpoints.UsGovWest1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsGovWest1RegionID, acctest.PartitionDNSSuffix()), }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "c2s.ic.gov", - Region: endpoints.UsIsoEast1RegionID, + }) +} + +func TestAccS3Bucket_Web_simple(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + region := acctest.Region() + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withWebsite(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), }, - LocationConstraint: endpoints.UsIsoEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.c2s.ic.gov", endpoints.UsIsoEast1RegionID), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "sc2s.sgov.gov", - Region: endpoints.UsIsobEast1RegionID, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "grant"}, }, - LocationConstraint: endpoints.UsIsobEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.sc2s.sgov.gov", endpoints.UsIsobEast1RegionID), + { + Config: testAccBucketConfig_withWebsiteAndError(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + resource.TestCheckResourceAttr(resourceName, "website.0.error_document", "error.html"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + { + // As Website is a Computed field, removing them from terraform will not + // trigger an update to remove them from the S3 bucket. 
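+				// The website settings applied in the previous steps likewise remain
+				// on the bucket and in state, as the checks below assert.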
+ Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + resource.TestCheckResourceAttr(resourceName, "website.0.error_document", "error.html"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + }, + }) +} + +func TestAccS3Bucket_Web_redirect(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + region := acctest.Region() + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withWebsiteAndRedirect(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.redirect_all_requests_to", "hashicorp.com?my=query"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "grant"}, + }, + { + Config: testAccBucketConfig_withWebsiteAndHTTPSRedirect(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.redirect_all_requests_to", "https://hashicorp.com?my=query"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + { + // As Website is a Computed field, removing them from terraform will not + // trigger an update to remove them from the S3 bucket. 
+ Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.redirect_all_requests_to", "https://hashicorp.com?my=query"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + }, + }) +} + +func TestAccS3Bucket_Web_routingRules(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + region := acctest.Region() + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withWebsiteAndRoutingRules(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.error_document", "error.html"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + resource.TestCheckResourceAttrSet(resourceName, "website.0.routing_rules"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl", "grant"}, + }, + { + // As Website is a Computed field, removing them from terraform will not + // trigger an update to remove them from the S3 bucket. + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "website.#", "1"), + resource.TestCheckResourceAttr(resourceName, "website.0.error_document", "error.html"), + resource.TestCheckResourceAttr(resourceName, "website.0.index_document", "index.html"), + resource.TestCheckResourceAttrSet(resourceName, "website.0.routing_rules"), + testAccCheckS3BucketWebsiteEndpoint(resourceName, "website_endpoint", bucketName, region), + ), + }, + }, + }) +} + +func TestBucketName(t *testing.T) { + validDnsNames := []string{ + "foobar", + "foo.bar", + "foo.bar.baz", + "1234", + "foo-bar", + strings.Repeat("x", 63), + } + + for _, v := range validDnsNames { + if err := tfs3.ValidBucketName(v, endpoints.UsWest2RegionID); err != nil { + t.Fatalf("%q should be a valid S3 bucket name", v) + } + } + + invalidDnsNames := []string{ + "foo..bar", + "Foo.Bar", + "192.168.0.1", + "127.0.0.1", + ".foo", + "bar.", + "foo_bar", + strings.Repeat("x", 64), + } + + for _, v := range invalidDnsNames { + if err := tfs3.ValidBucketName(v, endpoints.UsWest2RegionID); err == nil { + t.Fatalf("%q should not be a valid S3 bucket name", v) + } + } + + validEastNames := []string{ + "foobar", + "foo_bar", + "127.0.0.1", + "foo..bar", + "foo_bar_baz", + "foo.bar.baz", + "Foo.Bar", + strings.Repeat("x", 255), + } + + for _, v := range validEastNames { + if err := tfs3.ValidBucketName(v, endpoints.UsEast1RegionID); err != nil { + t.Fatalf("%q should be a valid S3 bucket name", v) + } + } + + invalidEastNames := []string{ + "foo;bar", + strings.Repeat("x", 256), + } + + for _, v := range invalidEastNames { + if err := tfs3.ValidBucketName(v, endpoints.UsEast1RegionID); err == nil { + 
t.Fatalf("%q should not be a valid S3 bucket name", v) + } + } +} + +func TestBucketRegionalDomainName(t *testing.T) { + const bucket = "bucket-name" + + var testCases = []struct { + ExpectedErrCount int + ExpectedOutput string + Region string + }{ + { + Region: "", + ExpectedErrCount: 0, + ExpectedOutput: bucket + ".s3.amazonaws.com", + }, + { + Region: "custom", + ExpectedErrCount: 0, + ExpectedOutput: bucket + ".s3.custom.amazonaws.com", + }, + { + Region: endpoints.UsEast1RegionID, + ExpectedErrCount: 0, + ExpectedOutput: bucket + ".s3.amazonaws.com", + }, + { + Region: endpoints.UsWest2RegionID, + ExpectedErrCount: 0, + ExpectedOutput: bucket + fmt.Sprintf(".s3.%s.%s", endpoints.UsWest2RegionID, acctest.PartitionDNSSuffix()), + }, + { + Region: endpoints.UsGovWest1RegionID, + ExpectedErrCount: 0, + ExpectedOutput: bucket + fmt.Sprintf(".s3.%s.%s", endpoints.UsGovWest1RegionID, acctest.PartitionDNSSuffix()), }, + { + Region: endpoints.CnNorth1RegionID, + ExpectedErrCount: 0, + ExpectedOutput: bucket + fmt.Sprintf(".s3.%s.amazonaws.com.cn", endpoints.CnNorth1RegionID), + }, + } + + for _, tc := range testCases { + output, err := tfs3.BucketRegionalDomainName(bucket, tc.Region) + if tc.ExpectedErrCount == 0 && err != nil { + t.Fatalf("expected %q not to trigger an error, received: %s", tc.Region, err) + } + if tc.ExpectedErrCount > 0 && err == nil { + t.Fatalf("expected %q to trigger an error", tc.Region) + } + if output != tc.ExpectedOutput { + t.Fatalf("expected %q, received %q", tc.ExpectedOutput, output) + } + } +} + +func TestWebsiteEndpoint(t *testing.T) { + // https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html + testCases := []struct { + TestingClient *conns.AWSClient + LocationConstraint string + Expected string + }{ { TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com.cn", - Region: endpoints.CnNorthwest1RegionID, + DNSSuffix: "amazonaws.com", + Region: endpoints.UsEast1RegionID, }, - LocationConstraint: endpoints.CnNorthwest1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com.cn", endpoints.CnNorthwest1RegionID), + LocationConstraint: "", + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsEast1RegionID, acctest.PartitionDNSSuffix()), }, { TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com.cn", - Region: endpoints.CnNorth1RegionID, + DNSSuffix: "amazonaws.com", + Region: endpoints.UsWest2RegionID, + }, + LocationConstraint: endpoints.UsWest2RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsWest2RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.UsWest1RegionID, + }, + LocationConstraint: endpoints.UsWest1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsWest1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.EuWest1RegionID, + }, + LocationConstraint: endpoints.EuWest1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.EuWest1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.EuWest3RegionID, + }, + LocationConstraint: endpoints.EuWest3RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.EuWest3RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.EuCentral1RegionID, + }, + 
LocationConstraint: endpoints.EuCentral1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.EuCentral1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.ApSouth1RegionID, + }, + LocationConstraint: endpoints.ApSouth1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.ApSouth1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.ApSoutheast1RegionID, + }, + LocationConstraint: endpoints.ApSoutheast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApSoutheast1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.ApNortheast1RegionID, + }, + LocationConstraint: endpoints.ApNortheast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApNortheast1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.ApSoutheast2RegionID, + }, + LocationConstraint: endpoints.ApSoutheast2RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApSoutheast2RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.ApNortheast2RegionID, + }, + LocationConstraint: endpoints.ApNortheast2RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.ApNortheast2RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.SaEast1RegionID, + }, + LocationConstraint: endpoints.SaEast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.SaEast1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.UsGovEast1RegionID, + }, + LocationConstraint: endpoints.UsGovEast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.UsGovEast1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com", + Region: endpoints.UsGovWest1RegionID, + }, + LocationConstraint: endpoints.UsGovWest1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsGovWest1RegionID, acctest.PartitionDNSSuffix()), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "c2s.ic.gov", + Region: endpoints.UsIsoEast1RegionID, + }, + LocationConstraint: endpoints.UsIsoEast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.c2s.ic.gov", endpoints.UsIsoEast1RegionID), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "sc2s.sgov.gov", + Region: endpoints.UsIsobEast1RegionID, + }, + LocationConstraint: endpoints.UsIsobEast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.sc2s.sgov.gov", endpoints.UsIsobEast1RegionID), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com.cn", + Region: endpoints.CnNorthwest1RegionID, + }, + LocationConstraint: endpoints.CnNorthwest1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com.cn", endpoints.CnNorthwest1RegionID), + }, + { + TestingClient: &conns.AWSClient{ + DNSSuffix: "amazonaws.com.cn", + Region: endpoints.CnNorth1RegionID, }, LocationConstraint: endpoints.CnNorth1RegionID, Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com.cn", 
endpoints.CnNorth1RegionID), }, } - for _, testCase := range testCases { - got := tfs3.WebsiteEndpoint(testCase.TestingClient, "bucket-name", testCase.LocationConstraint) - if got.Endpoint != testCase.Expected { - t.Errorf("WebsiteEndpointUrl(\"bucket-name\", %q) => %q, want %q", testCase.LocationConstraint, got.Endpoint, testCase.Expected) - } - } + for _, testCase := range testCases { + got := tfs3.WebsiteEndpoint(testCase.TestingClient, "bucket-name", testCase.LocationConstraint) + if got.Endpoint != testCase.Expected { + t.Errorf("WebsiteEndpointUrl(\"bucket-name\", %q) => %q, want %q", testCase.LocationConstraint, got.Endpoint, testCase.Expected) + } + } +} + +func testAccCheckBucketDestroy(s *terraform.State) error { + return testAccCheckBucketDestroyWithProvider(s, acctest.Provider) +} + +func testAccCheckBucketDestroyWithProvider(s *terraform.State, provider *schema.Provider) error { + conn := provider.Meta().(*conns.AWSClient).S3Conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3_bucket" { + continue + } + + input := &s3.HeadBucketInput{ + Bucket: aws.String(rs.Primary.ID), + } + + // Retry for S3 eventual consistency + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + _, err := conn.HeadBucket(input) + + if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrCodeEquals(err, "NotFound") { + return nil + } + + if err != nil { + return resource.NonRetryableError(err) + } + + return resource.RetryableError(fmt.Errorf("AWS S3 Bucket still exists: %s", rs.Primary.ID)) + }) + + if tfresource.TimedOut(err) { + _, err = conn.HeadBucket(input) + } + + if err != nil { + return err + } + } + return nil +} + +func testAccCheckBucketExists(n string) resource.TestCheckFunc { + return testAccCheckBucketExistsWithProvider(n, func() *schema.Provider { return acctest.Provider }) +} + +func testAccCheckBucketExistsWithProvider(n string, providerF func() *schema.Provider) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + provider := providerF() + + conn := provider.Meta().(*conns.AWSClient).S3Conn + _, err := conn.HeadBucket(&s3.HeadBucketInput{ + Bucket: aws.String(rs.Primary.ID), + }) + + if err != nil { + if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + return fmt.Errorf("S3 bucket not found") + } + return err + } + return nil + + } +} + +func testAccCheckBucketAddObjects(n string, keys ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3ConnURICleaningDisabled + + for _, key := range keys { + _, err := conn.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(rs.Primary.ID), + Key: aws.String(key), + }) + + if err != nil { + return fmt.Errorf("PutObject error: %s", err) + } + } + + return nil + } +} + +func testAccCheckBucketAddObjectsWithLegalHold(n string, keys ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn + + for _, key := range keys { + _, err := conn.PutObject(&s3.PutObjectInput{ + Bucket: aws.String(rs.Primary.ID), + Key: aws.String(key), + ObjectLockLegalHoldStatus: aws.String(s3.ObjectLockLegalHoldStatusOn), + }) + + if err != nil { + return fmt.Errorf("PutObject error: %s", err) + } + } + + return 
nil + } +} + +// Create an S3 bucket via a CF stack so that it has system tags. +func testAccCheckBucketCreateViaCloudFormation(n string, stackID *string) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).CloudFormationConn + stackName := sdkacctest.RandomWithPrefix("tf-acc-test-s3tags") + templateBody := fmt.Sprintf(`{ + "Resources": { + "TfTestBucket": { + "Type": "AWS::S3::Bucket", + "Properties": { + "BucketName": "%s" + } + } + } +}`, n) + + requestToken := resource.UniqueId() + req := &cloudformation.CreateStackInput{ + StackName: aws.String(stackName), + TemplateBody: aws.String(templateBody), + ClientRequestToken: aws.String(requestToken), + } + + log.Printf("[DEBUG] Creating CloudFormation stack: %s", req) + resp, err := conn.CreateStack(req) + if err != nil { + return fmt.Errorf("error creating CloudFormation stack: %w", err) + } + + stack, err := tfcloudformation.WaitStackCreated(conn, aws.StringValue(resp.StackId), requestToken, 10*time.Minute) + if err != nil { + return fmt.Errorf("Error waiting for CloudFormation stack creation: %w", err) + } + status := aws.StringValue(stack.StackStatus) + if status != cloudformation.StackStatusCreateComplete { + return fmt.Errorf("Invalid CloudFormation stack creation status: %s", status) + } + + *stackID = aws.StringValue(resp.StackId) + return nil + } +} + +func testAccCheckBucketTagKeys(n string, keys ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn + + got, err := tfs3.BucketListTags(conn, rs.Primary.Attributes["bucket"]) + if err != nil { + return err + } + + for _, want := range keys { + ok := false + for _, key := range got.Keys() { + if want == key { + ok = true + break + } + } + if !ok { + return fmt.Errorf("Key %s not found in bucket's tag set", want) + } + } + + return nil + } +} + +func testAccCheckS3BucketDomainName(resourceName string, attributeName string, bucketName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + expectedValue := acctest.Provider.Meta().(*conns.AWSClient).PartitionHostname(fmt.Sprintf("%s.s3", bucketName)) + + return resource.TestCheckResourceAttr(resourceName, attributeName, expectedValue)(s) + } +} + +func testAccBucketRegionalDomainName(bucket, region string) string { + regionalEndpoint, err := tfs3.BucketRegionalDomainName(bucket, region) + if err != nil { + return fmt.Sprintf("Regional endpoint not found for bucket %s", bucket) + } + return regionalEndpoint +} + +func testAccCheckS3BucketWebsiteEndpoint(resourceName string, attributeName string, bucketName string, region string) resource.TestCheckFunc { + return func(s *terraform.State) error { + website := tfs3.WebsiteEndpoint(acctest.Provider.Meta().(*conns.AWSClient), bucketName, region) + expectedValue := website.Endpoint + + return resource.TestCheckResourceAttr(resourceName, attributeName, expectedValue)(s) + } +} + +func testAccCheckBucketUpdateTags(n string, oldTags, newTags map[string]string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn + + return tfs3.BucketUpdateTags(conn, rs.Primary.Attributes["bucket"], oldTags, newTags) + } +} + +func testAccCheckBucketCheckTags(n string, expectedTags map[string]string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] 
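+		// List the bucket's live tags and compare them against the expected set.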
+ conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn + + got, err := tfs3.BucketListTags(conn, rs.Primary.Attributes["bucket"]) + if err != nil { + return err + } + + want := tftags.New(expectedTags) + if !reflect.DeepEqual(want, got) { + return fmt.Errorf("Incorrect tags, want: %v got: %v", want, got) + } + + return nil + } +} + +func testAccBucketConfig_Basic(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} +`, bucketName) +} + +func testAccBucketConfig_withAcceleration(bucketName, acceleration string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acceleration_status = %[2]q +} +`, bucketName, acceleration) +} + +func testAccBucketConfig_withACL(bucketName, acl string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = %[2]q +} +`, bucketName, acl) +} + +func testAccBucketConfig_withCORS(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT", "POST"] + allowed_origins = ["https://www.example.com"] + expose_headers = ["x-amz-server-side-encryption", "ETag"] + max_age_seconds = 3000 + } +} +`, bucketName) +} + +func testAccBucketConfig_withCORSSingleMethodAndEmptyOrigin(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + cors_rule { + allowed_methods = ["GET"] + allowed_origins = [""] + } +} +`, bucketName) +} + +func testAccBucketConfig_withCORSEmptyOrigin(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT", "POST"] + allowed_origins = [""] + expose_headers = ["x-amz-server-side-encryption", "ETag"] + max_age_seconds = 3000 + } +} +`, bucketName) +} + +func testAccBucketConfig_withDefaultEncryption_defaultKey(bucketName, sseAlgorithm string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + server_side_encryption_configuration { + rule { + apply_server_side_encryption_by_default { + sse_algorithm = %[2]q + } + } + } +} +`, bucketName, sseAlgorithm) +} + +func testAccBucketConfig_withDefaultEncryption_KmsMasterKey(bucketName string) string { + return fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = "KMS Key for Bucket %[1]s" + deletion_window_in_days = 10 +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + server_side_encryption_configuration { + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.test.arn + sse_algorithm = "aws:kms" + } + } + } +} +`, bucketName) +} + +func testAccBucketConfig_withDefaultEncryptionAndBucketKeyEnabled_KmsMasterKey(bucketName string) string { + return fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = "KMS Key for Bucket %[1]s" + deletion_window_in_days = 7 +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + server_side_encryption_configuration { + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.test.arn + sse_algorithm = "aws:kms" + } + bucket_key_enabled = true + } + } +} +`, bucketName) +} + +func testAccBucketConfig_withGrants(bucketName string) string { + return fmt.Sprintf(` +data "aws_canonical_user_id" "current" {} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + grant { + id = data.aws_canonical_user_id.current.id + type = "CanonicalUser" + permissions = 
["FULL_CONTROL", "WRITE"] + } +} +`, bucketName) +} + +func testAccBucketConfig_withUpdatedGrants(bucketName string) string { + return fmt.Sprintf(` +data "aws_canonical_user_id" "current" {} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + grant { + id = data.aws_canonical_user_id.current.id + type = "CanonicalUser" + permissions = ["READ"] + } + + grant { + type = "Group" + permissions = ["READ_ACP"] + uri = "http://acs.amazonaws.com/groups/s3/LogDelivery" + } +} +`, bucketName) +} + +func testAccBucketConfig_withLifecycle(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + + lifecycle_rule { + id = "id1" + prefix = "path1/" + enabled = true + + expiration { + days = 365 + } + + transition { + days = 30 + storage_class = "STANDARD_IA" + } + + transition { + days = 60 + storage_class = "INTELLIGENT_TIERING" + } + + transition { + days = 90 + storage_class = "ONEZONE_IA" + } + + transition { + days = 120 + storage_class = "GLACIER" + } + + transition { + days = 210 + storage_class = "DEEP_ARCHIVE" + } + } + + lifecycle_rule { + id = "id2" + prefix = "path2/" + enabled = true + + expiration { + date = "2016-01-12" + } + } + + lifecycle_rule { + id = "id3" + prefix = "path3/" + enabled = true + + transition { + days = 0 + storage_class = "GLACIER" + } + } + + lifecycle_rule { + id = "id4" + prefix = "path4/" + enabled = true + + tags = { + "tagKey" = "tagValue" + "terraform" = "hashicorp" + } + + expiration { + date = "2016-01-12" + } + } + + lifecycle_rule { + id = "id5" + enabled = true + + tags = { + "tagKey" = "tagValue" + "terraform" = "hashicorp" + } + + transition { + days = 0 + storage_class = "GLACIER" + } + } + + lifecycle_rule { + id = "id6" + enabled = true + + tags = { + "tagKey" = "tagValue" + } + + transition { + days = 0 + storage_class = "GLACIER" + } + } +} +`, bucketName) } -func testAccCheckBucketDestroy(s *terraform.State) error { - return testAccCheckBucketDestroyWithProvider(s, acctest.Provider) +func testAccBucketConfig_withLifecycleExpireMarker(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + + lifecycle_rule { + id = "id1" + prefix = "path1/" + enabled = true + + expiration { + expired_object_delete_marker = "true" + } + } +} +`, bucketName) } -func testAccCheckBucketDestroyWithProvider(s *terraform.State, provider *schema.Provider) error { - conn := provider.Meta().(*conns.AWSClient).S3Conn +func testAccBucketConfig_withLifecycleRuleExpirationEmptyConfigurationBlock(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_s3_bucket" { - continue - } + lifecycle_rule { + enabled = true + id = "id1" - input := &s3.HeadBucketInput{ - Bucket: aws.String(rs.Primary.ID), - } + expiration {} + } +} +`, rName) +} - // Retry for S3 eventual consistency - err := resource.Retry(1*time.Minute, func() *resource.RetryError { - _, err := conn.HeadBucket(input) +func testAccBucketConfig_withLifecycleRuleAbortIncompleteMultipartUploadDays(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrCodeEquals(err, "NotFound") { - return nil - } + lifecycle_rule { + abort_incomplete_multipart_upload_days = 7 + enabled = true + id = "id1" + } +} +`, rName) +} - if err != nil { - return resource.NonRetryableError(err) 
- } +func testAccBucketConfig_withLogging(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "log_bucket" { + bucket = "%[1]s-log" + acl = "log-delivery-write" +} - return resource.RetryableError(fmt.Errorf("AWS S3 Bucket still exists: %s", rs.Primary.ID)) - }) +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" - if tfresource.TimedOut(err) { - _, err = conn.HeadBucket(input) - } + logging { + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" + } +} +`, bucketName) +} - if err != nil { - return err - } - } - return nil +func testAccBucketConfig_ReplicationBase(bucketName string) string { + return acctest.ConfigCompose( + acctest.ConfigMultipleRegionProvider(2), + fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "role" { + name = %[1]q + + assume_role_policy = < **NOTE on S3 Bucket Replication Configuration:** S3 Bucket Replication can be configured in either the standalone resource [`aws_s3_bucket_replicaton_configuration`](s3_bucket_replication_configuration.html) +or with the deprecated parameter `replication_configuration` in the resource `aws_s3_bucket`. +Configuring with both will cause inconsistencies and may overwrite configuration. + ~> **NOTE on S3 Bucket Server Side Encryption Configuration:** S3 Bucket Server Side Encryption can be configured in either the standalone resource [`aws_s3_bucket_server_side_encryption_configuration`](s3_bucket_server_side_encryption_configuration.html) or with the deprecated parameter `server_side_encryption_configuration` in the resource `aws_s3_bucket`. Configuring with both will cause inconsistencies and may overwrite configuration. @@ -240,8 +244,131 @@ To **enable** Object Lock on an **existing** bucket, please contact AWS Support ### Using replication configuration -The `replication_configuration` argument is read-only as of version 4.0 of the Terraform AWS Provider. -See the [`aws_s3_bucket_replication_configuration` resource](s3_bucket_replication_configuration.html.markdown) for configuration details. +-> **NOTE:** The parameter `replication_configuration` is deprecated. +Use the resource [`aws_s3_bucket_replication_configuration`](s3_bucket_replication_configuration.html) instead. + +```terraform +provider "aws" { + region = "eu-west-1" +} + +provider "aws" { + alias = "central" + region = "eu-central-1" +} + +resource "aws_iam_role" "replication" { + name = "tf-iam-role-replication-12345" + + assume_role_policy = < **NOTE:** Currently, changes to the `replication_configuration` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage replication configuration changes to an S3 bucket, use the `aws_s3_bucket_replication_configuration` resource instead. If you use `replication_configuration` on an `aws_s3_bucket`, Terraform will assume management over the full replication configuration for the S3 bucket, treating additional replication configuration rules as drift. For this reason, `replication_configuration` cannot be mixed with the external `aws_s3_bucket_replication_configuration` resource for a given S3 bucket. + +The `replication_configuration` configuration block supports the following arguments: + +* `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects. +* `rules` - (Required) Specifies the rules managing the replication ([documented below](#rules)). 
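+
+As a quick reference, the sketch below shows the shape of a single V2 (filter-based) rule using the arguments documented in the sections that follow. It is a minimal sketch, not a complete configuration: it assumes the `aws_iam_role.replication` and `aws_s3_bucket.destination` resources from the example above, the bucket name is a placeholder, and versioning must be enabled on the source bucket for the configuration to be accepted.
+
+```terraform
+resource "aws_s3_bucket" "source" {
+  bucket = "tf-test-bucket-source-12345"
+
+  versioning {
+    enabled = true
+  }
+
+  replication_configuration {
+    role = aws_iam_role.replication.arn
+
+    rules {
+      id       = "example-rule"
+      status   = "Enabled"
+      priority = 0
+
+      # V2 rule: scope the rule by object key prefix via `filter`
+      filter {
+        prefix = "example/"
+      }
+
+      delete_marker_replication_status = "Enabled"
+
+      destination {
+        bucket        = aws_s3_bucket.destination.arn
+        storage_class = "STANDARD"
+      }
+    }
+  }
+}
+```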
+
+#### Rules
+
+The `rules` configuration block supports the following arguments:
+
+~> **NOTE:** Amazon S3's latest version of the replication configuration is V2, which includes the `filter` attribute for replication rules.
+With the `filter` attribute, you can specify object filters based on the object key prefix, tags, or both to scope the objects that the rule applies to.
+Replication configuration V1 supports filtering based on only the `prefix` attribute. For backwards compatibility, Amazon S3 continues to support the V1 configuration.
+
+* `delete_marker_replication_status` - (Optional) Whether delete markers are replicated. The only valid value is `Enabled`. To disable, omit this argument. This argument is only valid with V2 replication configurations (i.e., when `filter` is used).
+* `destination` - (Required) Specifies the destination for the rule ([documented below](#destination)).
+* `filter` - (Optional, Conflicts with `prefix`) Filter that identifies a subset of objects to which the replication rule applies ([documented below](#filter)).
+* `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length.
+* `prefix` - (Optional, Conflicts with `filter`) Object keyname prefix identifying one or more objects to which the rule applies. Must be less than or equal to 1024 characters in length.
+* `priority` - (Optional) The priority associated with the rule. Priority should only be set if `filter` is configured. If not provided, defaults to `0`. Priority must be unique across multiple rules.
+* `source_selection_criteria` - (Optional) Specifies special object selection criteria ([documented below](#source-selection-criteria)).
+* `status` - (Required) The status of the rule. Either `Enabled` or `Disabled`. The rule is ignored if status is not `Enabled`.
+
+#### Filter
+
+The `filter` configuration block supports the following arguments:
+
+* `prefix` - (Optional) Object keyname prefix that identifies a subset of objects to which the rule applies. Must be less than or equal to 1024 characters in length.
+* `tags` - (Optional) A map of tags that identifies a subset of objects to which the rule applies.
+  The rule applies only to objects having all the tags in its tag set.
+
+#### Destination
+
+~> **NOTE:** Replication to multiple destination buckets requires that `priority` is specified in the `rules` object. If the corresponding rule requires no filter, an empty configuration block `filter {}` must be specified.
+
+The `destination` configuration block supports the following arguments:
+
+* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule.
+* `storage_class` - (Optional) The [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Destination.html#AmazonS3-Type-Destination-StorageClass) used to store the object. By default, Amazon S3 uses the storage class of the source object to create the object replica.
+* `replica_kms_key_id` - (Optional) Destination KMS encryption key ARN for SSE-KMS replication. Must be used in conjunction with the `sse_kms_encrypted_objects` source selection criteria.
+* `access_control_translation` - (Optional) Specifies the overrides to use for object owners on replication. Must be used in conjunction with the `account_id` owner override configuration.
+* `account_id` - (Optional) The Account ID to use for overriding the object owner on replication. Must be used in conjunction with the `access_control_translation` override configuration. 
+* `replication_time` - (Optional) Enables S3 Replication Time Control (S3 RTC) ([documented below](#replication-time)). +* `metrics` - (Optional) Enables replication metrics (required for S3 RTC) ([documented below](#metrics)). + +#### Replication Time + +The `replication_time` configuration block supports the following arguments: + +* `status` - (Optional) The status of RTC. Either `Enabled` or `Disabled`. +* `minutes` - (Optional) Threshold within which objects are to be replicated. The only valid value is `15`. + +#### Metrics + +The `metrics` configuration block supports the following arguments: + +* `status` - (Optional) The status of replication metrics. Either `Enabled` or `Disabled`. +* `minutes` - (Optional) Threshold within which objects are to be replicated. The only valid value is `15`. + +#### Source Selection Criteria + +The `source_selection_criteria` configuration block supports the following argument: + +* `sse_kms_encrypted_objects` - (Optional) Match SSE-KMS encrypted objects ([documented below](#sse-kms-encrypted-objects)). If specified, `replica_kms_key_id` + in `destination` must be specified as well. + +#### SSE KMS Encrypted Objects + +The `sse_kms_encrypted_objects` configuration block supports the following argument: + +* `enabled` - (Required) Boolean which indicates if this criteria is enabled. + ### Server Side Encryption Configuration ~> **NOTE:** Currently, changes to the `server_side_encryption_configuration` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes in encryption of an S3 bucket, use the `aws_s3_bucket_server_side_encryption_configuration` resource instead. If you use `server_side_encryption_configuration` on an `aws_s3_bucket`, Terraform will assume management over the encryption configuration for the S3 bucket, treating additional encryption changes as drift. For this reason, `server_side_encryption_configuration` cannot be mixed with the external `aws_s3_bucket_server_side_encryption_configuration` resource for a given S3 bucket. @@ -465,33 +670,6 @@ In addition to all arguments above, the following attributes are exported: * `years` - The number of years specified for the default retention period. * `policy` - The [bucket policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html) JSON document. * `region` - The AWS region this bucket resides in. -* `replication_configuration` - The [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). - * `role` - The ARN of the IAM role for Amazon S3 assumed when replicating the objects. - * `rules` - The rules managing the replication. - * `delete_marker_replication_status` - Whether delete markers are replicated. - * `destination` - The destination for the rule. - * `access_control_translation` - The overrides to use for object owners on replication. - * `owner` - The override value for the owner on replicated objects. - * `account_id` - The Account ID to use for overriding the object owner on replication. - * `bucket` - The ARN of the S3 bucket where Amazon S3 stores replicas of the object identified by the rule. - * `metrics` - Replication metrics. - * `status` - The status of replication metrics. - * `minutes` - Threshold within which objects are replicated. - * `storage_class` - The [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Destination.html#AmazonS3-Type-Destination-StorageClass) used to store the object. 
- * `replica_kms_key_id` - Destination KMS encryption key ARN for SSE-KMS replication. - * `replication_time` - S3 Replication Time Control (S3 RTC). - * `status` - The status of RTC. - * `minutes` - Threshold within which objects are to be replicated. - * `filter` - Filter that identifies subset of objects to which the replication rule applies. - * `prefix` - Object keyname prefix that identifies subset of objects to which the rule applies. - * `tags` - Map of tags that identifies subset of objects to which the rule applies. - * `id` - Unique identifier for the rule. - * `prefix` - Object keyname prefix identifying one or more objects to which the rule applies - * `priority` - The priority associated with the rule. - * `source_selection_criteria` - The special object selection criteria. - * `sse_kms_encrypted_objects` - Matched SSE-KMS encrypted objects. - * `enabled` - Whether this criteria is enabled. - * `status` - The status of the rule. * `request_payer` - Either `BucketOwner` or `Requester` that pays for the download and request fees. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). * `website_endpoint` - The website endpoint, if the bucket is configured with a website. If not, this will be an empty string. From fa49a1bf39c74db57119358c047ab7fdaf415f78 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 31 Mar 2022 21:20:09 -0400 Subject: [PATCH 27/42] Update CHANGELOG for #23842 --- .changelog/23842.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/23842.txt diff --git a/.changelog/23842.txt b/.changelog/23842.txt new file mode 100644 index 00000000000..b757bbb820f --- /dev/null +++ b/.changelog/23842.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket: Update `replication_configuration` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_replication_configuration` resource. +``` \ No newline at end of file From 540212e411ca88200fafd875d50fc071cbad59f5 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 1 Apr 2022 03:10:16 +0000 Subject: [PATCH 28/42] Update CHANGELOG.md for #23842 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c046360feb1..f8d01cf1078 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ ENHANCEMENTS: * resource/aws_s3_bucket: Update `cors_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_cors_configuration` resource. ([#23817](https://github.com/hashicorp/terraform-provider-aws/issues/23817)) * resource/aws_s3_bucket: Update `lifecycle_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_lifecycle_configuration` resource. ([#23818](https://github.com/hashicorp/terraform-provider-aws/issues/23818)) * resource/aws_s3_bucket: Update `logging` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_logging` resource. 
([#23819](https://github.com/hashicorp/terraform-provider-aws/issues/23819)) +* resource/aws_s3_bucket: Update `replication_configuration` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_replication_configuration` resource. ([#23842](https://github.com/hashicorp/terraform-provider-aws/issues/23842)) * resource/aws_s3_bucket: Update `server_side_encryption_configuration` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_server_side_encryption_configuration` resource. ([#23822](https://github.com/hashicorp/terraform-provider-aws/issues/23822)) * resource/aws_s3_bucket: Update `versioning` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_versioning` resource. ([#23820](https://github.com/hashicorp/terraform-provider-aws/issues/23820)) * resource/aws_s3_bucket: Update `website` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_website_configuration` resource. ([#23821](https://github.com/hashicorp/terraform-provider-aws/issues/23821)) From c540b6d0e5d2c59e71f20f131af9534453a583e7 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 31 Mar 2022 23:27:35 -0400 Subject: [PATCH 29/42] r/s3_bucket: make 'policy' configurable --- internal/service/s3/bucket.go | 59 ++++++++- internal/service/s3/bucket_policy_test.go | 114 +++++++++++++++++ internal/service/s3/bucket_test.go | 143 ++++++++++++++++++++++ internal/service/s3/errors.go | 1 + website/docs/r/s3_bucket.html.markdown | 8 +- 5 files changed, 321 insertions(+), 4 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 155e81ef959..6e5c251ee82 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -122,9 +122,12 @@ func ResourceBucket() *schema.Resource { }, "policy": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_policy resource instead", + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: "Use the aws_s3_bucket_policy resource instead", + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: verify.SuppressEquivalentPolicyDiffs, }, "cors_rule": { @@ -782,6 +785,12 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { // Note: Order of argument updates below is important + if d.HasChange("policy") { + if err := resourceBucketInternalPolicyUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Policy: %w", d.Id(), err) + } + } + if d.HasChange("cors_rule") { if err := resourceBucketInternalCorsUpdate(conn, d); err != nil { return fmt.Errorf("error updating S3 Bucket (%s) CORS Rules: %w", d.Id(), err) @@ -1871,6 +1880,50 @@ func resourceBucketInternalObjectLockConfigurationUpdate(conn *s3.S3, d *schema. 
return err } +func resourceBucketInternalPolicyUpdate(conn *s3.S3, d *schema.ResourceData) error { + policy, err := structure.NormalizeJsonString(d.Get("policy").(string)) + + if err != nil { + return fmt.Errorf("policy (%s) is an invalid JSON: %w", policy, err) + } + + if policy == "" { + _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.DeleteBucketPolicy(&s3.DeleteBucketPolicyInput{ + Bucket: aws.String(d.Id()), + }) + }) + + if err != nil { + return fmt.Errorf("error deleting S3 Bucket (%s) policy: %w", d.Id(), err) + } + + return nil + } + + params := &s3.PutBucketPolicyInput{ + Bucket: aws.String(d.Id()), + Policy: aws.String(policy), + } + + err = resource.Retry(1*time.Minute, func() *resource.RetryError { + _, err := conn.PutBucketPolicy(params) + if tfawserr.ErrCodeEquals(err, ErrCodeMalformedPolicy, s3.ErrCodeNoSuchBucket) { + return resource.RetryableError(err) + } + if err != nil { + return resource.NonRetryableError(err) + } + return nil + }) + + if tfresource.TimedOut(err) { + _, err = conn.PutBucketPolicy(params) + } + + return err +} + func resourceBucketInternalReplicationConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { replicationConfiguration := d.Get("replication_configuration").([]interface{}) diff --git a/internal/service/s3/bucket_policy_test.go b/internal/service/s3/bucket_policy_test.go index f31c0f9e474..60f05ace8d7 100644 --- a/internal/service/s3/bucket_policy_test.go +++ b/internal/service/s3/bucket_policy_test.go @@ -2,6 +2,7 @@ package s3_test import ( "fmt" + "strconv" "testing" "github.com/aws/aws-sdk-go/aws" @@ -249,6 +250,66 @@ func TestAccS3BucketPolicy_IAMRoleOrder_jsonEncode(t *testing.T) { }) } +func TestAccS3BucketPolicy_migrate_noChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_policy.test" + bucketResourceName := "aws_s3_bucket.test" + partition := acctest.Partition() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withPolicy(rName, partition), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + testAccCheckBucketPolicy(bucketResourceName, testAccBucketPolicy(rName, partition)), + ), + }, + { + Config: testAccBucketPolicy_Migrate_NoChangeConfig(rName, partition), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + testAccCheckBucketPolicy(resourceName, testAccBucketPolicy(rName, partition)), + ), + }, + }, + }) +} + +func TestAccS3BucketPolicy_migrate_withChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_policy.test" + bucketResourceName := "aws_s3_bucket.test" + partition := acctest.Partition() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withPolicy(rName, partition), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + testAccCheckBucketPolicy(bucketResourceName, testAccBucketPolicy(rName, partition)), + ), + 
}, + { + Config: testAccBucketPolicy_Migrate_WithChangeConfig(rName, partition), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(resourceName), + testAccCheckBucketPolicy(resourceName, testAccBucketPolicyUpdated(rName, partition)), + ), + }, + }, + }) +} + func testAccCheckBucketHasPolicy(n string, expectedPolicyText string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -640,3 +701,56 @@ resource "aws_s3_bucket_policy" "bucket" { } `) } + +func testAccBucketPolicy_Migrate_NoChangeConfig(bucketName, partition string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_acl" "test" { + bucket = aws_s3_bucket.test.id + acl = "private" +} + +resource "aws_s3_bucket_policy" "test" { + bucket = aws_s3_bucket.test.id + policy = %[2]s +} +`, bucketName, strconv.Quote(testAccBucketPolicy(bucketName, partition))) +} + +func testAccBucketPolicy_Migrate_WithChangeConfig(bucketName, partition string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_acl" "test" { + bucket = aws_s3_bucket.test.id + acl = "private" +} + +resource "aws_s3_bucket_policy" "test" { + bucket = aws_s3_bucket.test.id + policy = %[2]s +} +`, bucketName, strconv.Quote(testAccBucketPolicyUpdated(bucketName, partition))) +} + +func testAccBucketPolicyUpdated(bucketName, partition string) string { + return fmt.Sprintf(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Principal": { + "AWS": "*" + }, + "Action": "s3:PutObject", + "Resource": "arn:%[1]s:s3:::%[2]s/*" + } + ] +}`, partition, bucketName) +} diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 30b2ed78904..ece924edfa3 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -1,15 +1,18 @@ package s3_test import ( + "encoding/json" "fmt" "log" "reflect" "regexp" + "strconv" "strings" "testing" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/cloudfront" @@ -2101,6 +2104,62 @@ func TestAccS3Bucket_Security_disableDefaultEncryptionWhenDefaultEncryptionIsEna }) } +func TestAccS3Bucket_Security_policy(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + partition := acctest.Partition() + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withPolicy(bucketName, partition), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + testAccCheckBucketPolicy(resourceName, testAccBucketPolicy(bucketName, partition)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "acl", + "force_destroy", + "grant", + // NOTE: Prior to Terraform AWS Provider 3.0, this attribute did not import correctly either. + // The Read function does not require GetBucketPolicy, if the argument is not configured. 
+ // Rather than introduce that breaking change as well with 3.0, instead we leave the + // current Read behavior and note this will be deprecated in a later 3.x release along + // with other inline policy attributes across the provider. + "policy", + }, + }, + { + // As Policy is a Computed field, removing it from terraform will not + // trigger an update to remove it from the S3 bucket. + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + testAccCheckBucketPolicy(resourceName, testAccBucketPolicy(bucketName, partition)), + ), + }, + { + // As Policy is a Computed field, setting it to the empty String will not + // trigger an update to remove it from the S3 bucket. + Config: testAccBucketConfig_withEmptyPolicy(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + testAccCheckBucketPolicy(resourceName, testAccBucketPolicy(bucketName, partition)), + ), + }, + }, + }) +} + func TestAccS3Bucket_Web_simple(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) region := acctest.Region() @@ -2719,6 +2778,53 @@ func testAccCheckS3BucketDomainName(resourceName string, attributeName string, b } } +func testAccCheckBucketPolicy(n string, policy string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn + + out, err := conn.GetBucketPolicy(&s3.GetBucketPolicyInput{ + Bucket: aws.String(rs.Primary.ID), + }) + + if policy == "" { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoSuchBucketPolicy" { + // expected + return nil + } + if err == nil { + return fmt.Errorf("Expected no policy, got: %#v", *out.Policy) + } else { + return fmt.Errorf("GetBucketPolicy error: %v, expected %s", err, policy) + } + } + if err != nil { + return fmt.Errorf("GetBucketPolicy error: %v, expected %s", err, policy) + } + + if v := out.Policy; v == nil { + if policy != "" { + return fmt.Errorf("bad policy, found nil, expected: %s", policy) + } + } else { + expected := make(map[string]interface{}) + if err := json.Unmarshal([]byte(policy), &expected); err != nil { + return err + } + actual := make(map[string]interface{}) + if err := json.Unmarshal([]byte(*v), &actual); err != nil { + return err + } + + if !reflect.DeepEqual(expected, actual) { + return fmt.Errorf("bad policy, expected: %#v, got %#v", expected, actual) + } + } + + return nil + } +} + func testAccBucketRegionalDomainName(bucket, region string) string { regionalEndpoint, err := tfs3.BucketRegionalDomainName(bucket, region) if err != nil { @@ -2764,6 +2870,23 @@ func testAccCheckBucketCheckTags(n string, expectedTags map[string]string) resou } } +func testAccBucketPolicy(bucketName, partition string) string { + return fmt.Sprintf(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Principal": { + "AWS": "*" + }, + "Action": "s3:GetObject", + "Resource": "arn:%[1]s:s3:::%[2]s/*" + } + ] +}`, partition, bucketName) +} + func testAccBucketConfig_Basic(bucketName string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { @@ -3110,6 +3233,26 @@ resource "aws_s3_bucket" "test" { `, bucketName) } +func testAccBucketConfig_withEmptyPolicy(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + policy = "" +} +`, bucketName) +} + +func testAccBucketConfig_withPolicy(bucketName, 
 partition string) string {
+	return fmt.Sprintf(`
+resource "aws_s3_bucket" "test" {
+  bucket = %[1]q
+  acl    = "private"
+  policy = %[2]s
+}
+`, bucketName, strconv.Quote(testAccBucketPolicy(bucketName, partition)))
+}
+
 func testAccBucketConfig_ReplicationBase(bucketName string) string {
 	return acctest.ConfigCompose(
 		acctest.ConfigMultipleRegionProvider(2),
diff --git a/internal/service/s3/errors.go b/internal/service/s3/errors.go
index 7669a63320c..40d27fd05bd 100644
--- a/internal/service/s3/errors.go
+++ b/internal/service/s3/errors.go
@@ -6,6 +6,7 @@ package s3
 const (
 	ErrCodeInvalidBucketState  = "InvalidBucketState"
 	ErrCodeInvalidRequest      = "InvalidRequest"
+	ErrCodeMalformedPolicy     = "MalformedPolicy"
 	ErrCodeMethodNotAllowed    = "MethodNotAllowed"
 	ErrCodeNoSuchBucketPolicy  = "NoSuchBucketPolicy"
 	ErrCodeNoSuchConfiguration = "NoSuchConfiguration"
diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown
index dad86620d51..1e9afcec8ff 100644
--- a/website/docs/r/s3_bucket.html.markdown
+++ b/website/docs/r/s3_bucket.html.markdown
@@ -36,6 +36,10 @@ Configuring with both will cause inconsistencies and may overwrite configuration
 or with the deprecated parameter `logging` in the resource `aws_s3_bucket`.
 Configuring with both will cause inconsistencies and may overwrite configuration.
 
+~> **NOTE on S3 Bucket Policy Configuration:** S3 Bucket Policy can be configured in either the standalone resource [`aws_s3_bucket_policy`](s3_bucket_policy.html)
+or with the deprecated parameter `policy` in the resource `aws_s3_bucket`.
+Configuring with both will cause inconsistencies and may overwrite configuration.
+
 ~> **NOTE on S3 Bucket Replication Configuration:** S3 Bucket Replication can be configured in either the standalone resource [`aws_s3_bucket_replication_configuration`](s3_bucket_replication_configuration.html)
 or with the deprecated parameter `replication_configuration` in the resource `aws_s3_bucket`.
 Configuring with both will cause inconsistencies and may overwrite configuration.
@@ -438,6 +442,9 @@ The following arguments are supported:
   Use the resource [`aws_s3_bucket_logging`](s3_bucket_logging.html.markdown) instead.
 * `object_lock_enabled` - (Optional, Default:`false`, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled.
 * `object_lock_configuration` - (Optional) A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See [Object Lock Configuration](#object-lock-configuration) below.
+* `policy` - (Optional, **Deprecated**) A valid [bucket policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html) JSON document. Note that if the policy document is not specific enough (but still valid), Terraform may view the policy as constantly changing in a `terraform plan`. In this case, please make sure you use the verbose/specific version of the policy. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy).
+  Terraform will only perform drift detection if a configuration value is provided.
+  Use the resource [`aws_s3_bucket_policy`](s3_bucket_policy.html) instead.
 * `replication_configuration` - (Optional, **Deprecated**) A configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). See [Replication Configuration](#replication-configuration) below for details.
Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_replication_configuration`](s3_bucket_replication_configuration.html) instead. * `server_side_encryption_configuration` - (Optional, **Deprecated**) A configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). See [Server Side Encryption Configuration](#server-side-encryption-configuration) below for details. @@ -668,7 +675,6 @@ In addition to all arguments above, the following attributes are exported: * `mode` - The default Object Lock retention mode applied to new objects placed in this bucket. * `days` - The number of days specified for the default retention period. * `years` - The number of years specified for the default retention period. -* `policy` - The [bucket policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html) JSON document. * `region` - The AWS region this bucket resides in. * `request_payer` - Either `BucketOwner` or `Requester` that pays for the download and request fees. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). From efdca46c553f400e3589a8b587edfd2723410347 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Thu, 31 Mar 2022 23:27:40 -0400 Subject: [PATCH 30/42] Update CHANGELOG for #23843 --- .changelog/23843.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/23843.txt diff --git a/.changelog/23843.txt b/.changelog/23843.txt new file mode 100644 index 00000000000..fbcf4506918 --- /dev/null +++ b/.changelog/23843.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket: Update `policy` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_policy` resource. +``` \ No newline at end of file From 644cd2996efd297862cf95dcc7dd59d42d3876fd Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 1 Apr 2022 04:06:32 +0000 Subject: [PATCH 31/42] Update CHANGELOG.md for #23843 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f8d01cf1078..3806f7b48c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ ENHANCEMENTS: * resource/aws_s3_bucket: Update `cors_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_cors_configuration` resource. ([#23817](https://github.com/hashicorp/terraform-provider-aws/issues/23817)) * resource/aws_s3_bucket: Update `lifecycle_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_lifecycle_configuration` resource. ([#23818](https://github.com/hashicorp/terraform-provider-aws/issues/23818)) * resource/aws_s3_bucket: Update `logging` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_logging` resource. ([#23819](https://github.com/hashicorp/terraform-provider-aws/issues/23819)) +* resource/aws_s3_bucket: Update `policy` parameter to be configurable. 
Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_policy` resource. ([#23843](https://github.com/hashicorp/terraform-provider-aws/issues/23843)) * resource/aws_s3_bucket: Update `replication_configuration` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_replication_configuration` resource. ([#23842](https://github.com/hashicorp/terraform-provider-aws/issues/23842)) * resource/aws_s3_bucket: Update `server_side_encryption_configuration` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_server_side_encryption_configuration` resource. ([#23822](https://github.com/hashicorp/terraform-provider-aws/issues/23822)) * resource/aws_s3_bucket: Update `versioning` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_versioning` resource. ([#23820](https://github.com/hashicorp/terraform-provider-aws/issues/23820)) From fe0f04f805b17e82a349c3fa14a25fcc5a2a61b7 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Fri, 1 Apr 2022 00:16:11 -0400 Subject: [PATCH 32/42] r/s3_bucket: make 'request_payer' configurable --- internal/service/s3/bucket.go | 31 +++++++++- ...cket_request_payment_configuration_test.go | 58 +++++++++++++++++++ internal/service/s3/bucket_test.go | 43 ++++++++++++++ website/docs/r/s3_bucket.html.markdown | 10 +++- 4 files changed, 138 insertions(+), 4 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 6e5c251ee82..6a7d8eafd33 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -399,9 +399,11 @@ func ResourceBucket() *schema.Resource { }, "request_payer": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_request_payment_configuration resource instead", + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: "Use the aws_s3_bucket_request_payment_configuration resource instead", + ValidateFunc: validation.StringInSlice(s3.Payer_Values(), false), }, "replication_configuration": { @@ -850,6 +852,12 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("request_payer") { + if err := resourceBucketInternalRequestPayerUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Request Payer: %w", d.Id(), err) + } + } + if d.HasChange("replication_configuration") { if err := resourceBucketInternalReplicationConfigurationUpdate(conn, d); err != nil { return fmt.Errorf("error updating S3 Bucket (%s) Replication configuration: %w", d.Id(), err) @@ -1978,6 +1986,23 @@ func resourceBucketInternalReplicationConfigurationUpdate(conn *s3.S3, d *schema return err } +func resourceBucketInternalRequestPayerUpdate(conn *s3.S3, d *schema.ResourceData) error { + payer := d.Get("request_payer").(string) + + input := &s3.PutBucketRequestPaymentInput{ + Bucket: aws.String(d.Id()), + RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{ + Payer: aws.String(payer), + }, + } + + _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.PutBucketRequestPayment(input) + }) 
+ + return err +} + func resourceBucketInternalServerSideEncryptionConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { serverSideEncryptionConfiguration := d.Get("server_side_encryption_configuration").([]interface{}) diff --git a/internal/service/s3/bucket_request_payment_configuration_test.go b/internal/service/s3/bucket_request_payment_configuration_test.go index ee96edb9322..ef4c2472696 100644 --- a/internal/service/s3/bucket_request_payment_configuration_test.go +++ b/internal/service/s3/bucket_request_payment_configuration_test.go @@ -106,6 +106,64 @@ func TestAccS3BucketRequestPaymentConfiguration_update(t *testing.T) { }) } +func TestAccS3BucketRequestPaymentConfiguration_migrate_noChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.test" + resourceName := "aws_s3_bucket_request_payment_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketRequestPaymentConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withRequestPayer(rName, s3.PayerRequester), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "request_payer", s3.PayerRequester), + ), + }, + { + Config: testAccBucketRequestPaymentConfigurationBasicConfig(rName, s3.PayerRequester), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketRequestPaymentConfigurationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "payer", s3.PayerRequester), + ), + }, + }, + }) +} + +func TestAccS3BucketRequestPaymentConfiguration_migrate_withChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + bucketResourceName := "aws_s3_bucket.test" + resourceName := "aws_s3_bucket_request_payment_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketRequestPaymentConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withRequestPayer(rName, s3.PayerRequester), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "request_payer", s3.PayerRequester), + ), + }, + { + Config: testAccBucketRequestPaymentConfigurationBasicConfig(rName, s3.PayerBucketOwner), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketRequestPaymentConfigurationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "payer", s3.PayerBucketOwner), + ), + }, + }, + }) +} + func testAccCheckBucketRequestPaymentConfigurationDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index ece924edfa3..540eee11436 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -298,6 +298,40 @@ func TestAccS3Bucket_Basic_keyEnabled(t *testing.T) { }) } +func TestAccS3Bucket_Basic_requestPayer(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: 
func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withRequestPayer(bucketName, s3.PayerBucketOwner), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "request_payer", s3.PayerBucketOwner), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + { + Config: testAccBucketConfig_withRequestPayer(bucketName, s3.PayerRequester), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "request_payer", s3.PayerRequester), + ), + }, + }, + }) +} + // Test TestAccS3Bucket_disappears is designed to fail with a "plan // not empty" error in Terraform, to check against regressions. // See https://github.com/hashicorp/terraform/pull/2925 @@ -4170,6 +4204,15 @@ resource "aws_s3_bucket" "source" { `, bucketName)) } +func testAccBucketConfig_withRequestPayer(bucketName, requestPayer string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + request_payer = %[2]q +} +`, bucketName, requestPayer) +} + func testAccBucketConfig_withVersioning(bucketName string, enabled bool) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 1e9afcec8ff..0ce46c35bff 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -44,6 +44,10 @@ Configuring with both will cause inconsistencies and may overwrite configuration or with the deprecated parameter `replication_configuration` in the resource `aws_s3_bucket`. Configuring with both will cause inconsistencies and may overwrite configuration. +~> **NOTE on S3 Bucket Request Payment Configuration:** S3 Bucket Request Payment can be configured in either the standalone resource [`aws_s3_bucket_request_payment_configuration`](s3_bucket_request_payment_configuration.html) +or with the deprecated parameter `request_payer` in the resource `aws_s3_bucket`. +Configuring with both will cause inconsistencies and may overwrite configuration. + ~> **NOTE on S3 Bucket Server Side Encryption Configuration:** S3 Bucket Server Side Encryption can be configured in either the standalone resource [`aws_s3_bucket_server_side_encryption_configuration`](s3_bucket_server_side_encryption_configuration.html) or with the deprecated parameter `server_side_encryption_configuration` in the resource `aws_s3_bucket`. Configuring with both will cause inconsistencies and may overwrite configuration. @@ -447,6 +451,11 @@ The following arguments are supported: Use the resource [`aws_s3_bucket_policy`](s3_bucket_policy.html) instead. * `replication_configuration` - (Optional, **Deprecated**) A configuration of [replication configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html). See [Replication Configuration](#replication-configuration) below for details. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_replication_configuration`](s3_bucket_replication_configuration.html) instead. +* `request_payer` - (Optional, **Deprecated**) Specifies who should bear the cost of Amazon S3 data transfer. 
+ Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket would incur the costs of any data transfer. + See [Requester Pays Buckets](http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) developer guide for more information. + Terraform will only perform drift detection if a configuration value is provided. + Use the resource [`aws_s3_bucket_request_payment_configuration`](s3_bucket_request_payment_configuration.html) instead. * `server_side_encryption_configuration` - (Optional, **Deprecated**) A configuration of [server-side encryption configuration](http://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). See [Server Side Encryption Configuration](#server-side-encryption-configuration) below for details. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_server_side_encryption_configuration`](s3_bucket_server_side_encryption_configuration.html) instead. @@ -676,7 +685,6 @@ In addition to all arguments above, the following attributes are exported: * `days` - The number of days specified for the default retention period. * `years` - The number of years specified for the default retention period. * `region` - The AWS region this bucket resides in. -* `request_payer` - Either `BucketOwner` or `Requester` that pays for the download and request fees. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). * `website_endpoint` - The website endpoint, if the bucket is configured with a website. If not, this will be an empty string. * `website_domain` - The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records. From 6c78858262efc22c317b628fa2254b59a2c5dbb4 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Fri, 1 Apr 2022 00:16:17 -0400 Subject: [PATCH 33/42] Update CHANGELOG for #23844 --- .changelog/23844.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/23844.txt diff --git a/.changelog/23844.txt b/.changelog/23844.txt new file mode 100644 index 00000000000..b4c96c31a8e --- /dev/null +++ b/.changelog/23844.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket: Update `request_payer` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_request_payment_configuration` resource. +``` \ No newline at end of file From 8057f87ade2a13babb411af368c30bdc332fa548 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 1 Apr 2022 04:34:14 +0000 Subject: [PATCH 34/42] Update CHANGELOG.md for #23844 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3806f7b48c2..761d7d43635 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ ENHANCEMENTS: * resource/aws_s3_bucket: Update `logging` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_logging` resource. ([#23819](https://github.com/hashicorp/terraform-provider-aws/issues/23819)) * resource/aws_s3_bucket: Update `policy` parameter to be configurable. 
Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_policy` resource. ([#23843](https://github.com/hashicorp/terraform-provider-aws/issues/23843)) * resource/aws_s3_bucket: Update `replication_configuration` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_replication_configuration` resource. ([#23842](https://github.com/hashicorp/terraform-provider-aws/issues/23842)) +* resource/aws_s3_bucket: Update `request_payer` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_request_payment_configuration` resource. ([#23844](https://github.com/hashicorp/terraform-provider-aws/issues/23844)) * resource/aws_s3_bucket: Update `server_side_encryption_configuration` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_server_side_encryption_configuration` resource. ([#23822](https://github.com/hashicorp/terraform-provider-aws/issues/23822)) * resource/aws_s3_bucket: Update `versioning` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_versioning` resource. ([#23820](https://github.com/hashicorp/terraform-provider-aws/issues/23820)) * resource/aws_s3_bucket: Update `website` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_website_configuration` resource. 
([#23821](https://github.com/hashicorp/terraform-provider-aws/issues/23821)) From a912ad578f46025dd6b5e20cbd107682a34f21d5 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Fri, 1 Apr 2022 01:44:14 -0400 Subject: [PATCH 35/42] r/s3_bucket: make 'object_lock_configuration.rule' configurable --- internal/service/s3/bucket.go | 72 +++++++++++++------ .../bucket_object_lock_configuration_test.go | 72 +++++++++++++++++++ internal/service/s3/bucket_test.go | 42 +++++++++-- website/docs/r/s3_bucket.html.markdown | 59 +++++++++++---- 4 files changed, 204 insertions(+), 41 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 6a7d8eafd33..1859f37df15 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -628,52 +628,56 @@ func ResourceBucket() *schema.Resource { Optional: true, Computed: true, // Can be removed when object_lock_configuration.0.object_lock_enabled is removed ForceNew: true, - ConflictsWith: []string{"object_lock_configuration.0.object_lock_enabled"}, + ConflictsWith: []string{"object_lock_configuration"}, }, "object_lock_configuration": { - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Deprecated: "Use the top-level parameter object_lock_enabled and the aws_s3_bucket_object_lock_configuration resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "object_lock_enabled": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(s3.ObjectLockEnabled_Values(), false), - Deprecated: "Use the top-level parameter object_lock_enabled instead", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"object_lock_enabled"}, + ValidateFunc: validation.StringInSlice(s3.ObjectLockEnabled_Values(), false), + Deprecated: "Use the top-level parameter object_lock_enabled instead", }, "rule": { Type: schema.TypeList, - Computed: true, + Optional: true, Deprecated: "Use the aws_s3_bucket_object_lock_configuration resource instead", + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "default_retention": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_object_lock_configuration resource instead", + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "mode": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_object_lock_configuration resource instead", + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.ObjectLockRetentionMode_Values(), false), }, "days": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_object_lock_configuration resource instead", + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), }, "years": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_object_lock_configuration resource instead", + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), }, }, }, @@ -2440,6 +2444,28 @@ func expandS3ObjectLockConfiguration(vConf []interface{}) *s3.ObjectLockConfigur conf.ObjectLockEnabled = aws.String(vObjectLockEnabled) } + if vRule, ok := mConf["rule"].([]interface{}); ok && len(vRule) > 0 { + mRule := vRule[0].(map[string]interface{}) + + if vDefaultRetention, ok := 
mRule["default_retention"].([]interface{}); ok && len(vDefaultRetention) > 0 && vDefaultRetention[0] != nil { + mDefaultRetention := vDefaultRetention[0].(map[string]interface{}) + + conf.Rule = &s3.ObjectLockRule{ + DefaultRetention: &s3.DefaultRetention{}, + } + + if vMode, ok := mDefaultRetention["mode"].(string); ok && vMode != "" { + conf.Rule.DefaultRetention.Mode = aws.String(vMode) + } + if vDays, ok := mDefaultRetention["days"].(int); ok && vDays > 0 { + conf.Rule.DefaultRetention.Days = aws.Int64(int64(vDays)) + } + if vYears, ok := mDefaultRetention["years"].(int); ok && vYears > 0 { + conf.Rule.DefaultRetention.Years = aws.Int64(int64(vYears)) + } + } + } + return conf } diff --git a/internal/service/s3/bucket_object_lock_configuration_test.go b/internal/service/s3/bucket_object_lock_configuration_test.go index 4b200ca129e..71390ea6131 100644 --- a/internal/service/s3/bucket_object_lock_configuration_test.go +++ b/internal/service/s3/bucket_object_lock_configuration_test.go @@ -102,6 +102,78 @@ func TestAccS3BucketObjectLockConfiguration_update(t *testing.T) { }) } +func TestAccS3BucketObjectLockConfiguration_migrate_noChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_object_lock_configuration.test" + bucketResourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketObjectLockConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_ObjectLockEnabledWithDefaultRetention(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.rule.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.rule.0.default_retention.0.mode", s3.ObjectLockRetentionModeCompliance), + resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.rule.0.default_retention.0.days", "3"), + ), + }, + { + Config: testAccBucketObjectLockConfigurationBasicConfig(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketObjectLockConfigurationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.days", "3"), + resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.mode", s3.ObjectLockRetentionModeCompliance), + ), + }, + }, + }) +} + +func TestAccS3BucketObjectLockConfiguration_migrate_withChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_object_lock_configuration.test" + bucketResourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: 
testAccCheckBucketObjectLockConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_ObjectLockEnabledNoDefaultRetention(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(bucketResourceName, "object_lock_configuration.0.rule.#", "0"), + ), + }, + { + Config: testAccBucketObjectLockConfigurationBasicConfig(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketObjectLockConfigurationExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.days", "3"), + resource.TestCheckResourceAttr(resourceName, "rule.0.default_retention.0.mode", s3.ObjectLockRetentionModeCompliance), + ), + }, + }, + }) +} + func testAccCheckBucketObjectLockConfigurationDestroy(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 540eee11436..7698166fff6 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -785,7 +785,7 @@ func TestAccS3Bucket_Manage_objectLock(t *testing.T) { CheckDestroy: testAccCheckBucketDestroy, Steps: []resource.TestStep{ { - Config: testAccObjectLockEnabledNoDefaultRetention(bucketName), + Config: testAccBucketConfig_ObjectLockEnabledNoDefaultRetention(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), @@ -800,6 +800,17 @@ func TestAccS3Bucket_Manage_objectLock(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"force_destroy"}, }, + { + Config: testAccBucketConfig_ObjectLockEnabledWithDefaultRetention(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", "Enabled"), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.rule.0.default_retention.0.mode", "COMPLIANCE"), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.rule.0.default_retention.0.days", "3"), + ), + }, }, }) } @@ -815,7 +826,7 @@ func TestAccS3Bucket_Manage_objectLock_deprecatedEnabled(t *testing.T) { CheckDestroy: testAccCheckBucketDestroy, Steps: []resource.TestStep{ { - Config: testAccObjectLockEnabledNoDefaultRetention_deprecatedEnabled(bucketName), + Config: testAccBucketConfig_ObjectLockEnabledNoDefaultRetention_deprecatedEnabled(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), @@ -845,7 +856,7 @@ func TestAccS3Bucket_Manage_objectLock_migrate(t *testing.T) { CheckDestroy: testAccCheckBucketDestroy, Steps: 
[]resource.TestStep{ { - Config: testAccObjectLockEnabledNoDefaultRetention_deprecatedEnabled(bucketName), + Config: testAccBucketConfig_ObjectLockEnabledNoDefaultRetention_deprecatedEnabled(bucketName), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(resourceName), resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), @@ -854,7 +865,7 @@ func TestAccS3Bucket_Manage_objectLock_migrate(t *testing.T) { ), }, { - Config: testAccObjectLockEnabledNoDefaultRetention(bucketName), + Config: testAccBucketConfig_ObjectLockEnabledNoDefaultRetention(bucketName), PlanOnly: true, }, }, @@ -4480,7 +4491,7 @@ resource "aws_s3_bucket_acl" "test6" { `, randInt) } -func testAccObjectLockEnabledNoDefaultRetention(bucketName string) string { +func testAccBucketConfig_ObjectLockEnabledNoDefaultRetention(bucketName string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { bucket = %[1]q @@ -4490,7 +4501,26 @@ resource "aws_s3_bucket" "test" { `, bucketName) } -func testAccObjectLockEnabledNoDefaultRetention_deprecatedEnabled(bucketName string) string { +func testAccBucketConfig_ObjectLockEnabledWithDefaultRetention(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + object_lock_configuration { + object_lock_enabled = "Enabled" + + rule { + default_retention { + mode = "COMPLIANCE" + days = 3 + } + } + } +} +`, bucketName) +} + +func testAccBucketConfig_ObjectLockEnabledNoDefaultRetention_deprecatedEnabled(bucketName string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { bucket = %[1]q diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 0ce46c35bff..08aa82e8891 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -36,6 +36,10 @@ Configuring with both will cause inconsistencies and may overwrite configuration or with the deprecated parameter `logging` in the resource `aws_s3_bucket`. Configuring with both will cause inconsistencies and may overwrite configuration. +~> **NOTE on S3 Bucket Object Lock Configuration:** S3 Bucket Object Lock can be configured in either the standalone resource [`aws_s3_bucket_object_lock_configuration`](s3_bucket_object_lock_configuration.html) +or with the deprecated parameter `object_lock_configuration` in the resource `aws_s3_bucket`. +Configuring with both will cause inconsistencies and may overwrite configuration. + ~> **NOTE on S3 Bucket Policy Configuration:** S3 Bucket Policy can be configured in either the standalone resource [`aws_s3_bucket_policy`](s3_bucket_policy.html) or with the deprecated parameter `policy` in the resource `aws_s3_bucket`. Configuring with both will cause inconsistencies and may overwrite configuration. @@ -245,11 +249,28 @@ resource "aws_s3_bucket" "versioning_bucket" { ### Using object lock configuration -The `object_lock_configuration.rule` argument is read-only as of version 4.0 of the Terraform AWS Provider. -To **enable** Object Lock on a **new** bucket, use the `object_lock_enabled` argument in **this** resource. See [Object Lock Configuration](#object-lock-configuration) below for details. -To configure the default retention rule of the Object Lock configuration, see the [`aws_s3_bucket_object_lock_configuration` resource](s3_bucket_object_lock_configuration.html.markdown) for configuration details. +-> **NOTE:** The parameter `object_lock_configuration` is deprecated. 
+To **enable** Object Lock on a **new** bucket, use the `object_lock_enabled` argument in **this** resource.
+To configure the default retention rule of the Object Lock configuration, use the standalone [`aws_s3_bucket_object_lock_configuration` resource](s3_bucket_object_lock_configuration.html.markdown) instead.
To **enable** Object Lock on an **existing** bucket, please contact AWS Support and refer to the [Object lock configuration for an existing bucket](s3_bucket_object_lock_configuration.html.markdown#object-lock-configuration-for-an-existing-bucket) example for more details.
+
+```terraform
+resource "aws_s3_bucket" "example" {
+  bucket = "my-tf-example-bucket"
+
+  object_lock_configuration {
+    object_lock_enabled = "Enabled"
+
+    rule {
+      default_retention {
+        mode = "COMPLIANCE"
+        days = 5
+      }
+    }
+  }
+}
+```
+
### Using replication configuration

-> **NOTE:** The parameter `replication_configuration` is deprecated.
@@ -445,7 +466,9 @@ The following arguments are supported:
* `logging` - (Optional, **Deprecated**) A configuration of [S3 bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) parameters. See [Logging](#logging) below for details. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_logging`](s3_bucket_logging.html.markdown) instead.
* `object_lock_enabled` - (Optional, Default:`false`, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled.
-* `object_lock_configuration` - (Optional) A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See [Object Lock Configuration](#object-lock-configuration) below.
+* `object_lock_configuration` - (Optional, **Deprecated**) A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See [Object Lock Configuration](#object-lock-configuration) below for details.
+  Terraform will only perform drift detection if a configuration value is provided.
+  Use the `object_lock_enabled` parameter and the resource [`aws_s3_bucket_object_lock_configuration`](s3_bucket_object_lock_configuration.html.markdown) instead.
* `policy` - (Optional, **Deprecated**) A valid [bucket policy](https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html) JSON document. Note that if the policy document is not specific enough (but still valid), Terraform may view the policy as constantly changing in a `terraform plan`. In this case, please make sure you use the verbose/specific version of the policy. For more information about building AWS IAM policy documents with Terraform, see the [AWS IAM Policy Document Guide](https://learn.hashicorp.com/terraform/aws/iam-policy). Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_policy`](s3_bucket_policy.html) instead.
@@ -548,11 +571,29 @@ The `logging` configuration block supports the following arguments:
~> **NOTE:** You can only **enable** S3 Object Lock for **new** buckets. If you need to **enable** S3 Object Lock for an **existing** bucket, please contact AWS Support.
When you create a bucket with S3 Object Lock enabled, Amazon S3 automatically enables versioning for the bucket.
Once you create a bucket with S3 Object Lock enabled, you can't disable Object Lock or suspend versioning for the bucket.
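+
+For reference, the following is a minimal sketch of the recommended pattern for a **new** bucket, pairing the top-level `object_lock_enabled` argument with the standalone [`aws_s3_bucket_object_lock_configuration`](s3_bucket_object_lock_configuration.html.markdown) resource (resource names are illustrative):
+
+```terraform
+resource "aws_s3_bucket" "example" {
+  bucket = "my-tf-example-bucket"
+
+  # Object Lock can only be enabled at bucket creation time.
+  object_lock_enabled = true
+}
+
+resource "aws_s3_bucket_object_lock_configuration" "example" {
+  bucket = aws_s3_bucket.example.id
+
+  rule {
+    default_retention {
+      mode = "COMPLIANCE"
+      days = 5
+    }
+  }
+}
+```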
-To configure the default retention rule of the Object Lock configuration, see the [`aws_s3_bucket_object_lock_configuration` resource](s3_bucket_object_lock_configuration.html.markdown) for configuration details. -The `object_lock_configuration` configuration block supports the following argument: +~> **NOTE:** Currently, changes to the `object_lock_configuration` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes of Object Lock settings to an S3 bucket, use the `aws_s3_bucket_object_lock_configuration` resource instead. If you use `object_lock_configuration` on an `aws_s3_bucket`, Terraform will assume management over the full set of Object Lock configuration parameters for the S3 bucket, treating additional Object Lock configuration parameters as drift. For this reason, `object_lock_configuration` cannot be mixed with the external `aws_s3_bucket_object_lock_configuration` resource for a given S3 bucket. + +The `object_lock_configuration` configuration block supports the following arguments: * `object_lock_enabled` - (Optional, **Deprecated**) Indicates whether this bucket has an Object Lock configuration enabled. Valid value is `Enabled`. Use the top-level argument `object_lock_enabled` instead. +* `rule` - (Optional) The Object Lock rule in place for this bucket ([documented below](#rule)). + +#### Rule + +The `rule` configuration block supports the following argument: + +* `default_retention` - (Required) The default retention period that you want to apply to new objects placed in this bucket ([documented below](#default-retention)). + +#### Default Retention + +The `default_retention` configuration block supports the following arguments: + +~> **NOTE:** Either `days` or `years` must be specified, but not both. + +* `mode` - (Required) The default Object Lock retention mode you want to apply to new objects placed in this bucket. Valid values are `GOVERNANCE` and `COMPLIANCE`. +* `days` - (Optional) The number of days that you want to specify for the default retention period. +* `years` - (Optional) The number of years that you want to specify for the default retention period. ### Replication Configuration @@ -678,12 +719,6 @@ In addition to all arguments above, the following attributes are exported: * `bucket_domain_name` - The bucket domain name. Will be of format `bucketname.s3.amazonaws.com`. * `bucket_regional_domain_name` - The bucket region-specific domain name. The bucket domain name including the region name, please refer [here](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent [redirect issues](https://forums.aws.amazon.com/thread.jspa?threadID=216814) from CloudFront to S3 Origin URL. * `hosted_zone_id` - The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. -* `object_lock_configuration` - The [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) configuration. - * `rule` - The Object Lock rule in place for this bucket. - * `default_retention` - The default retention period applied to new objects placed in this bucket. - * `mode` - The default Object Lock retention mode applied to new objects placed in this bucket. - * `days` - The number of days specified for the default retention period. - * `years` - The number of years specified for the default retention period. 
* `region` - The AWS region this bucket resides in. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). * `website_endpoint` - The website endpoint, if the bucket is configured with a website. If not, this will be an empty string. From e63baf02d16e0c726ec5d9a548cd088bd3ff77c1 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Fri, 1 Apr 2022 01:44:20 -0400 Subject: [PATCH 36/42] Update CHANGELOG for #23984 --- .changelog/23984.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/23984.txt diff --git a/.changelog/23984.txt b/.changelog/23984.txt new file mode 100644 index 00000000000..d20dae5ada0 --- /dev/null +++ b/.changelog/23984.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket: Update `object_lock_configuration.rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_object_lock_configuration` resource. +``` \ No newline at end of file From 138a0e974a596d5402465e9903f83f68a1ac03c6 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 1 Apr 2022 14:11:20 +0000 Subject: [PATCH 37/42] Update CHANGELOG.md (Manual Trigger) --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 761d7d43635..888207bd605 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,6 +38,7 @@ ENHANCEMENTS: * resource/aws_s3_bucket: Update `cors_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_cors_configuration` resource. ([#23817](https://github.com/hashicorp/terraform-provider-aws/issues/23817)) * resource/aws_s3_bucket: Update `lifecycle_rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_lifecycle_configuration` resource. ([#23818](https://github.com/hashicorp/terraform-provider-aws/issues/23818)) * resource/aws_s3_bucket: Update `logging` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_logging` resource. ([#23819](https://github.com/hashicorp/terraform-provider-aws/issues/23819)) +* resource/aws_s3_bucket: Update `object_lock_configuration.rule` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_object_lock_configuration` resource. ([#23984](https://github.com/hashicorp/terraform-provider-aws/issues/23984)) * resource/aws_s3_bucket: Update `policy` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_policy` resource. ([#23843](https://github.com/hashicorp/terraform-provider-aws/issues/23843)) * resource/aws_s3_bucket: Update `replication_configuration` parameter to be configurable. 
Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_replication_configuration` resource. ([#23842](https://github.com/hashicorp/terraform-provider-aws/issues/23842))
* resource/aws_s3_bucket: Update `request_payer` parameter to be configurable. Please refer to the documentation for details on drift detection and potential conflicts when configuring this parameter with the standalone `aws_s3_bucket_request_payment_configuration` resource. ([#23844](https://github.com/hashicorp/terraform-provider-aws/issues/23844))

From fb87e196d6a92422251793821684e9ea62793a8e Mon Sep 17 00:00:00 2001
From: Angie Pinilla
Date: Fri, 1 Apr 2022 17:05:25 -0400
Subject: [PATCH 38/42] update Version 4 upgrade guide with S3 bucket remediation info

---
 website/docs/guides/version-4-upgrade.html.md | 139 +++++++++++++++++-
 1 file changed, 136 insertions(+), 3 deletions(-)

diff --git a/website/docs/guides/version-4-upgrade.html.md b/website/docs/guides/version-4-upgrade.html.md
index ec878e7f0b4..a090edd9213 100644
--- a/website/docs/guides/version-4-upgrade.html.md
+++ b/website/docs/guides/version-4-upgrade.html.md
@@ -12,7 +12,8 @@ Version 4.0.0 of the AWS provider for Terraform is a major release and includes
We previously marked most of the changes we outline in this guide as deprecated in the Terraform plan/apply output throughout previous provider releases. You can find these changes, including deprecation notices, in the [Terraform AWS Provider CHANGELOG](https://github.com/hashicorp/terraform-provider-aws/blob/main/CHANGELOG.md).

-~> **NOTE:** Version 4.0.0 of the AWS Provider introduces significant changes to the `aws_s3_bucket` resource. See [S3 Bucket Refactor](#s3-bucket-refactor) for more details.
+~> **NOTE:** Versions 4.0.0 through 4.8.0 of the AWS Provider introduce significant breaking changes to the `aws_s3_bucket` resource. See [S3 Bucket Refactor](#s3-bucket-refactor) for more details.
+We recommend upgrading to v4.9.0 or later of the AWS Provider instead, where only non-breaking changes and deprecation notices are introduced to the `aws_s3_bucket` resource. See [Changes to S3 Bucket Drift Detection](#changes-to-s3-bucket-drift-detection) for additional considerations when upgrading to v4.9.0 or later.

~> **NOTE:** Version 4.0.0 of the AWS Provider introduces changes to the precedence of some authentication and configuration parameters.
These changes bring the provider in line with the AWS CLI and SDKs.
@@ -29,7 +30,8 @@ Upgrade topics:

- [Provider Version Configuration](#provider-version-configuration)
- [Changes to Authentication](#changes-to-authentication)
- [New Provider Arguments](#new-provider-arguments)
-- [S3 Bucket Refactor](#s3-bucket-refactor)
+- [Changes to S3 Bucket Drift Detection](#changes-to-s3-bucket-drift-detection) (**Applicable to v4.9.0 and later of the AWS Provider**)
+- [S3 Bucket Refactor](#s3-bucket-refactor) (**Only applicable to v4.0.0 through v4.8.0 of the AWS Provider**)
- [`acceleration_status` Argument](#acceleration_status-argument)
- [`acl` Argument](#acl-argument)
- [`cors_rule` Argument](#cors_rule-argument)
@@ -195,8 +197,139 @@ provider "aws" {

Note that the provider can only resolve FIPS endpoints where AWS provides FIPS support. Support depends on the service and may include `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `us-gov-east-1`, `us-gov-west-1`, and `ca-central-1`.
For more information, see [Federal Information Processing Standard (FIPS) 140-2](https://aws.amazon.com/compliance/fips/).
+
+## Changes to S3 Bucket Drift Detection
+
+~> **NOTE:** This only applies to v4.9.0 and later of the AWS Provider.
+
+~> **NOTE:** If you are migrating from v3.75.x of the AWS Provider and you have already adopted the standalone S3 bucket resources (e.g. `aws_s3_bucket_lifecycle_configuration`),
+a [`lifecycle` configuration block to ignore changes](https://www.terraform.io/language/meta-arguments/lifecycle#ignore_changes) to the internal parameters of the source `aws_s3_bucket` resources will no longer be necessary and can be removed upon upgrade.
+
+~> **NOTE:** In the next major version, v5.0, the parameters listed below will be removed entirely from the `aws_s3_bucket` resource.
+For this reason, a deprecation notice is printed in the Terraform CLI for each of the parameters when used in a configuration.
+
+To remediate the breaking changes introduced to the `aws_s3_bucket` resource in v4.0.0 of the AWS Provider,
+v4.9.0 and later retain the same configuration parameters of the `aws_s3_bucket` resource as in v3.x.
+Functionality differs from v3.x only in that Terraform will perform drift detection for each of the following parameters only if a configuration value is provided:
+
+* `acceleration_status`
+* `acl`
+* `cors_rule`
+* `grant`
+* `lifecycle_rule`
+* `logging`
+* `object_lock_configuration`
+* `policy`
+* `replication_configuration`
+* `request_payer`
+* `server_side_encryption_configuration`
+* `versioning`
+* `website`
+
+Thus, if one of these parameters was once configured and then is entirely removed from an `aws_s3_bucket` resource configuration,
+Terraform will not pick up on these changes on a subsequent `terraform plan` or `terraform apply`.
+
+For example, given the following configuration with a single `cors_rule`:
+
+```terraform
+resource "aws_s3_bucket" "example" {
+  bucket = "yournamehere"
+
+  # ... other configuration ...
+  cors_rule {
+    allowed_headers = ["*"]
+    allowed_methods = ["PUT", "POST"]
+    allowed_origins = ["https://s3-website-test.hashicorp.com"]
+    expose_headers  = ["ETag"]
+    max_age_seconds = 3000
+  }
+}
+```
+
+When updated to the following configuration without a `cors_rule`:
+
+```terraform
+resource "aws_s3_bucket" "example" {
+  bucket = "yournamehere"
+
+  # ... other configuration ...
+}
+```
+
+Terraform CLI with v4.9.0 of the AWS Provider will report back:
+
+```shell
+aws_s3_bucket.example: Refreshing state... [id=yournamehere]
+...
+No changes. Your infrastructure matches the configuration.
+```
+
+With that said, to manage changes to these parameters in the `aws_s3_bucket` resource, practitioners should configure each parameter's respective standalone resource
+and perform updates directly on those new configurations.
The parameters are mapped to the standalone resources as follows: + +| `aws_s3_bucket` Parameter | Standalone Resource | +|----------------------------------------|------------------------------------------------------| +| `acceleration_status` | `aws_s3_bucket_accelerate_configuration` | +| `acl` | `aws_s3_bucket_acl` | +| `cors_rule` | `aws_s3_bucket_cors_configuration` | +| `grant` | `aws_s3_bucket_acl` | +| `lifecycle_rule` | `aws_s3_bucket_lifecycle_configuration` | +| `logging` | `aws_s3_bucket_logging` | +| `object_lock_configuration` | `aws_s3_bucket_object_lock_configuration` | +| `policy` | `aws_s3_bucket_policy` | +| `replication_configuration` | `aws_s3_bucket_replication_configuration` | +| `request_payer` | `aws_s3_bucket_request_payment_configuration` | +| `server_side_encryption_configuration` | `aws_s3_bucket_server_side_encryption_configuration` | +| `versioning` | `aws_s3_bucket_versioning` | +| `website` | `aws_s3_bucket_website_configuration` | + +Going back to the earlier example, given the following configuration: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT", "POST"] + allowed_origins = ["https://s3-website-test.hashicorp.com"] + expose_headers = ["ETag"] + max_age_seconds = 3000 + } +} +``` + +Practitioners can upgrade to v4.9.0 and then introduce the standalone `aws_s3_bucket_cors_configuration` resource, e.g. + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + # ... other configuration ... +} + +resource "aws_s3_bucket_cors_configuration" "example" { + bucket = aws_s3_bucket.example.id + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT", "POST"] + allowed_origins = ["https://s3-website-test.hashicorp.com"] + expose_headers = ["ETag"] + max_age_seconds = 3000 + } +} +``` + +Depending on the tools available to you, the above configuration can either be directly applied with Terraform or the standalone resource +can be imported into Terraform state. Please refer to each standalone resource's _Import_ documentation for the proper syntax. + +Once the standalone resource(s) are managed by Terraform, updates/removal can be performed as needed. + ## S3 Bucket Refactor +~> **NOTE:** This only applies to v4.0.0 through v4.8.0 of the AWS Provider, which introduce significant breaking +changes to the `aws_s3_bucket` resource. We recommend upgrading to v4.9.0 of the AWS Provider instead. See the section above, [Changes to S3 Bucket Drift Detection](#changes-to-s3-bucket-drift-detection), for additional upgrade considerations. + To help distribute the management of S3 bucket settings via independent resources, various arguments and attributes in the `aws_s3_bucket` resource have become **read-only**. Configurations dependent on these arguments should be updated to use the corresponding `aws_s3_bucket_*` resource in order to prevent Terraform from reporting “unconfigurable attribute” errors for read-only arguments. Once updated, it is recommended to import new `aws_s3_bucket_*` resources into Terraform state. 
@@ -1513,7 +1646,7 @@ resource and remove `versioning` and its nested arguments in the `aws_s3_bucket` } } ``` - + * If migrating from an earlier version of Terraform AWS Provider: ```terraform From 29c53957137bf90efabe63e5f5b6c9d7c97ee895 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Fri, 1 Apr 2022 19:06:40 -0400 Subject: [PATCH 39/42] Provide examples of standalone resource adoption --- website/docs/guides/version-4-upgrade.html.md | 998 ++++++++++++++++++ 1 file changed, 998 insertions(+) diff --git a/website/docs/guides/version-4-upgrade.html.md b/website/docs/guides/version-4-upgrade.html.md index a090edd9213..047f44937b5 100644 --- a/website/docs/guides/version-4-upgrade.html.md +++ b/website/docs/guides/version-4-upgrade.html.md @@ -325,6 +325,1004 @@ can be imported into Terraform state. Please refer to each standalone resource's Once the standalone resource(s) are managed by Terraform, updates/removal can be performed as needed. +The following sections depict standalone resource adoption per individual parameter. Standalone resource adoption is not required to upgrade but is recommended to ensure drift is detected by Terraform. +The examples below are by no means exhaustive. The aim is to provide important concepts when migrating to a standalone resource whose parameters may not entirely align with the corresponding parameter in the `aws_s3_bucket` resource. + +### Migrating to `aws_s3_bucket_accelerate_configuration` + +Given this previous configuration: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + acceleration_status = "Enabled" +} +``` + +Update the configuration to: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... +} + +resource "aws_s3_bucket_accelerate_configuration" "example" { + bucket = aws_s3_bucket.example.id + status = "Enabled" +} +``` + +### Migrating to `aws_s3_bucket_acl` + +#### With `acl` + +Given this previous configuration: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + acl = "private" + + # ... other configuration ... +} +``` + +Update the configuration to: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" +} + +resource "aws_s3_bucket_acl" "example" { + bucket = aws_s3_bucket.example.id + acl = "private" +} +``` + +#### With `grant` + +Given this previous configuration: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + grant { + id = data.aws_canonical_user_id.current_user.id + type = "CanonicalUser" + permissions = ["FULL_CONTROL"] + } + + grant { + type = "Group" + permissions = ["READ_ACP", "WRITE"] + uri = "http://acs.amazonaws.com/groups/s3/LogDelivery" + } +} +``` + +Update the configuration to: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... 
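+
+  # The grant blocks that lived here are now expressed as access_control_policy
+  # grants on the aws_s3_bucket_acl resource that follows.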
+} + +resource "aws_s3_bucket_acl" "example" { + bucket = aws_s3_bucket.example.id + + access_control_policy { + grant { + grantee { + id = data.aws_canonical_user_id.current_user.id + type = "CanonicalUser" + } + permission = "FULL_CONTROL" + } + + grant { + grantee { + type = "Group" + uri = "http://acs.amazonaws.com/groups/s3/LogDelivery" + } + permission = "READ_ACP" + } + + grant { + grantee { + type = "Group" + uri = "http://acs.amazonaws.com/groups/s3/LogDelivery" + } + permission = "WRITE" + } + + owner { + id = data.aws_canonical_user_id.current_user.id + } + } +} +``` + +### Migrating to `aws_s3_bucket_cors_configuration` + +Given this previous configuration: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT", "POST"] + allowed_origins = ["https://s3-website-test.hashicorp.com"] + expose_headers = ["ETag"] + max_age_seconds = 3000 + } +} +``` + +Update the configuration to: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... +} + +resource "aws_s3_bucket_cors_configuration" "example" { + bucket = aws_s3_bucket.example.id + + cors_rule { + allowed_headers = ["*"] + allowed_methods = ["PUT", "POST"] + allowed_origins = ["https://s3-website-test.hashicorp.com"] + expose_headers = ["ETag"] + max_age_seconds = 3000 + } +} +``` + +### Migrating to `aws_s3_bucket_lifecycle_configuration` + +#### For Lifecycle Rules with no `prefix` previously configured + +~> **Note:** When configuring the `rule.filter` configuration block in the new `aws_s3_bucket_lifecycle_configuration` resource, use the AWS CLI s3api [get-bucket-lifecycle-configuration](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/get-bucket-lifecycle-configuration.html) +to get the source bucket's lifecycle configuration and determine if the `Filter` is configured as `"Filter" : {}` or `"Filter" : { "Prefix": "" }`. +If AWS returns the former, configure `rule.filter` as `filter {}`. Otherwise, neither a `rule.filter` nor `rule.prefix` parameter should be configured as shown here: + +Given this previous configuration: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + lifecycle_rule { + id = "Keep previous version 30 days, then in Glacier another 60" + enabled = true + + noncurrent_version_transition { + days = 30 + storage_class = "GLACIER" + } + + noncurrent_version_expiration { + days = 90 + } + } + + lifecycle_rule { + id = "Delete old incomplete multi-part uploads" + enabled = true + abort_incomplete_multipart_upload_days = 7 + } +} +``` + +Update the configuration to: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... 
+} + +resource "aws_s3_bucket_lifecycle_configuration" "example" { + bucket = aws_s3_bucket.example.id + + rule { + id = "Keep previous version 30 days, then in Glacier another 60" + status = "Enabled" + + noncurrent_version_transition { + noncurrent_days = 30 + storage_class = "GLACIER" + } + + noncurrent_version_expiration { + noncurrent_days = 90 + } + } + + rule { + id = "Delete old incomplete multi-part uploads" + status = "Enabled" + + abort_incomplete_multipart_upload { + days_after_initiation = 7 + } + } +} +``` + +#### For Lifecycle Rules with `prefix` previously configured as an empty string + +Given this previous configuration: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + lifecycle_rule { + id = "log-expiration" + enabled = true + prefix = "" + + transition { + days = 30 + storage_class = "STANDARD_IA" + } + + transition { + days = 180 + storage_class = "GLACIER" + } + } +} +``` + +Update the configuration to: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... +} + +resource "aws_s3_bucket_lifecycle_configuration" "example" { + bucket = aws_s3_bucket.example.id + + rule { + id = "log-expiration" + status = "Enabled" + + transition { + days = 30 + storage_class = "STANDARD_IA" + } + + transition { + days = 180 + storage_class = "GLACIER" + } + } +} +``` + +#### For Lifecycle Rules with `prefix` + +Given this previous configuration: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + lifecycle_rule { + id = "log-expiration" + enabled = true + prefix = "foobar" + + transition { + days = 30 + storage_class = "STANDARD_IA" + } + + transition { + days = 180 + storage_class = "GLACIER" + } + } +} +``` + +Update the configuration to: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... +} + +resource "aws_s3_bucket_lifecycle_configuration" "example" { + bucket = aws_s3_bucket.example.id + + rule { + id = "log-expiration" + status = "Enabled" + + filter { + prefix = "foobar" + } + + transition { + days = 30 + storage_class = "STANDARD_IA" + } + + transition { + days = 180 + storage_class = "GLACIER" + } + } +} +``` + +#### For Lifecycle Rules with `prefix` and `tags` + +Given this previous configuration: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + lifecycle_rule { + id = "log" + enabled = true + prefix = "log/" + + tags = { + rule = "log" + autoclean = "true" + } + + transition { + days = 30 + storage_class = "STANDARD_IA" + } + + transition { + days = 60 + storage_class = "GLACIER" + } + + expiration { + days = 90 + } + } + + lifecycle_rule { + id = "tmp" + prefix = "tmp/" + enabled = true + + expiration { + date = "2022-12-31" + } + } +} +``` + +Update the configuration to: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... 
+} + +resource "aws_s3_bucket_lifecycle_configuration" "example" { + bucket = aws_s3_bucket.example.id + + rule { + id = "log" + status = "Enabled" + + filter { + and { + prefix = "log/" + + tags = { + rule = "log" + autoclean = "true" + } + } + } + + transition { + days = 30 + storage_class = "STANDARD_IA" + } + + transition { + days = 60 + storage_class = "GLACIER" + } + + expiration { + days = 90 + } + } + + rule { + id = "tmp" + + filter { + prefix = "tmp/" + } + + expiration { + date = "2022-12-31T00:00:00Z" + } + + status = "Enabled" + } +} +``` + +### Migrating to `aws_s3_bucket_logging` + +Given this previous configuration: + +```terraform +resource "aws_s3_bucket" "log_bucket" { + # ... other configuration ... + bucket = "example-log-bucket" +} + +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + logging { + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" + } +} +``` + +Update the configuration to: + +```terraform +resource "aws_s3_bucket" "log_bucket" { + bucket = "example-log-bucket" + + # ... other configuration ... +} + +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... +} + +resource "aws_s3_bucket_logging" "example" { + bucket = aws_s3_bucket.example.id + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" +} +``` + +### Migrating to `aws_s3_bucket_object_lock_configuration` + +Given this previous configuration: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + object_lock_configuration { + object_lock_enabled = "Enabled" + + rule { + default_retention { + mode = "COMPLIANCE" + days = 3 + } + } + } +} +``` + +Update the configuration to: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + object_lock_enabled = true +} + +resource "aws_s3_bucket_object_lock_configuration" "example" { + bucket = aws_s3_bucket.example.id + + rule { + default_retention { + mode = "COMPLIANCE" + days = 3 + } + } +} +``` + +### Migrating to `aws_s3_bucket_policy` + +Given this previous configuration: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + policy = < **NOTE:** As `aws_s3_bucket_versioning` is a separate resource, any S3 objects for which versioning is important (_e.g._, a truststore for mutual TLS authentication) must implicitly or explicitly depend on the `aws_s3_bucket_versioning` resource. Otherwise, the S3 objects may be created before versioning has been set. [See below](#ensure-objects-depend-on-versioning) for an example. Also note that AWS recommends waiting 15 minutes after enabling versioning on a bucket before putting or deleting objects in/from the bucket. + +#### Buckets With Versioning Enabled + +Given this previous configuration: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + versioning { + enabled = true + } +} +``` + +Update the configuration to: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... 
+} + +resource "aws_s3_bucket_versioning" "example" { + bucket = aws_s3_bucket.example.id + versioning_configuration { + status = "Enabled" + } +} +``` + +#### Buckets With Versioning Disabled or Suspended + +Depending on the version of the Terraform AWS Provider you are migrating from, the interpretation of `versioning.enabled = false` +in your `aws_s3_bucket` resource will differ and thus the migration to the `aws_s3_bucket_versioning` resource will also differ as follows. + +If you are migrating from the Terraform AWS Provider `v3.70.0` or later: + +* For new S3 buckets, `enabled = false` is synonymous to `Disabled`. +* For existing S3 buckets, `enabled = false` is synonymous to `Suspended`. + +If you are migrating from an earlier version of the Terraform AWS Provider: + +* For both new and existing S3 buckets, `enabled = false` is synonymous to `Suspended`. + +Given this previous configuration: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + versioning { + enabled = false + } +} +``` + +Update the configuration to one of the following: + +* If migrating from Terraform AWS Provider `v3.70.0` or later and bucket versioning was never enabled: + + ```terraform + resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + } + + resource "aws_s3_bucket_versioning" "example" { + bucket = aws_s3_bucket.example.id + versioning_configuration { + status = "Disabled" + } + } + ``` + +* If migrating from Terraform AWS Provider `v3.70.0` or later and bucket versioning was enabled at one point: + + ```terraform + resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + } + + resource "aws_s3_bucket_versioning" "example" { + bucket = aws_s3_bucket.example.id + versioning_configuration { + status = "Suspended" + } + } + ``` + +* If migrating from an earlier version of Terraform AWS Provider: + + ```terraform + resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + } + + resource "aws_s3_bucket_versioning" "example" { + bucket = aws_s3_bucket.example.id + versioning_configuration { + status = "Suspended" + } + } + ``` + +#### Ensure Objects Depend on Versioning + +When you create an object whose `version_id` you need and an `aws_s3_bucket_versioning` resource in the same configuration, you are more likely to have success by ensuring the `s3_object` depends either implicitly (see below) or explicitly (i.e., using `depends_on = [aws_s3_bucket_versioning.example]`) on the `aws_s3_bucket_versioning` resource. + +~> **NOTE:** For critical and/or production S3 objects, do not create a bucket, enable versioning, and create an object in the bucket within the same configuration. Doing so will not allow the AWS-recommended 15 minutes between enabling versioning and writing to the bucket. 
+ +This example shows the `aws_s3_object.example` depending implicitly on the versioning resource through the reference to `aws_s3_bucket_versioning.example.bucket` to define `bucket`: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yotto" +} + +resource "aws_s3_bucket_versioning" "example" { + bucket = aws_s3_bucket.example.id + + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_object" "example" { + bucket = aws_s3_bucket_versioning.example.bucket + key = "droeloe" + source = "example.txt" +} +``` + +### Migrating to `aws_s3_bucket_website_configuration` + +Given this previous configuration: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... + website { + index_document = "index.html" + error_document = "error.html" + } +} +``` + +Update the configuration to: + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "yournamehere" + + # ... other configuration ... +} + +resource "aws_s3_bucket_website_configuration" "example" { + bucket = aws_s3_bucket.example.id + + index_document { + suffix = "index.html" + } + + error_document { + key = "error.html" + } +} +``` + +Given this previous configuration that uses the `aws_s3_bucket` parameter `website_domain` with `aws_route53_record`: + +```terraform +resource "aws_route53_zone" "main" { + name = "domain.test" +} + +resource "aws_s3_bucket" "website" { + # ... other configuration ... + website { + index_document = "index.html" + error_document = "error.html" + } +} + +resource "aws_route53_record" "alias" { + zone_id = aws_route53_zone.main.zone_id + name = "www" + type = "A" + + alias { + zone_id = aws_s3_bucket.website.hosted_zone_id + name = aws_s3_bucket.website.website_domain + evaluate_target_health = true + } +} +``` + +Update the configuration to use the `aws_s3_bucket_website_configuration` resource and its `website_domain` parameter: + +```terraform +resource "aws_route53_zone" "main" { + name = "domain.test" +} + +resource "aws_s3_bucket" "website" { + # ... other configuration ... +} + +resource "aws_s3_bucket_website_configuration" "example" { + bucket = aws_s3_bucket.website.id + + index_document { + suffix = "index.html" + } +} + +resource "aws_route53_record" "alias" { + zone_id = aws_route53_zone.main.zone_id + name = "www" + type = "A" + + alias { + zone_id = aws_s3_bucket.website.hosted_zone_id + name = aws_s3_bucket_website_configuration.example.website_domain + evaluate_target_health = true + } +} +``` + ## S3 Bucket Refactor ~> **NOTE:** This only applies to v4.0.0 through v4.8.0 of the AWS Provider, which introduce significant breaking From f5c9f0c8daff188a3f2f2a6e7bb69d87eb2facc4 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Fri, 1 Apr 2022 20:10:04 -0400 Subject: [PATCH 40/42] Update CHANGELOG for #23985 --- .changelog/23985.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/23985.txt diff --git a/.changelog/23985.txt b/.changelog/23985.txt new file mode 100644 index 00000000000..a2849a6646d --- /dev/null +++ b/.changelog/23985.txt @@ -0,0 +1,3 @@ +```release-note:note +resource/aws_s3_bucket: The `acceleration_status`, `acl`, `cors_rule`, `grant`, `lifecycle_rule`, `logging`, `object_lock_configuration.rule`, `policy`, `replication_configuration`, `request_payer`, `server_side_encryption_configuration`, `versioning`, and `website` parameters are now Optional. 
Please refer to the documentation for details on drift detection and potential conflicts when configuring these parameters with the standalone `aws_s3_bucket_*` resources. +``` \ No newline at end of file From 43beb782d17c95da623e198da3ed0709048d1d90 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 5 Apr 2022 12:13:14 -0700 Subject: [PATCH 41/42] Tweaks to documentation --- website/docs/guides/version-4-upgrade.html.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/docs/guides/version-4-upgrade.html.md b/website/docs/guides/version-4-upgrade.html.md index 047f44937b5..a202b843bce 100644 --- a/website/docs/guides/version-4-upgrade.html.md +++ b/website/docs/guides/version-4-upgrade.html.md @@ -323,7 +323,7 @@ resource "aws_s3_bucket_cors_configuration" "example" { Depending on the tools available to you, the above configuration can either be directly applied with Terraform or the standalone resource can be imported into Terraform state. Please refer to each standalone resource's _Import_ documentation for the proper syntax. -Once the standalone resource(s) are managed by Terraform, updates/removal can be performed as needed. +Once the standalone resources are managed by Terraform, updates and removal can be performed as needed. The following sections depict standalone resource adoption per individual parameter. Standalone resource adoption is not required to upgrade but is recommended to ensure drift is detected by Terraform. The examples below are by no means exhaustive. The aim is to provide important concepts when migrating to a standalone resource whose parameters may not entirely align with the corresponding parameter in the `aws_s3_bucket` resource. @@ -889,7 +889,7 @@ resource "aws_s3_bucket" "example" { "Principal": { "AWS": "${data.aws_elb_service_account.current.arn}" }, - "Resource": "arn:${data.aws_partition.current.partition}:s3:::example/*", + "Resource": "arn:${data.aws_partition.current.partition}:s3:::yournamehere/*", "Sid": "Stmt1446575236270" } ], @@ -920,7 +920,7 @@ resource "aws_s3_bucket_policy" "example" { "Principal": { "AWS": "${data.aws_elb_service_account.current.arn}" }, - "Resource": "arn:${data.aws_partition.current.partition}:s3:::example/*", + "Resource": "${aws_s3_bucket.example.arn}/*", "Sid": "Stmt1446575236270" } ], @@ -2193,7 +2193,7 @@ resource "aws_s3_bucket" "example" { "Principal": { "AWS": "${data.aws_elb_service_account.current.arn}" }, - "Resource": "arn:${data.aws_partition.current.partition}:s3:::example/*", + "Resource": "arn:${data.aws_partition.current.partition}:s3:::yournamehere/*", "Sid": "Stmt1446575236270" } ], @@ -2237,7 +2237,7 @@ resource "aws_s3_bucket_policy" "example" { "Principal": { "AWS": "${data.aws_elb_service_account.current.arn}" }, - "Resource": "arn:${data.aws_partition.current.partition}:s3:::example/*", + "Resource": "${aws_s3_bucket.example.arn}/*", "Sid": "Stmt1446575236270" } ], From 76c2c5154e616ad7743c77614d03c6b02c8f1265 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 5 Apr 2022 12:14:35 -0700 Subject: [PATCH 42/42] Updates test to use `arn` parameter instead of manually-generated ARN --- internal/service/elb/load_balancer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/elb/load_balancer_test.go b/internal/service/elb/load_balancer_test.go index f85edaa5862..6715609ff6d 100644 --- a/internal/service/elb/load_balancer_test.go +++ b/internal/service/elb/load_balancer_test.go @@ -1272,7 +1272,7 @@ resource 
"aws_s3_bucket_policy" "test" { "Principal": { "AWS": "${data.aws_elb_service_account.current.arn}" }, - "Resource": "arn:${data.aws_partition.current.partition}:s3:::%[1]s/*", + "Resource": "${aws_s3_bucket.accesslogs_bucket.arn}/*", "Sid": "Stmt1446575236270" } ],