diff --git a/.changelog/22581.txt b/.changelog/22581.txt new file mode 100644 index 00000000000..c19ee806ddb --- /dev/null +++ b/.changelog/22581.txt @@ -0,0 +1,3 @@ +```release-note:breaking-change +resource/aws_s3_bucket: The `lifecycle_rule` argument has been deprecated and is now read-only. Use the `aws_s3_bucket_lifecycle_configuration` resource instead. +``` \ No newline at end of file diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index dc10bf90c54..32dc6b8005d 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -261,105 +261,109 @@ func ResourceBucket() *schema.Resource { }, "lifecycle_rule": { - Type: schema.TypeList, - Optional: true, + Type: schema.TypeList, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringLenBetween(0, 255), + Type: schema.TypeString, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "prefix": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, - "tags": tftags.TagsSchema(), + "tags": tftags.TagsSchemaComputedDeprecated("Use the aws_s3_bucket_lifecycle_configuration resource instead"), "enabled": { - Type: schema.TypeBool, - Required: true, + Type: schema.TypeBool, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "abort_incomplete_multipart_upload_days": { - Type: schema.TypeInt, - Optional: true, + Type: schema.TypeInt, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "expiration": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, + Type: schema.TypeList, + Computed: true, + Deprecated: 
"Use the aws_s3_bucket_lifecycle_configuration resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "date": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validBucketLifecycleTimestamp, + Type: schema.TypeString, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "days": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(0), + Type: schema.TypeInt, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "expired_object_delete_marker": { - Type: schema.TypeBool, - Optional: true, + Type: schema.TypeBool, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, }, }, }, "noncurrent_version_expiration": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, + Type: schema.TypeList, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "days": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(1), + Type: schema.TypeInt, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, }, }, }, "transition": { - Type: schema.TypeSet, - Optional: true, - Set: transitionHash, + Type: schema.TypeSet, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "date": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validBucketLifecycleTimestamp, + Type: schema.TypeString, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "days": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(0), + Type: schema.TypeInt, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, 
"storage_class": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.TransitionStorageClass_Values(), false), + Type: schema.TypeString, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, }, }, }, "noncurrent_version_transition": { - Type: schema.TypeSet, - Optional: true, - Set: transitionHash, + Type: schema.TypeSet, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "days": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(0), + Type: schema.TypeInt, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, "storage_class": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.TransitionStorageClass_Values(), false), + Type: schema.TypeString, + Computed: true, + Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", }, }, }, @@ -776,12 +780,6 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } - if d.HasChange("lifecycle_rule") { - if err := resourceBucketLifecycleUpdate(conn, d); err != nil { - return err - } - } - if d.HasChange("object_lock_configuration") { if err := resourceBucketInternalObjectLockConfigurationUpdate(conn, d); err != nil { return err @@ -1000,126 +998,16 @@ func resourceBucketRead(d *schema.ResourceData, meta interface{}) error { Bucket: aws.String(d.Id()), }) }) - if err != nil && !tfawserr.ErrMessageContains(err, "NoSuchLifecycleConfiguration", "") { - return err + if err != nil && !tfawserr.ErrCodeEquals(err, ErrCodeNoSuchLifecycleConfiguration) { + return fmt.Errorf("error getting S3 Bucket (%s) Lifecycle Configuration: %w", d.Id(), err) } - lifecycleRules := make([]map[string]interface{}, 0) - if lifecycle, ok := lifecycleResponse.(*s3.GetBucketLifecycleConfigurationOutput); ok && 
len(lifecycle.Rules) > 0 { - lifecycleRules = make([]map[string]interface{}, 0, len(lifecycle.Rules)) - - for _, lifecycleRule := range lifecycle.Rules { - log.Printf("[DEBUG] S3 bucket: %s, read lifecycle rule: %v", d.Id(), lifecycleRule) - rule := make(map[string]interface{}) - - // ID - if lifecycleRule.ID != nil && aws.StringValue(lifecycleRule.ID) != "" { - rule["id"] = aws.StringValue(lifecycleRule.ID) - } - filter := lifecycleRule.Filter - if filter != nil { - if filter.And != nil { - // Prefix - if filter.And.Prefix != nil && aws.StringValue(filter.And.Prefix) != "" { - rule["prefix"] = aws.StringValue(filter.And.Prefix) - } - // Tag - if len(filter.And.Tags) > 0 { - rule["tags"] = KeyValueTags(filter.And.Tags).IgnoreAWS().Map() - } - } else { - // Prefix - if filter.Prefix != nil && aws.StringValue(filter.Prefix) != "" { - rule["prefix"] = aws.StringValue(filter.Prefix) - } - // Tag - if filter.Tag != nil { - rule["tags"] = KeyValueTags([]*s3.Tag{filter.Tag}).IgnoreAWS().Map() - } - } - } else { - if lifecycleRule.Prefix != nil { - rule["prefix"] = aws.StringValue(lifecycleRule.Prefix) - } - } - - // Enabled - if lifecycleRule.Status != nil { - if aws.StringValue(lifecycleRule.Status) == s3.ExpirationStatusEnabled { - rule["enabled"] = true - } else { - rule["enabled"] = false - } - } - - // AbortIncompleteMultipartUploadDays - if lifecycleRule.AbortIncompleteMultipartUpload != nil { - if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil { - rule["abort_incomplete_multipart_upload_days"] = int(aws.Int64Value(lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)) - } - } - - // expiration - if lifecycleRule.Expiration != nil { - e := make(map[string]interface{}) - if lifecycleRule.Expiration.Date != nil { - e["date"] = (aws.TimeValue(lifecycleRule.Expiration.Date)).Format("2006-01-02") - } - if lifecycleRule.Expiration.Days != nil { - e["days"] = int(aws.Int64Value(lifecycleRule.Expiration.Days)) - } - if 
lifecycleRule.Expiration.ExpiredObjectDeleteMarker != nil { - e["expired_object_delete_marker"] = aws.BoolValue(lifecycleRule.Expiration.ExpiredObjectDeleteMarker) - } - rule["expiration"] = []interface{}{e} - } - // noncurrent_version_expiration - if lifecycleRule.NoncurrentVersionExpiration != nil { - e := make(map[string]interface{}) - if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil { - e["days"] = int(aws.Int64Value(lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays)) - } - rule["noncurrent_version_expiration"] = []interface{}{e} - } - //// transition - if len(lifecycleRule.Transitions) > 0 { - transitions := make([]interface{}, 0, len(lifecycleRule.Transitions)) - for _, v := range lifecycleRule.Transitions { - t := make(map[string]interface{}) - if v.Date != nil { - t["date"] = (aws.TimeValue(v.Date)).Format("2006-01-02") - } - if v.Days != nil { - t["days"] = int(aws.Int64Value(v.Days)) - } - if v.StorageClass != nil { - t["storage_class"] = aws.StringValue(v.StorageClass) - } - transitions = append(transitions, t) - } - rule["transition"] = schema.NewSet(transitionHash, transitions) - } - // noncurrent_version_transition - if len(lifecycleRule.NoncurrentVersionTransitions) > 0 { - transitions := make([]interface{}, 0, len(lifecycleRule.NoncurrentVersionTransitions)) - for _, v := range lifecycleRule.NoncurrentVersionTransitions { - t := make(map[string]interface{}) - if v.NoncurrentDays != nil { - t["days"] = int(aws.Int64Value(v.NoncurrentDays)) - } - if v.StorageClass != nil { - t["storage_class"] = aws.StringValue(v.StorageClass) - } - transitions = append(transitions, t) - } - rule["noncurrent_version_transition"] = schema.NewSet(transitionHash, transitions) - } - - lifecycleRules = append(lifecycleRules, rule) + if lifecycle, ok := lifecycleResponse.(*s3.GetBucketLifecycleConfigurationOutput); ok { + if err := d.Set("lifecycle_rule", flattenBucketLifecycleRules(lifecycle.Rules)); err != nil { + return fmt.Errorf("error setting 
lifecycle_rule: %s", err) } - } - if err := d.Set("lifecycle_rule", lifecycleRules); err != nil { - return fmt.Errorf("error setting lifecycle_rule: %s", err) + } else { + d.Set("lifecycle_rule", nil) } // Read the bucket replication configuration if configured outside this resource @@ -1520,163 +1408,167 @@ func resourceBucketInternalObjectLockConfigurationUpdate(conn *s3.S3, d *schema. return nil } -func resourceBucketLifecycleUpdate(conn *s3.S3, d *schema.ResourceData) error { - bucket := d.Get("bucket").(string) +func flattenBucketLifecycleRuleExpiration(expiration *s3.LifecycleExpiration) []interface{} { + if expiration == nil { + return []interface{}{} + } - lifecycleRules := d.Get("lifecycle_rule").([]interface{}) + m := make(map[string]interface{}) - if len(lifecycleRules) == 0 || lifecycleRules[0] == nil { - i := &s3.DeleteBucketLifecycleInput{ - Bucket: aws.String(bucket), - } + if expiration.Date != nil { + m["date"] = (aws.TimeValue(expiration.Date)).Format("2006-01-02") + } + if expiration.Days != nil { + m["days"] = int(aws.Int64Value(expiration.Days)) + } + if expiration.ExpiredObjectDeleteMarker != nil { + m["expired_object_delete_marker"] = aws.BoolValue(expiration.ExpiredObjectDeleteMarker) + } - _, err := conn.DeleteBucketLifecycle(i) - if err != nil { - return fmt.Errorf("Error removing S3 lifecycle: %s", err) - } - return nil + return []interface{}{m} +} + +func flattenBucketLifecycleRules(lifecycleRules []*s3.LifecycleRule) []interface{} { + if len(lifecycleRules) == 0 { + return []interface{}{} } - rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules)) + var results []interface{} - for i, lifecycleRule := range lifecycleRules { - r := lifecycleRule.(map[string]interface{}) + for _, lifecycleRule := range lifecycleRules { + if lifecycleRule == nil { + continue + } - rule := &s3.LifecycleRule{} + rule := make(map[string]interface{}) - // Filter - tags := Tags(tftags.New(r["tags"]).IgnoreAWS()) - filter := &s3.LifecycleRuleFilter{} - if 
len(tags) > 0 { - lifecycleRuleAndOp := &s3.LifecycleRuleAndOperator{} - lifecycleRuleAndOp.SetPrefix(r["prefix"].(string)) - lifecycleRuleAndOp.SetTags(tags) - filter.SetAnd(lifecycleRuleAndOp) - } else { - filter.SetPrefix(r["prefix"].(string)) + // AbortIncompleteMultipartUploadDays + if lifecycleRule.AbortIncompleteMultipartUpload != nil { + if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil { + rule["abort_incomplete_multipart_upload_days"] = int(aws.Int64Value(lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)) + } } - rule.SetFilter(filter) // ID - if val, ok := r["id"].(string); ok && val != "" { - rule.ID = aws.String(val) - } else { - rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-")) + if lifecycleRule.ID != nil { + rule["id"] = aws.StringValue(lifecycleRule.ID) } - // Enabled - if val, ok := r["enabled"].(bool); ok && val { - rule.Status = aws.String(s3.ExpirationStatusEnabled) - } else { - rule.Status = aws.String(s3.ExpirationStatusDisabled) + // Filter + if filter := lifecycleRule.Filter; filter != nil { + if filter.And != nil { + // Prefix + if filter.And.Prefix != nil { + rule["prefix"] = aws.StringValue(filter.And.Prefix) + } + // Tag + if len(filter.And.Tags) > 0 { + rule["tags"] = KeyValueTags(filter.And.Tags).IgnoreAWS().Map() + } + } else { + // Prefix + if filter.Prefix != nil { + rule["prefix"] = aws.StringValue(filter.Prefix) + } + // Tag + if filter.Tag != nil { + rule["tags"] = KeyValueTags([]*s3.Tag{filter.Tag}).IgnoreAWS().Map() + } + } + } + + // Prefix + if lifecycleRule.Prefix != nil { + rule["prefix"] = aws.StringValue(lifecycleRule.Prefix) } - // AbortIncompleteMultipartUpload - if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 { - rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{ - DaysAfterInitiation: aws.Int64(int64(val)), + // Enabled + if lifecycleRule.Status != nil { + if aws.StringValue(lifecycleRule.Status) == 
s3.ExpirationStatusEnabled { + rule["enabled"] = true + } else { + rule["enabled"] = false } } // Expiration - expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).([]interface{}) - if len(expiration) > 0 && expiration[0] != nil { - e := expiration[0].(map[string]interface{}) - i := &s3.LifecycleExpiration{} - if val, ok := e["date"].(string); ok && val != "" { - t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) - if err != nil { - return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) - } - i.Date = aws.Time(t) - } else if val, ok := e["days"].(int); ok && val > 0 { - i.Days = aws.Int64(int64(val)) - } else if val, ok := e["expired_object_delete_marker"].(bool); ok { - i.ExpiredObjectDeleteMarker = aws.Bool(val) - } - rule.Expiration = i + if lifecycleRule.Expiration != nil { + rule["expiration"] = flattenBucketLifecycleRuleExpiration(lifecycleRule.Expiration) } // NoncurrentVersionExpiration - nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).([]interface{}) - if len(nc_expiration) > 0 && nc_expiration[0] != nil { - e := nc_expiration[0].(map[string]interface{}) - - if val, ok := e["days"].(int); ok && val > 0 { - rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{ - NoncurrentDays: aws.Int64(int64(val)), - } + if lifecycleRule.NoncurrentVersionExpiration != nil { + e := make(map[string]interface{}) + if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil { + e["days"] = int(aws.Int64Value(lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays)) } + rule["noncurrent_version_expiration"] = []interface{}{e} } - // Transitions - transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List() - if len(transitions) > 0 { - rule.Transitions = make([]*s3.Transition, 0, len(transitions)) - for _, transition := range transitions { - transition := transition.(map[string]interface{}) - i := &s3.Transition{} - 
if val, ok := transition["date"].(string); ok && val != "" { - t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) - if err != nil { - return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) - } - i.Date = aws.Time(t) - } else if val, ok := transition["days"].(int); ok && val >= 0 { - i.Days = aws.Int64(int64(val)) - } - if val, ok := transition["storage_class"].(string); ok && val != "" { - i.StorageClass = aws.String(val) - } + // NoncurrentVersionTransition + if len(lifecycleRule.NoncurrentVersionTransitions) > 0 { + rule["noncurrent_version_transition"] = flattenBucketLifecycleRuleNoncurrentVersionTransitions(lifecycleRule.NoncurrentVersionTransitions) + } - rule.Transitions = append(rule.Transitions, i) - } + // Transition + if len(lifecycleRule.Transitions) > 0 { + rule["transition"] = flattenBucketLifecycleRuleTransitions(lifecycleRule.Transitions) } - // NoncurrentVersionTransitions - nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List() - if len(nc_transitions) > 0 { - rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions)) - for _, transition := range nc_transitions { - transition := transition.(map[string]interface{}) - i := &s3.NoncurrentVersionTransition{} - if val, ok := transition["days"].(int); ok && val >= 0 { - i.NoncurrentDays = aws.Int64(int64(val)) - } - if val, ok := transition["storage_class"].(string); ok && val != "" { - i.StorageClass = aws.String(val) - } - rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i) - } + results = append(results, rule) + } + + return results +} + +func flattenBucketLifecycleRuleNoncurrentVersionTransitions(transitions []*s3.NoncurrentVersionTransition) []interface{} { + if len(transitions) == 0 { + return []interface{}{} + } + + var results []interface{} + + for _, t := range transitions { + m := make(map[string]interface{}) + + if 
t.NoncurrentDays != nil { + m["days"] = int(aws.Int64Value(t.NoncurrentDays)) } - // As a lifecycle rule requires 1 or more transition/expiration actions, - // we explicitly pass a default ExpiredObjectDeleteMarker value to be able to create - // the rule while keeping the policy unaffected if the conditions are not met. - if rule.Expiration == nil && rule.NoncurrentVersionExpiration == nil && - rule.Transitions == nil && rule.NoncurrentVersionTransitions == nil && - rule.AbortIncompleteMultipartUpload == nil { - rule.Expiration = &s3.LifecycleExpiration{ExpiredObjectDeleteMarker: aws.Bool(false)} + if t.StorageClass != nil { + m["storage_class"] = aws.StringValue(t.StorageClass) } - rules = append(rules, rule) + results = append(results, m) } - i := &s3.PutBucketLifecycleConfigurationInput{ - Bucket: aws.String(bucket), - LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ - Rules: rules, - }, + return results +} + +func flattenBucketLifecycleRuleTransitions(transitions []*s3.Transition) []interface{} { + if len(transitions) == 0 { + return []interface{}{} } - _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { - return conn.PutBucketLifecycleConfiguration(i) - }) - if err != nil { - return fmt.Errorf("Error putting S3 lifecycle: %s", err) + var results []interface{} + + for _, t := range transitions { + m := make(map[string]interface{}) + + if t.Date != nil { + m["date"] = (aws.TimeValue(t.Date)).Format("2006-01-02") + } + if t.Days != nil { + m["days"] = int(aws.Int64Value(t.Days)) + } + if t.StorageClass != nil { + m["storage_class"] = aws.StringValue(t.StorageClass) + } + + results = append(results, m) } - return nil + return results } func flattenBucketLoggingEnabled(loggingEnabled *s3.LoggingEnabled) []interface{} { @@ -2111,26 +2003,6 @@ func grantHash(v interface{}) int { return create.StringHashcode(buf.String()) } -func transitionHash(v interface{}) int { - var buf bytes.Buffer - m, ok := v.(map[string]interface{}) 
- - if !ok { - return 0 - } - - if v, ok := m["date"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - if v, ok := m["days"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) - } - if v, ok := m["storage_class"]; ok { - buf.WriteString(fmt.Sprintf("%s-", v.(string))) - } - return create.StringHashcode(buf.String()) -} - func rulesHash(v interface{}) int { var buf bytes.Buffer m, ok := v.(map[string]interface{}) diff --git a/internal/service/s3/bucket_lifecycle_configuration_test.go b/internal/service/s3/bucket_lifecycle_configuration_test.go index f6e0afdc66b..4e6cc49723b 100644 --- a/internal/service/s3/bucket_lifecycle_configuration_test.go +++ b/internal/service/s3/bucket_lifecycle_configuration_test.go @@ -532,12 +532,6 @@ func testAccBucketLifecycleConfigurationBasicConfig(rName string) string { resource "aws_s3_bucket" "test" { bucket = %[1]q acl = "private" - - lifecycle { - ignore_changes = [ - lifecycle_rule - ] - } } resource "aws_s3_bucket_lifecycle_configuration" "test" { @@ -562,12 +556,6 @@ func testAccBucketLifecycleConfiguration_Basic_StatusConfig(rName, status string resource "aws_s3_bucket" "test" { bucket = %[1]q acl = "private" - - lifecycle { - ignore_changes = [ - lifecycle_rule - ] - } } resource "aws_s3_bucket_lifecycle_configuration" "test" { @@ -592,12 +580,6 @@ func testAccBucketLifecycleConfiguration_Basic_UpdateConfig(rName, date, prefix resource "aws_s3_bucket" "test" { bucket = %[1]q acl = "private" - - lifecycle { - ignore_changes = [ - lifecycle_rule - ] - } } resource "aws_s3_bucket_lifecycle_configuration" "test" { @@ -624,12 +606,6 @@ func testAccBucketLifecycleConfiguration_Basic_PrefixConfig(rName, prefix string resource "aws_s3_bucket" "test" { bucket = %[1]q acl = "private" - - lifecycle { - ignore_changes = [ - lifecycle_rule - ] - } } resource "aws_s3_bucket_lifecycle_configuration" "test" { @@ -654,12 +630,6 @@ func testAccBucketLifecycleConfiguration_RuleExpiration_ExpiredDeleteMarkerConfi resource 
"aws_s3_bucket" "test" { bucket = %[1]q acl = "private" - - lifecycle { - ignore_changes = [ - lifecycle_rule - ] - } } resource "aws_s3_bucket_lifecycle_configuration" "test" { @@ -685,12 +655,6 @@ func testAccBucketLifecycleConfiguration_RuleExpiration_EmptyConfigurationBlockC resource "aws_s3_bucket" "test" { bucket = %[1]q acl = "private" - - lifecycle { - ignore_changes = [ - lifecycle_rule - ] - } } resource "aws_s3_bucket_lifecycle_configuration" "test" { @@ -714,12 +678,6 @@ func testAccBucketLifecycleConfiguration_RuleAbortIncompleteMultipartUploadConfi resource "aws_s3_bucket" "test" { bucket = %[1]q acl = "private" - - lifecycle { - ignore_changes = [ - lifecycle_rule - ] - } } resource "aws_s3_bucket_lifecycle_configuration" "test" { @@ -745,12 +703,6 @@ func testAccBucketLifecycleConfiguration_MultipleRulesConfig(rName, date string) resource "aws_s3_bucket" "test" { bucket = %[1]q acl = "private" - - lifecycle { - ignore_changes = [ - lifecycle_rule - ] - } } resource "aws_s3_bucket_lifecycle_configuration" "test" { @@ -809,12 +761,6 @@ func testAccBucketLifecycleConfiguration_NonCurrentVersionExpirationConfig(rName resource "aws_s3_bucket" "test" { bucket = %[1]q acl = "private" - - lifecycle { - ignore_changes = [ - lifecycle_rule - ] - } } resource "aws_s3_bucket_lifecycle_configuration" "test" { @@ -843,12 +789,6 @@ func testAccBucketLifecycleConfiguration_NonCurrentVersionTransitionConfig(rName resource "aws_s3_bucket" "test" { bucket = %[1]q acl = "private" - - lifecycle { - ignore_changes = [ - lifecycle_rule - ] - } } resource "aws_s3_bucket_lifecycle_configuration" "test" { diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 0f6072fb374..c47bff4d22e 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -523,208 +523,6 @@ func TestAccS3Bucket_Manage_versioningAndMfaDeleteDisabled(t *testing.T) { }) } -func TestAccS3Bucket_Manage_lifecycleBasic(t *testing.T) { - 
bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), - Providers: acctest.Providers, - CheckDestroy: testAccCheckBucketDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBucketWithLifecycleConfig(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckBucketExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.id", "id1"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.prefix", "path1/"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.days", "365"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.date", ""), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.expired_object_delete_marker", "false"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ - "date": "", - "days": "30", - "storage_class": "STANDARD_IA", - }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ - "date": "", - "days": "60", - "storage_class": "INTELLIGENT_TIERING", - }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ - "date": "", - "days": "90", - "storage_class": "ONEZONE_IA", - }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ - "date": "", - "days": "120", - "storage_class": "GLACIER", - }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ - "date": "", - "days": "210", - "storage_class": "DEEP_ARCHIVE", - }), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.id", "id2"), - resource.TestCheckResourceAttr(resourceName, 
"lifecycle_rule.1.prefix", "path2/"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.expiration.0.date", "2016-01-12"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.expiration.0.days", "0"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.expiration.0.expired_object_delete_marker", "false"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.2.id", "id3"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.2.prefix", "path3/"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.2.transition.*", map[string]string{ - "days": "0", - }), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.id", "id4"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.prefix", "path4/"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.tags.tagKey", "tagValue"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.tags.terraform", "hashicorp"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.4.id", "id5"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.4.tags.tagKey", "tagValue"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.4.tags.terraform", "hashicorp"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.4.transition.*", map[string]string{ - "days": "0", - "storage_class": "GLACIER", - }), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.5.id", "id6"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.5.tags.tagKey", "tagValue"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.5.transition.*", map[string]string{ - "days": "0", - "storage_class": "GLACIER", - }), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, - }, - { - Config: testAccBucketWithVersioningLifecycleConfig(bucketName), - Check: 
resource.ComposeTestCheckFunc( - testAccCheckBucketExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.id", "id1"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.prefix", "path1/"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.enabled", "true"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.noncurrent_version_expiration.0.days", "365"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.noncurrent_version_transition.*", map[string]string{ - "days": "30", - "storage_class": "STANDARD_IA", - }), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.noncurrent_version_transition.*", map[string]string{ - "days": "60", - "storage_class": "GLACIER", - }), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.id", "id2"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.prefix", "path2/"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.enabled", "false"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.noncurrent_version_expiration.0.days", "365"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.2.id", "id3"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.2.prefix", "path3/"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.2.noncurrent_version_transition.*", map[string]string{ - "days": "0", - "storage_class": "GLACIER", - }), - ), - }, - { - Config: testAccBucketConfig_Basic(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckBucketExists(resourceName), - ), - }, - }, - }) -} - -func TestAccS3Bucket_Manage_lifecycleExpireMarkerOnly(t *testing.T) { - bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), - Providers: 
acctest.Providers, - CheckDestroy: testAccCheckBucketDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBucketWithLifecycleExpireMarkerConfig(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckBucketExists(resourceName), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.id", "id1"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.prefix", "path1/"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.days", "0"), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.date", ""), - resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.expired_object_delete_marker", "true"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, - }, - { - Config: testAccBucketConfig_Basic(bucketName), - Check: resource.ComposeTestCheckFunc( - testAccCheckBucketExists(resourceName), - ), - }, - }, - }) -} - -// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/11420 -func TestAccS3Bucket_Manage_lifecycleRuleExpirationEmptyBlock(t *testing.T) { - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_s3_bucket.bucket" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), - Providers: acctest.Providers, - CheckDestroy: testAccCheckBucketDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBucketLifecycleRuleExpirationEmptyConfigurationBlockConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckBucketExists(resourceName), - ), - }, - }, - }) -} - -// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/15138 -func TestAccS3Bucket_Manage_lifecycleRuleAbortIncompleteMultipartUploadDaysNoExpiration(t *testing.T) { - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := 
"aws_s3_bucket.bucket" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), - Providers: acctest.Providers, - CheckDestroy: testAccCheckBucketDestroy, - Steps: []resource.TestStep{ - { - Config: testAccBucketLifecycleRuleAbortIncompleteMultipartUploadDaysConfig(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckBucketExists(resourceName), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, - }, - }, - }) -} - func TestAccS3Bucket_Manage_objectLock(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resourceName := "aws_s3_bucket.arbitrary" @@ -1630,247 +1428,6 @@ resource "aws_s3_bucket" "bucket" { `, bucketName, enabled, mfaDelete) } -func testAccBucketWithLifecycleConfig(bucketName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { - bucket = %[1]q - - lifecycle_rule { - id = "id1" - prefix = "path1/" - enabled = true - - expiration { - days = 365 - } - - transition { - days = 30 - storage_class = "STANDARD_IA" - } - - transition { - days = 60 - storage_class = "INTELLIGENT_TIERING" - } - - transition { - days = 90 - storage_class = "ONEZONE_IA" - } - - transition { - days = 120 - storage_class = "GLACIER" - } - - transition { - days = 210 - storage_class = "DEEP_ARCHIVE" - } - } - - lifecycle_rule { - id = "id2" - prefix = "path2/" - enabled = true - - expiration { - date = "2016-01-12" - } - } - - lifecycle_rule { - id = "id3" - prefix = "path3/" - enabled = true - - transition { - days = 0 - storage_class = "GLACIER" - } - } - - lifecycle_rule { - id = "id4" - prefix = "path4/" - enabled = true - - tags = { - "tagKey" = "tagValue" - "terraform" = "hashicorp" - } - - expiration { - date = "2016-01-12" - } - } - - lifecycle_rule { - id = "id5" - enabled = true - - tags = { - "tagKey" = "tagValue" - "terraform" = 
"hashicorp" - } - - transition { - days = 0 - storage_class = "GLACIER" - } - } - - lifecycle_rule { - id = "id6" - enabled = true - - tags = { - "tagKey" = "tagValue" - } - - transition { - days = 0 - storage_class = "GLACIER" - } - } - - lifecycle { - ignore_changes = [ - grant, - ] - } -} - -resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id - acl = "private" -} -`, bucketName) -} - -func testAccBucketWithLifecycleExpireMarkerConfig(bucketName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { - bucket = %[1]q - - lifecycle_rule { - id = "id1" - prefix = "path1/" - enabled = true - - expiration { - expired_object_delete_marker = "true" - } - } - - lifecycle { - ignore_changes = [ - grant, - ] - } -} - -resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id - acl = "private" -} -`, bucketName) -} - -func testAccBucketWithVersioningLifecycleConfig(bucketName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { - bucket = %[1]q - - versioning { - enabled = false - } - - lifecycle_rule { - id = "id1" - prefix = "path1/" - enabled = true - - noncurrent_version_expiration { - days = 365 - } - - noncurrent_version_transition { - days = 30 - storage_class = "STANDARD_IA" - } - - noncurrent_version_transition { - days = 60 - storage_class = "GLACIER" - } - } - - lifecycle_rule { - id = "id2" - prefix = "path2/" - enabled = false - - noncurrent_version_expiration { - days = 365 - } - } - - lifecycle_rule { - id = "id3" - prefix = "path3/" - enabled = true - - noncurrent_version_transition { - days = 0 - storage_class = "GLACIER" - } - } - - lifecycle { - ignore_changes = [ - grant, - ] - } -} - -resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id - acl = "private" -} -`, bucketName) -} - -func testAccBucketLifecycleRuleExpirationEmptyConfigurationBlockConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { - bucket = %[1]q - - 
lifecycle_rule { - enabled = true - id = "id1" - - expiration {} - } -} -`, rName) -} - -func testAccBucketLifecycleRuleAbortIncompleteMultipartUploadDaysConfig(rName string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { - bucket = %[1]q - - lifecycle_rule { - abort_incomplete_multipart_upload_days = 7 - enabled = true - id = "id1" - } -} -`, rName) -} - func testAccObjectLockEnabledNoDefaultRetention(bucketName string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "arbitrary" { diff --git a/internal/service/s3/validate.go b/internal/service/s3/validate.go deleted file mode 100644 index 50185c826f9..00000000000 --- a/internal/service/s3/validate.go +++ /dev/null @@ -1,17 +0,0 @@ -package s3 - -import ( - "fmt" - "time" -) - -func validBucketLifecycleTimestamp(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - _, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", value)) - if err != nil { - errors = append(errors, fmt.Errorf( - "%q cannot be parsed as RFC3339 Timestamp Format", value)) - } - - return -} diff --git a/internal/service/s3/validate_test.go b/internal/service/s3/validate_test.go deleted file mode 100644 index 847122d2b91..00000000000 --- a/internal/service/s3/validate_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package s3 - -import ( - "testing" -) - -func TestValidBucketLifecycleTimestamp(t *testing.T) { - validDates := []string{ - "2016-01-01", - "2006-01-02", - } - - for _, v := range validDates { - _, errors := validBucketLifecycleTimestamp(v, "date") - if len(errors) != 0 { - t.Fatalf("%q should be valid date: %q", v, errors) - } - } - - invalidDates := []string{ - "Jan 01 2016", - "20160101", - } - - for _, v := range invalidDates { - _, errors := validBucketLifecycleTimestamp(v, "date") - if len(errors) == 0 { - t.Fatalf("%q should be invalid date", v) - } - } -} diff --git a/website/docs/guides/version-4-upgrade.html.md b/website/docs/guides/version-4-upgrade.html.md index 
ab43c6dbd12..d45ba0b18ca 100644 --- a/website/docs/guides/version-4-upgrade.html.md +++ b/website/docs/guides/version-4-upgrade.html.md @@ -810,6 +810,130 @@ The resources that were imported are shown above. These resources are now in your Terraform state and will henceforth be managed by Terraform. ``` +### `lifecycle_rule` Argument deprecation + +Switch your Terraform configuration to the `aws_s3_bucket_lifecycle_configuration` resource instead. + +For example, given this previous configuration: + +```terraform +resource "aws_s3_bucket" "example" { + # ... other configuration ... + lifecycle_rule { + id = "log" + enabled = true + prefix = "log/" + tags = { + rule = "log" + autoclean = "true" + } + transition { + days = 30 + storage_class = "STANDARD_IA" + } + transition { + days = 60 + storage_class = "GLACIER" + } + expiration { + days = 90 + } + } + + lifecycle_rule { + id = "tmp" + prefix = "tmp/" + enabled = true + expiration { + date = "2022-12-31" + } + } +} +``` + +It will receive the following error after upgrading: + +``` +│ Error: Value for unconfigurable attribute +│ +│ with aws_s3_bucket.example, +│ on main.tf line 1, in resource "aws_s3_bucket" "example": +│ 1: resource "aws_s3_bucket" "example" { +│ +│ Can't configure a value for "lifecycle_rule": its value will be decided automatically based on the result of applying this configuration. +``` + +Since the `lifecycle_rule` argument changed to read-only, the recommendation is to update the configuration to use the `aws_s3_bucket_lifecycle_configuration` +resource and remove any references to `lifecycle_rule` and its nested arguments in the `aws_s3_bucket` resource: + +```terraform +resource "aws_s3_bucket" "example" { + # ... other configuration ... 
+} + +resource "aws_s3_bucket_lifecycle_configuration" "example" { + bucket = aws_s3_bucket.example.id + + rule { + id = "log" + status = "Enabled" + + filter { + and { + prefix = "log/" + tags = { + rule = "log" + autoclean = "true" + } + } + } + + transition { + days = 30 + storage_class = "STANDARD_IA" + } + + transition { + days = 60 + storage_class = "GLACIER" + } + + expiration { + days = 90 + } + } + + rule { + id = "tmp" + + filter { + prefix = "tmp/" + } + + expiration { + date = "2022-12-31T00:00:00Z" + } + + status = "Enabled" + } +} +``` + +It is then recommended to run `terraform import` on each new resource to prevent data loss, e.g. + +```shell +$ terraform import aws_s3_bucket_lifecycle_configuration.example example +aws_s3_bucket_lifecycle_configuration.example: Importing from ID "example"... +aws_s3_bucket_lifecycle_configuration.example: Import prepared! + Prepared aws_s3_bucket_lifecycle_configuration for import +aws_s3_bucket_lifecycle_configuration.example: Refreshing state... [id=example] + +Import successful! + +The resources that were imported are shown above. These resources are now in +your Terraform state and will henceforth be managed by Terraform. +``` + ### `logging` Argument deprecation Switch your Terraform configuration to the `aws_s3_bucket_logging` resource instead. 
diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index 697b4c4a5ce..5e0f47cd3e7 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -66,84 +66,8 @@ See the [`aws_s3_bucket_logging` resource](s3_bucket_logging.html.markdown) for ### Using object lifecycle -```terraform -resource "aws_s3_bucket" "bucket" { - bucket = "my-bucket" - - lifecycle_rule { - id = "log" - enabled = true - - prefix = "log/" - - tags = { - rule = "log" - autoclean = "true" - } - - transition { - days = 30 - storage_class = "STANDARD_IA" # or "ONEZONE_IA" - } - - transition { - days = 60 - storage_class = "GLACIER" - } - - expiration { - days = 90 - } - } - - lifecycle_rule { - id = "tmp" - prefix = "tmp/" - enabled = true - - expiration { - date = "2016-01-12" - } - } -} - -resource "aws_s3_bucket_acl" "bucket_acl" { - bucket = aws_s3_bucket.bucket.id - acl = "private" -} - -resource "aws_s3_bucket" "versioning_bucket" { - bucket = "my-versioning-bucket" - - versioning { - enabled = true - } - - lifecycle_rule { - prefix = "config/" - enabled = true - - noncurrent_version_transition { - days = 30 - storage_class = "STANDARD_IA" - } - - noncurrent_version_transition { - days = 60 - storage_class = "GLACIER" - } - - noncurrent_version_expiration { - days = 90 - } - } -} - -resource "aws_s3_bucket_acl" "versioning_bucket_acl" { - bucket = aws_s3_bucket.versioning_bucket.id - acl = "private" -} -``` +The `lifecycle_rule` argument is read-only as of version 4.0 of the Terraform AWS Provider. +See the [`aws_s3_bucket_lifecycle_configuration` resource](s3_bucket_lifecycle_configuration.html.markdown) for configuration details. ### Using replication configuration @@ -188,7 +112,6 @@ The following arguments are supported: * `tags` - (Optional) A map of tags to assign to the bucket. 
If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `force_destroy` - (Optional, Default:`false`) A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable. * `versioning` - (Optional) A state of [versioning](https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) (documented below) -* `lifecycle_rule` - (Optional) A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) (documented below). * `object_lock_configuration` - (Optional) A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) (documented below) The `versioning` object supports the following: @@ -196,41 +119,6 @@ The `versioning` object supports the following: * `enabled` - (Optional) Enable versioning. Once you version-enable a bucket, it can never return to an unversioned state. You can, however, suspend versioning on that bucket. * `mfa_delete` - (Optional) Enable MFA delete for either `Change the versioning state of your bucket` or `Permanently delete an object version`. Default is `false`. This cannot be used to toggle this setting but is available to allow managed buckets to reflect the state in AWS -The `lifecycle_rule` object supports the following: - -* `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. -* `prefix` - (Optional) Object key prefix identifying one or more objects to which the rule applies. -* `tags` - (Optional) Specifies object tags key and value. -* `enabled` - (Required) Specifies lifecycle rule status. 
-* `abort_incomplete_multipart_upload_days` (Optional) Specifies the number of days after initiating a multipart upload when the multipart upload must be completed. -* `expiration` - (Optional) Specifies a period in the object's expire (documented below). -* `transition` - (Optional) Specifies a period in the object's transitions (documented below). -* `noncurrent_version_expiration` - (Optional) Specifies when noncurrent object versions expire (documented below). -* `noncurrent_version_transition` - (Optional) Specifies when noncurrent object versions transitions (documented below). - -At least one of `abort_incomplete_multipart_upload_days`, `expiration`, `transition`, `noncurrent_version_expiration`, `noncurrent_version_transition` must be specified. - -The `expiration` object supports the following - -* `date` (Optional) Specifies the date after which you want the corresponding action to take effect. -* `days` (Optional) Specifies the number of days after object creation when the specific rule action takes effect. -* `expired_object_delete_marker` (Optional) On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Amazon S3 to delete expired object delete markers. This cannot be specified with Days or Date in a Lifecycle Expiration Policy. - -The `transition` object supports the following - -* `date` (Optional) Specifies the date after which you want the corresponding action to take effect. -* `days` (Optional) Specifies the number of days after object creation when the specific rule action takes effect. -* `storage_class` (Required) Specifies the Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) to which you want the object to transition. - -The `noncurrent_version_expiration` object supports the following - -* `days` (Required) Specifies the number of days noncurrent object versions expire. 
- -The `noncurrent_version_transition` object supports the following - -* `days` (Required) Specifies the number of days noncurrent object versions transition. -* `storage_class` (Required) Specifies the Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) to which you want the object to transition. - The `grant` object supports the following: * `id` - (optional) Canonical user id to grant for. Used only when `type` is `CanonicalUser`. @@ -275,6 +163,25 @@ In addition to all arguments above, the following attributes are exported: * `expose_headers` - Set of headers in the response that customers are able to access from their applications. * `max_age_seconds` The time in seconds that browser can cache the response for a preflight request. * `hosted_zone_id` - The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. +* `lifecycle_rule` - A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). + * `id` - Unique identifier for the rule. + * `prefix` - Object key prefix identifying one or more objects to which the rule applies. + * `tags` - Object tags key and value. + * `enabled` - Lifecycle rule status. + * `abort_incomplete_multipart_upload_days` - Number of days after initiating a multipart upload when the multipart upload must be completed. + * `expiration` - The expiration for the lifecycle of the object in the form of date, days, and whether the object has a delete marker. + * `date` - Indicates at what date the object is to be moved or deleted. + * `days` - Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer. + * `expired_object_delete_marker` - Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. 
+ * `transition` - Specifies when an Amazon S3 object transitions to a specified storage class. + * `date` - The date after which you want the corresponding action to take effect. + * `days` - The number of days after object creation when the specific rule action takes effect. + * `storage_class` - The Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) an object will transition to. + * `noncurrent_version_expiration` - When noncurrent object versions expire. + * `days` - The number of days noncurrent object versions expire. + * `noncurrent_version_transition` - When noncurrent object versions transition. + * `days` - The number of days noncurrent object versions transition. + * `storage_class` - The Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) an object will transition to. * `logging` - The [logging parameters](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) for the bucket. * `target_bucket` - The name of the bucket that receives the log objects. * `target_prefix` - The prefix for all log object keys/