diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index d0c9fb82917..231d4d46400 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -7,7 +7,6 @@ import ( "log" "net/http" "net/url" - "regexp" "strings" "time" @@ -257,108 +256,104 @@ func ResourceBucket() *schema.Resource { "lifecycle_rule": { Type: schema.TypeList, + Optional: true, Computed: true, Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "id": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringLenBetween(0, 255), }, "prefix": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeString, + Optional: true, }, - "tags": tftags.TagsSchemaComputedDeprecated("Use the aws_s3_bucket_lifecycle_configuration resource instead"), + "tags": tftags.TagsSchema(), "enabled": { - Type: schema.TypeBool, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeBool, + Required: true, }, "abort_incomplete_multipart_upload_days": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeInt, + Optional: true, }, "expiration": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeList, + Optional: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "date": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: 
validBucketLifecycleTimestamp, }, "days": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), }, "expired_object_delete_marker": { - Type: schema.TypeBool, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeBool, + Optional: true, }, }, }, }, "noncurrent_version_expiration": { - Type: schema.TypeList, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeList, + MaxItems: 1, + Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "days": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), }, }, }, }, "transition": { - Type: schema.TypeSet, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeSet, + Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "date": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeString, + Optional: true, + ValidateFunc: validBucketLifecycleTimestamp, }, "days": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), }, "storage_class": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.TransitionStorageClass_Values(), false), }, }, }, }, "noncurrent_version_transition": { - Type: 
schema.TypeSet, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeSet, + Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "days": { - Type: schema.TypeInt, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), }, "storage_class": { - Type: schema.TypeString, - Computed: true, - Deprecated: "Use the aws_s3_bucket_lifecycle_configuration resource instead", + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.TransitionStorageClass_Values(), false), }, }, }, @@ -784,6 +779,12 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("lifecycle_rule") { + if err := resourceBucketInternalLifecycleUpdate(conn, d); err != nil { + return fmt.Errorf("error updating S3 Bucket (%s) Lifecycle Rules: %w", d.Id(), err) + } + } + if d.HasChange("object_lock_configuration") { if err := resourceBucketInternalObjectLockConfigurationUpdate(conn, d); err != nil { return fmt.Errorf("error updating S3 Bucket (%s) Object Lock configuration: %w", d.Id(), err) @@ -1375,40 +1376,6 @@ func BucketRegionalDomainName(bucket string, region string) (string, error) { return fmt.Sprintf("%s.%s", bucket, strings.TrimPrefix(endpoint.URL, "https://")), nil } -// ValidBucketName validates any S3 bucket name that is not inside the us-east-1 region. -// Buckets outside of this region have to be DNS-compliant. 
After the same restrictions are -// applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc -func ValidBucketName(value string, region string) error { - if region != endpoints.UsEast1RegionID { - if (len(value) < 3) || (len(value) > 63) { - return fmt.Errorf("%q must contain from 3 to 63 characters", value) - } - if !regexp.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) { - return fmt.Errorf("only lowercase alphanumeric characters and hyphens allowed in %q", value) - } - if regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) { - return fmt.Errorf("%q must not be formatted as an IP address", value) - } - if strings.HasPrefix(value, `.`) { - return fmt.Errorf("%q cannot start with a period", value) - } - if strings.HasSuffix(value, `.`) { - return fmt.Errorf("%q cannot end with a period", value) - } - if strings.Contains(value, `..`) { - return fmt.Errorf("%q can be only one period between labels", value) - } - } else { - if len(value) > 255 { - return fmt.Errorf("%q must contain less than 256 characters", value) - } - if !regexp.MustCompile(`^[0-9a-zA-Z-._]+$`).MatchString(value) { - return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value) - } - } - return nil -} - type S3Website struct { Endpoint, Domain string } @@ -1636,6 +1603,162 @@ func resourceBucketInternalGrantsUpdate(conn *s3.S3, d *schema.ResourceData) err return err } +func resourceBucketInternalLifecycleUpdate(conn *s3.S3, d *schema.ResourceData) error { + lifecycleRules := d.Get("lifecycle_rule").([]interface{}) + + if len(lifecycleRules) == 0 || lifecycleRules[0] == nil { + input := &s3.DeleteBucketLifecycleInput{ + Bucket: aws.String(d.Id()), + } + + _, err := conn.DeleteBucketLifecycle(input) + + if err != nil { + return fmt.Errorf("error removing S3 Bucket (%s) lifecycle: %w", d.Id(), err) + } + + return nil + } + + rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules)) + + for i, 
lifecycleRule := range lifecycleRules { + r := lifecycleRule.(map[string]interface{}) + + rule := &s3.LifecycleRule{} + + // Filter + tags := Tags(tftags.New(r["tags"]).IgnoreAWS()) + filter := &s3.LifecycleRuleFilter{} + if len(tags) > 0 { + lifecycleRuleAndOp := &s3.LifecycleRuleAndOperator{} + lifecycleRuleAndOp.SetPrefix(r["prefix"].(string)) + lifecycleRuleAndOp.SetTags(tags) + filter.SetAnd(lifecycleRuleAndOp) + } else { + filter.SetPrefix(r["prefix"].(string)) + } + rule.SetFilter(filter) + + // ID + if val, ok := r["id"].(string); ok && val != "" { + rule.ID = aws.String(val) + } else { + rule.ID = aws.String(resource.PrefixedUniqueId("tf-s3-lifecycle-")) + } + + // Enabled + if val, ok := r["enabled"].(bool); ok && val { + rule.Status = aws.String(s3.ExpirationStatusEnabled) + } else { + rule.Status = aws.String(s3.ExpirationStatusDisabled) + } + + // AbortIncompleteMultipartUpload + if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 { + rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{ + DaysAfterInitiation: aws.Int64(int64(val)), + } + } + + // Expiration + expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).([]interface{}) + if len(expiration) > 0 && expiration[0] != nil { + e := expiration[0].(map[string]interface{}) + i := &s3.LifecycleExpiration{} + if val, ok := e["date"].(string); ok && val != "" { + t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) + if err != nil { + return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) + } + i.Date = aws.Time(t) + } else if val, ok := e["days"].(int); ok && val > 0 { + i.Days = aws.Int64(int64(val)) + } else if val, ok := e["expired_object_delete_marker"].(bool); ok { + i.ExpiredObjectDeleteMarker = aws.Bool(val) + } + rule.Expiration = i + } + + // NoncurrentVersionExpiration + nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).([]interface{}) + if 
len(nc_expiration) > 0 && nc_expiration[0] != nil { + e := nc_expiration[0].(map[string]interface{}) + + if val, ok := e["days"].(int); ok && val > 0 { + rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{ + NoncurrentDays: aws.Int64(int64(val)), + } + } + } + + // Transitions + transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List() + if len(transitions) > 0 { + rule.Transitions = make([]*s3.Transition, 0, len(transitions)) + for _, transition := range transitions { + transition := transition.(map[string]interface{}) + i := &s3.Transition{} + if val, ok := transition["date"].(string); ok && val != "" { + t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) + if err != nil { + return fmt.Errorf("Error Parsing AWS S3 Bucket Lifecycle Transition Date: %s", err.Error()) + } + i.Date = aws.Time(t) + } else if val, ok := transition["days"].(int); ok && val >= 0 { + i.Days = aws.Int64(int64(val)) + } + if val, ok := transition["storage_class"].(string); ok && val != "" { + i.StorageClass = aws.String(val) + } + + rule.Transitions = append(rule.Transitions, i) + } + } + // NoncurrentVersionTransitions + nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List() + if len(nc_transitions) > 0 { + rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions)) + for _, transition := range nc_transitions { + transition := transition.(map[string]interface{}) + i := &s3.NoncurrentVersionTransition{} + if val, ok := transition["days"].(int); ok && val >= 0 { + i.NoncurrentDays = aws.Int64(int64(val)) + } + if val, ok := transition["storage_class"].(string); ok && val != "" { + i.StorageClass = aws.String(val) + } + + rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i) + } + } + + // As a lifecycle rule requires 1 or more transition/expiration actions, + // we explicitly pass a default
ExpiredObjectDeleteMarker value to be able to create + // the rule while keeping the policy unaffected if the conditions are not met. + if rule.Expiration == nil && rule.NoncurrentVersionExpiration == nil && + rule.Transitions == nil && rule.NoncurrentVersionTransitions == nil && + rule.AbortIncompleteMultipartUpload == nil { + rule.Expiration = &s3.LifecycleExpiration{ExpiredObjectDeleteMarker: aws.Bool(false)} + } + + rules = append(rules, rule) + } + + input := &s3.PutBucketLifecycleConfigurationInput{ + Bucket: aws.String(d.Id()), + LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ + Rules: rules, + }, + } + + _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.PutBucketLifecycleConfiguration(input) + }) + + return err +} + func resourceBucketInternalObjectLockConfigurationUpdate(conn *s3.S3, d *schema.ResourceData) error { // S3 Object Lock configuration cannot be deleted, only updated. req := &s3.PutObjectLockConfigurationInput{ diff --git a/internal/service/s3/bucket_acl_test.go b/internal/service/s3/bucket_acl_test.go index 2a07172d9a5..e3825bceebd 100644 --- a/internal/service/s3/bucket_acl_test.go +++ b/internal/service/s3/bucket_acl_test.go @@ -305,7 +305,7 @@ func TestAccS3BucketAcl_disappears(t *testing.T) { func TestAccS3BucketAcl_migrate_aclNoChange(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - bucketResourceName := "aws_s3_bucket.bucket" + bucketResourceName := "aws_s3_bucket.test" resourceName := "aws_s3_bucket_acl.test" resource.ParallelTest(t, resource.TestCase{ @@ -334,7 +334,7 @@ func TestAccS3BucketAcl_migrate_aclNoChange(t *testing.T) { func TestAccS3BucketAcl_migrate_aclWithChange(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - bucketResourceName := "aws_s3_bucket.bucket" + bucketResourceName := "aws_s3_bucket.test" resourceName := "aws_s3_bucket_acl.test" resource.ParallelTest(t, resource.TestCase{ @@ -363,7 
+363,7 @@ func TestAccS3BucketAcl_migrate_aclWithChange(t *testing.T) { func TestAccS3BucketAcl_migrate_grantsNoChange(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - bucketResourceName := "aws_s3_bucket.bucket" + bucketResourceName := "aws_s3_bucket.test" resourceName := "aws_s3_bucket_acl.test" resource.ParallelTest(t, resource.TestCase{ @@ -412,7 +412,7 @@ func TestAccS3BucketAcl_migrate_grantsNoChange(t *testing.T) { func TestAccS3BucketAcl_migrate_grantsWithChange(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - bucketResourceName := "aws_s3_bucket.bucket" + bucketResourceName := "aws_s3_bucket.test" resourceName := "aws_s3_bucket_acl.test" resource.ParallelTest(t, resource.TestCase{ @@ -755,12 +755,12 @@ resource "aws_s3_bucket_acl" "test" { func testAccBucketAcl_Migrate_AclConfig(rName, acl string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id acl = %[2]q } `, rName, acl) @@ -770,12 +770,12 @@ func testAccBucketAcl_Migrate_GrantsNoChangeConfig(rName string) string { return fmt.Sprintf(` data "aws_canonical_user_id" "current" {} -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id access_control_policy { grant { grantee { @@ -807,12 +807,12 @@ data "aws_canonical_user_id" "current" {} data "aws_partition" "current" {} -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id access_control_policy { grant { grantee { diff --git a/internal/service/s3/bucket_lifecycle_configuration_test.go 
b/internal/service/s3/bucket_lifecycle_configuration_test.go index c7633061ce2..1d75237e10a 100644 --- a/internal/service/s3/bucket_lifecycle_configuration_test.go +++ b/internal/service/s3/bucket_lifecycle_configuration_test.go @@ -842,6 +842,89 @@ func TestAccS3BucketLifecycleConfiguration_EmptyFilter_NonCurrentVersions(t *tes }, }) } +func TestAccS3BucketLifecycleConfiguration_migrate_noChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_lifecycle_configuration.test" + bucketResourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLifecycleExpireMarker(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.id", "id1"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.enabled", "true"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.prefix", "path1/"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.expiration.0.days", "0"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.expiration.0.date", ""), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.expiration.0.expired_object_delete_marker", "true"), + ), + }, + { + Config: testAccBucketLifecycleConfiguration_Migrate_NoChangeConfig(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketLifecycleConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "bucket", bucketResourceName, "bucket"), + 
resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.id", "id1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.status", "Enabled"), + resource.TestCheckResourceAttr(resourceName, "rule.0.prefix", "path1/"), + resource.TestCheckResourceAttr(resourceName, "rule.0.expiration.0.days", "0"), + resource.TestCheckResourceAttr(resourceName, "rule.0.expiration.0.date", ""), + resource.TestCheckResourceAttr(resourceName, "rule.0.expiration.0.expired_object_delete_marker", "true"), + ), + }, + }, + }) +} + +func TestAccS3BucketLifecycleConfiguration_migrate_withChange(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_lifecycle_configuration.test" + bucketResourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLifecycleExpireMarker(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketExists(bucketResourceName), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.#", "1"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.id", "id1"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.enabled", "true"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.prefix", "path1/"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.expiration.0.days", "0"), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.expiration.0.date", ""), + resource.TestCheckResourceAttr(bucketResourceName, "lifecycle_rule.0.expiration.0.expired_object_delete_marker", "true"), + ), + }, + { + Config: 
testAccBucketLifecycleConfiguration_Migrate_WithChangeConfig(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckBucketLifecycleConfigurationExists(resourceName), + resource.TestCheckResourceAttrPair(resourceName, "bucket", bucketResourceName, "bucket"), + resource.TestCheckResourceAttr(resourceName, "rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.id", "id1"), + resource.TestCheckResourceAttr(resourceName, "rule.0.status", "Disabled"), + resource.TestCheckResourceAttr(resourceName, "rule.0.prefix", "path1/"), + resource.TestCheckResourceAttr(resourceName, "rule.0.expiration.0.days", "0"), + resource.TestCheckResourceAttr(resourceName, "rule.0.expiration.0.date", ""), + resource.TestCheckResourceAttr(resourceName, "rule.0.expiration.0.expired_object_delete_marker", "false"), + ), + }, + }, + }) +} // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/23884 func TestAccS3BucketLifecycleConfiguration_Update_filterWithAndToFilterWithPrefix(t *testing.T) { @@ -1683,3 +1766,57 @@ resource "aws_s3_bucket_lifecycle_configuration" "test" { } }`, rName, prefix) } + +func testAccBucketLifecycleConfiguration_Migrate_NoChangeConfig(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_acl" "test" { + bucket = aws_s3_bucket.test.id + acl = "private" +} + +resource "aws_s3_bucket_lifecycle_configuration" "test" { + bucket = aws_s3_bucket.test.bucket + + rule { + id = "id1" + prefix = "path1/" + status = "Enabled" + + expiration { + expired_object_delete_marker = true + } + } +} +`, rName) +} + +func testAccBucketLifecycleConfiguration_Migrate_WithChangeConfig(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_bucket_acl" "test" { + bucket = aws_s3_bucket.test.id + acl = "private" +} + +resource "aws_s3_bucket_lifecycle_configuration" "test" { + bucket = aws_s3_bucket.test.bucket 
+ + rule { + id = "id1" + prefix = "path1/" + status = "Disabled" + + expiration { + expired_object_delete_marker = false + } + } +} +`, rName) +} diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 7c9ca3171e4..e8703a97a24 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -43,7 +43,7 @@ func TestAccS3Bucket_Basic_basic(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") region := acctest.Region() hostedZoneID, _ := tfs3.HostedZoneIDForRegion(region) - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -159,7 +159,7 @@ func TestAccS3Bucket_Basic_namePrefix(t *testing.T) { } func TestAccS3Bucket_Basic_forceDestroy(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resource.ParallelTest(t, resource.TestCase{ @@ -185,7 +185,7 @@ func TestAccS3Bucket_Basic_forceDestroy(t *testing.T) { // to not contain these extra slashes, out-of-band handling and other AWS // services may create keys with extra slashes (empty "directory" prefixes). 
func TestAccS3Bucket_Basic_forceDestroyWithEmptyPrefixes(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resource.ParallelTest(t, resource.TestCase{ @@ -206,7 +206,7 @@ func TestAccS3Bucket_Basic_forceDestroyWithEmptyPrefixes(t *testing.T) { } func TestAccS3Bucket_Basic_forceDestroyWithObjectLockEnabled(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resource.ParallelTest(t, resource.TestCase{ @@ -268,7 +268,7 @@ func TestAccS3Bucket_Basic_acceleration(t *testing.T) { // See https://github.com/hashicorp/terraform/pull/2925 func TestAccS3Bucket_disappears(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -312,7 +312,7 @@ func TestAccS3Bucket_Tags_basic(t *testing.T) { } func TestAccS3Bucket_Tags_withNoSystemTags(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resource.ParallelTest(t, resource.TestCase{ @@ -371,7 +371,7 @@ func TestAccS3Bucket_Tags_withNoSystemTags(t *testing.T) { } func TestAccS3Bucket_Tags_withSystemTags(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") var stackID string @@ -456,7 +456,7 @@ func TestAccS3Bucket_Tags_withSystemTags(t *testing.T) { } func TestAccS3Bucket_Tags_ignoreTags(t *testing.T) { - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" bucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ @@ -500,6 +500,211 @@ func 
TestAccS3Bucket_Tags_ignoreTags(t *testing.T) { }) } +func TestAccS3Bucket_Manage_lifecycleBasic(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLifecycle(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.#", "6"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.id", "id1"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.prefix", "path1/"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.days", "365"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.date", ""), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.expired_object_delete_marker", "false"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ + "date": "", + "days": "30", + "storage_class": "STANDARD_IA", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ + "date": "", + "days": "60", + "storage_class": "INTELLIGENT_TIERING", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ + "date": "", + "days": "90", + "storage_class": "ONEZONE_IA", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ + "date": "", + "days": "120", + "storage_class": "GLACIER", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.0.transition.*", map[string]string{ + "date": "", + "days": 
"210", + "storage_class": "DEEP_ARCHIVE", + }), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.id", "id2"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.prefix", "path2/"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.expiration.0.date", "2016-01-12"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.expiration.0.days", "0"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.1.expiration.0.expired_object_delete_marker", "false"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.2.id", "id3"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.2.prefix", "path3/"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.2.transition.*", map[string]string{ + "days": "0", + }), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.id", "id4"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.prefix", "path4/"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.tags.tagKey", "tagValue"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.3.tags.terraform", "hashicorp"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.4.id", "id5"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.4.tags.tagKey", "tagValue"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.4.tags.terraform", "hashicorp"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.4.transition.*", map[string]string{ + "days": "0", + "storage_class": "GLACIER", + }), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.5.id", "id6"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.5.tags.tagKey", "tagValue"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "lifecycle_rule.5.transition.*", map[string]string{ + "days": "0", + "storage_class": "GLACIER", + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + { + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), + }, + }, + }) +} + +func TestAccS3Bucket_Manage_lifecycleExpireMarkerOnly(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLifecycleExpireMarker(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.id", "id1"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.prefix", "path1/"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.days", "0"), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.date", ""), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.0.expiration.0.expired_object_delete_marker", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, + }, + { + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), + }, + }, + }) +} + +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/11420 +func TestAccS3Bucket_Manage_lifecycleRuleExpirationEmptyBlock(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, 
s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLifecycleRuleExpirationEmptyConfigurationBlock(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), + }, + }, + }) +} + +// Reference: https://github.com/hashicorp/terraform-provider-aws/issues/15138 +func TestAccS3Bucket_Manage_lifecycleRuleAbortIncompleteMultipartUploadDaysNoExpiration(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLifecycleRuleAbortIncompleteMultipartUploadDays(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccS3Bucket_Manage_lifecycleRemove(t *testing.T) { + bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + resourceName := "aws_s3_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckBucketDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketConfig_withLifecycle(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.#", "6"), + ), + }, + { + Config: testAccBucketConfig_Basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketExists(resourceName), + // As Lifecycle Rule is 
a Computed field, removing them from terraform will not + // trigger an update to remove them from the S3 bucket. + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.#", "6"), + ), + }, + }, + }) +} + func TestAccS3Bucket_Manage_objectLock(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") resourceName := "aws_s3_bucket.test" @@ -647,7 +852,7 @@ func TestAccS3Bucket_Manage_objectLockWithVersioning_deprecatedEnabled(t *testin func TestAccS3Bucket_Security_updateACL(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -681,7 +886,7 @@ func TestAccS3Bucket_Security_updateACL(t *testing.T) { func TestAccS3Bucket_Security_updateGrant(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -741,7 +946,7 @@ func TestAccS3Bucket_Security_updateGrant(t *testing.T) { func TestAccS3Bucket_Security_aclToGrant(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -771,7 +976,7 @@ func TestAccS3Bucket_Security_aclToGrant(t *testing.T) { func TestAccS3Bucket_Security_grantToACL(t *testing.T) { bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") - resourceName := "aws_s3_bucket.bucket" + resourceName := "aws_s3_bucket.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(t) }, @@ -1496,7 +1701,7 @@ func testAccCheckBucketCheckTags(n string, expectedTags map[string]string) resou func testAccBucketConfig_Basic(bucketName string) string { return 
fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q } `, bucketName) @@ -1513,7 +1718,7 @@ resource "aws_s3_bucket" "test" { func testAccBucketConfig_withACL(bucketName, acl string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q acl = %[2]q } @@ -1569,7 +1774,7 @@ func testAccBucketConfig_withGrants(bucketName string) string { return fmt.Sprintf(` data "aws_canonical_user_id" "current" {} -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q grant { @@ -1585,7 +1790,7 @@ func testAccBucketConfig_withUpdatedGrants(bucketName string) string { return fmt.Sprintf(` data "aws_canonical_user_id" "current" {} -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q grant { @@ -1603,15 +1808,172 @@ resource "aws_s3_bucket" "bucket" { `, bucketName) } +func testAccBucketConfig_withLifecycle(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + + lifecycle_rule { + id = "id1" + prefix = "path1/" + enabled = true + + expiration { + days = 365 + } + + transition { + days = 30 + storage_class = "STANDARD_IA" + } + + transition { + days = 60 + storage_class = "INTELLIGENT_TIERING" + } + + transition { + days = 90 + storage_class = "ONEZONE_IA" + } + + transition { + days = 120 + storage_class = "GLACIER" + } + + transition { + days = 210 + storage_class = "DEEP_ARCHIVE" + } + } + + lifecycle_rule { + id = "id2" + prefix = "path2/" + enabled = true + + expiration { + date = "2016-01-12" + } + } + + lifecycle_rule { + id = "id3" + prefix = "path3/" + enabled = true + + transition { + days = 0 + storage_class = "GLACIER" + } + } + + lifecycle_rule { + id = "id4" + prefix = "path4/" + enabled = true + + tags = { + "tagKey" = "tagValue" + "terraform" = "hashicorp" + } + + expiration { + date = "2016-01-12" + } + } + + lifecycle_rule { + 
id = "id5" + enabled = true + + tags = { + "tagKey" = "tagValue" + "terraform" = "hashicorp" + } + + transition { + days = 0 + storage_class = "GLACIER" + } + } + + lifecycle_rule { + id = "id6" + enabled = true + + tags = { + "tagKey" = "tagValue" + } + + transition { + days = 0 + storage_class = "GLACIER" + } + } +} +`, bucketName) +} + +func testAccBucketConfig_withLifecycleExpireMarker(bucketName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + + lifecycle_rule { + id = "id1" + prefix = "path1/" + enabled = true + + expiration { + expired_object_delete_marker = "true" + } + } +} +`, bucketName) +} + +func testAccBucketConfig_withLifecycleRuleExpirationEmptyConfigurationBlock(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + lifecycle_rule { + enabled = true + id = "id1" + + expiration {} + } +} +`, rName) +} + +func testAccBucketConfig_withLifecycleRuleAbortIncompleteMultipartUploadDays(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + + lifecycle_rule { + abort_incomplete_multipart_upload_days = 7 + enabled = true + id = "id1" + } +} +`, rName) +} + func testAccBucketConfig_withNoTags(bucketName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q force_destroy = false } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id acl = "private" } `, bucketName) @@ -1619,7 +1981,7 @@ resource "aws_s3_bucket_acl" "test" { func testAccBucketConfig_withTags(bucketName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q force_destroy = false @@ -1631,7 +1993,7 @@ resource "aws_s3_bucket" "bucket" { } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id acl = 
"private" } `, bucketName) @@ -1639,7 +2001,7 @@ resource "aws_s3_bucket_acl" "test" { func testAccBucketConfig_withUpdatedTags(bucketName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = %[1]q force_destroy = false @@ -1652,7 +2014,7 @@ resource "aws_s3_bucket" "bucket" { } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id acl = "private" } `, bucketName) @@ -1824,13 +2186,13 @@ resource "aws_s3_bucket_versioning" "test" { func testAccBucketConfig_forceDestroy(bucketName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = "%s" force_destroy = true } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id acl = "private" } `, bucketName) @@ -1838,7 +2200,7 @@ resource "aws_s3_bucket_acl" "test" { func testAccBucketConfig_forceDestroyWithObjectLockEnabled(bucketName string) string { return fmt.Sprintf(` -resource "aws_s3_bucket" "bucket" { +resource "aws_s3_bucket" "test" { bucket = "%s" force_destroy = true @@ -1846,12 +2208,12 @@ resource "aws_s3_bucket" "bucket" { } resource "aws_s3_bucket_acl" "test" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id acl = "private" } resource "aws_s3_bucket_versioning" "bucket" { - bucket = aws_s3_bucket.bucket.id + bucket = aws_s3_bucket.test.id versioning_configuration { status = "Enabled" } diff --git a/internal/service/s3/validate.go b/internal/service/s3/validate.go new file mode 100644 index 00000000000..0c5ce27b36a --- /dev/null +++ b/internal/service/s3/validate.go @@ -0,0 +1,55 @@ +package s3 + +import ( + "fmt" + "regexp" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// ValidBucketName validates any S3 bucket name that is not inside the us-east-1 region. +// Buckets outside of this region have to be DNS-compliant. 
After the same restrictions are +// applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc +func ValidBucketName(value string, region string) error { + if region != endpoints.UsEast1RegionID { + if (len(value) < 3) || (len(value) > 63) { + return fmt.Errorf("%q must contain from 3 to 63 characters", value) + } + if !regexp.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) { + return fmt.Errorf("only lowercase alphanumeric characters and hyphens allowed in %q", value) + } + if regexp.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) { + return fmt.Errorf("%q must not be formatted as an IP address", value) + } + if strings.HasPrefix(value, `.`) { + return fmt.Errorf("%q cannot start with a period", value) + } + if strings.HasSuffix(value, `.`) { + return fmt.Errorf("%q cannot end with a period", value) + } + if strings.Contains(value, `..`) { + return fmt.Errorf("%q can be only one period between labels", value) + } + } else { + if len(value) > 255 { + return fmt.Errorf("%q must contain less than 256 characters", value) + } + if !regexp.MustCompile(`^[0-9a-zA-Z-._]+$`).MatchString(value) { + return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value) + } + } + return nil +} + +func validBucketLifecycleTimestamp(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", value)) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q cannot be parsed as RFC3339 Timestamp Format", value)) + } + + return +} diff --git a/website/docs/r/s3_bucket.html.markdown b/website/docs/r/s3_bucket.html.markdown index f896105f4ce..3d0c3d5b2f1 100644 --- a/website/docs/r/s3_bucket.html.markdown +++ b/website/docs/r/s3_bucket.html.markdown @@ -28,6 +28,10 @@ Configuring with both will cause inconsistencies and may overwrite configuration or with the deprecated parameter `cors_rule` in the 
resource `aws_s3_bucket`. Configuring with both will cause inconsistencies and may overwrite configuration. +~> **NOTE on S3 Bucket Lifecycle Configuration:** S3 Bucket Lifecycle can be configured in either the standalone resource [`aws_s3_bucket_lifecycle_configuration`](s3_bucket_lifecycle_configuration.html) +or with the deprecated parameter `lifecycle_rule` in the resource `aws_s3_bucket`. +Configuring with both will cause inconsistencies and may overwrite configuration. + ## Example Usage ### Private Bucket w/ Tags @@ -85,8 +89,80 @@ See the [`aws_s3_bucket_logging` resource](s3_bucket_logging.html.markdown) for ### Using object lifecycle -The `lifecycle_rule` argument is read-only as of version 4.0 of the Terraform AWS Provider. -See the [`aws_s3_bucket_lifecycle_configuration` resource](s3_bucket_lifecycle_configuration.html.markdown) for configuration details. + +-> **NOTE:** The parameter `lifecycle_rule` is deprecated. +Use the resource [`aws_s3_bucket_lifecycle_configuration`](s3_bucket_lifecycle_configuration.html) instead. 
+ +```terraform +resource "aws_s3_bucket" "bucket" { + bucket = "my-bucket" + acl = "private" + + lifecycle_rule { + id = "log" + enabled = true + + prefix = "log/" + + tags = { + rule = "log" + autoclean = "true" + } + + transition { + days = 30 + storage_class = "STANDARD_IA" # or "ONEZONE_IA" + } + + transition { + days = 60 + storage_class = "GLACIER" + } + + expiration { + days = 90 + } + } + + lifecycle_rule { + id = "tmp" + prefix = "tmp/" + enabled = true + + expiration { + date = "2016-01-12" + } + } +} + +resource "aws_s3_bucket" "versioning_bucket" { + bucket = "my-versioning-bucket" + acl = "private" + + versioning { + enabled = true + } + + lifecycle_rule { + prefix = "config/" + enabled = true + + noncurrent_version_transition { + days = 30 + storage_class = "STANDARD_IA" + } + + noncurrent_version_transition { + days = 60 + storage_class = "GLACIER" + } + + noncurrent_version_expiration { + days = 90 + } + } +} +``` ### Using object lock configuration @@ -142,6 +219,8 @@ The following arguments are supported: * `grant` - (Optional, **Deprecated**) An [ACL policy grant](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#sample-acl). See [Grant](#grant) below for details. Conflicts with `acl`. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_acl`](s3_bucket_acl.html.markdown) instead. * `cors_rule` - (Optional, **Deprecated**) A rule of [Cross-Origin Resource Sharing](https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). See [CORS rule](#cors-rule) below for details. Terraform will only perform drift detection if a configuration value is provided. Use the resource [`aws_s3_bucket_cors_configuration`](s3_bucket_cors_configuration.html.markdown) instead. 
* `force_destroy` - (Optional, Default:`false`) A boolean that indicates all objects (including any [locked objects](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html)) should be deleted from the bucket so that the bucket can be destroyed without error. These objects are *not* recoverable. +* `lifecycle_rule` - (Optional, **Deprecated**) A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). See [Lifecycle Rule](#lifecycle-rule) below for details. Terraform will only perform drift detection if a configuration value is provided. + Use the resource [`aws_s3_bucket_lifecycle_configuration`](s3_bucket_lifecycle_configuration.html) instead. * `object_lock_enabled` - (Optional, Default:`false`, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled. * `object_lock_configuration` - (Optional) A configuration of [S3 object locking](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). See [Object Lock Configuration](#object-lock-configuration) below. * `tags` - (Optional) A map of tags to assign to the bucket. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. @@ -169,6 +248,53 @@ The `grant` configuration block supports the following arguments: * `permissions` - (Required) List of permissions to apply for grantee. Valid values are `READ`, `WRITE`, `READ_ACP`, `WRITE_ACP`, `FULL_CONTROL`. * `uri` - (Optional) Uri address to grant for. Used only when `type` is `Group`. +### Lifecycle Rule + +~> **NOTE:** Currently, changes to the `lifecycle_rule` configuration of _existing_ resources cannot be automatically detected by Terraform. To manage changes of Lifecycle rules to an S3 bucket, use the `aws_s3_bucket_lifecycle_configuration` resource instead. 
If you use `lifecycle_rule` on an `aws_s3_bucket`, Terraform will assume management over the full set of Lifecycle rules for the S3 bucket, treating additional Lifecycle rules as drift. For this reason, `lifecycle_rule` cannot be mixed with the external `aws_s3_bucket_lifecycle_configuration` resource for a given S3 bucket. + +~> **NOTE:** At least one of `abort_incomplete_multipart_upload_days`, `expiration`, `transition`, `noncurrent_version_expiration`, `noncurrent_version_transition` must be specified. + +The `lifecycle_rule` configuration block supports the following arguments: + +* `id` - (Optional) Unique identifier for the rule. Must be less than or equal to 255 characters in length. +* `prefix` - (Optional) Object key prefix identifying one or more objects to which the rule applies. +* `tags` - (Optional) Specifies object tags key and value. +* `enabled` - (Required) Specifies lifecycle rule status. +* `abort_incomplete_multipart_upload_days` - (Optional) Specifies the number of days after initiating a multipart upload when the multipart upload must be completed. +* `expiration` - (Optional) Specifies a period in the object's expiration. See [Expiration](#expiration) below for details. +* `transition` - (Optional) Specifies a period in the object's transitions. See [Transition](#transition) below for details. +* `noncurrent_version_expiration` - (Optional) Specifies when noncurrent object versions expire. See [Noncurrent Version Expiration](#noncurrent-version-expiration) below for details. +* `noncurrent_version_transition` - (Optional) Specifies when noncurrent object versions transition. See [Noncurrent Version Transition](#noncurrent-version-transition) below for details. + +### Expiration + +The `expiration` configuration block supports the following arguments: + +* `date` - (Optional) Specifies the date after which you want the corresponding action to take effect. 
+* `days` - (Optional) Specifies the number of days after object creation when the specific rule action takes effect. +* `expired_object_delete_marker` - (Optional) On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Amazon S3 to delete expired object delete markers. This cannot be specified with Days or Date in a Lifecycle Expiration Policy. + +### Transition + +The `transition` configuration block supports the following arguments: + +* `date` - (Optional) Specifies the date after which you want the corresponding action to take effect. +* `days` - (Optional) Specifies the number of days after object creation when the specific rule action takes effect. +* `storage_class` - (Required) Specifies the Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) to which you want the object to transition. + +### Noncurrent Version Expiration + +The `noncurrent_version_expiration` configuration block supports the following arguments: + +* `days` - (Required) Specifies the number of days noncurrent object versions expire. + +### Noncurrent Version Transition + +The `noncurrent_version_transition` configuration supports the following arguments: + +* `days` - (Required) Specifies the number of days noncurrent object versions transition. +* `storage_class` - (Required) Specifies the Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) to which you want the object to transition. + ### Object Lock Configuration ~> **NOTE:** You can only **enable** S3 Object Lock for **new** buckets. If you need to **enable** S3 Object Lock for an **existing** bucket, please contact AWS Support. @@ -189,25 +315,6 @@ In addition to all arguments above, the following attributes are exported: * `bucket_domain_name` - The bucket domain name. 
Will be of format `bucketname.s3.amazonaws.com`. * `bucket_regional_domain_name` - The bucket region-specific domain name. The bucket domain name including the region name, please refer [here](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent [redirect issues](https://forums.aws.amazon.com/thread.jspa?threadID=216814) from CloudFront to S3 Origin URL. * `hosted_zone_id` - The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region. -* `lifecycle_rule` - A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). - * `id` - Unique identifier for the rule. - * `prefix` - Object key prefix identifying one or more objects to which the rule applies. - * `tags` - Object tags key and value. - * `enabled` - Lifecycle rule status. - * `abort_incomplete_multipart_upload_days` - Number of days after initiating a multipart upload when the multipart upload must be completed. - * `expiration` - The expiration for the lifecycle of the object in the form of date, days and, whether the object has a delete marker. - * `date` - Indicates at what date the object is to be moved or deleted. - * `days` - Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer. - * `expired_object_delete_marker` - Indicates whether Amazon S3 will remove a delete marker with no noncurrent versions. - * `transition` - Specifies when an Amazon S3 object transitions to a specified storage class. - * `date` - The date after which you want the corresponding action to take effect. - * `days` - The number of days after object creation when the specific rule action takes effect. 
- * `storage_class` - The Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) an object will transition to. - * `noncurrent_version_expiration` - When noncurrent object versions expire. - * `days` - The number of days noncurrent object versions expire. - * `noncurrent_version_transition` - When noncurrent object versions transition. - * `days` - The number of days noncurrent object versions transition. - * `storage_class` - The Amazon S3 [storage class](https://docs.aws.amazon.com/AmazonS3/latest/API/API_Transition.html#AmazonS3-Type-Transition-StorageClass) an object will transition to. * `logging` - The [logging parameters](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) for the bucket. * `target_bucket` - The name of the bucket that receives the log objects. * `target_prefix` - The prefix for all log object keys/