diff --git a/.changelog/34647.txt b/.changelog/34647.txt new file mode 100644 index 000000000000..c50a926504e0 --- /dev/null +++ b/.changelog/34647.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_s3_directory_bucket: Fix `NotImplemented: This bucket does not support Object Versioning` errors on resource Delete when `force_destroy` is `true` +``` diff --git a/go.mod b/go.mod index 6710c46b4b83..75c006bbd80a 100644 --- a/go.mod +++ b/go.mod @@ -95,7 +95,6 @@ require ( github.com/aws/aws-sdk-go-v2/service/vpclattice v1.5.1 github.com/aws/aws-sdk-go-v2/service/workspaces v1.35.1 github.com/aws/aws-sdk-go-v2/service/xray v1.23.1 - github.com/aws/smithy-go v1.18.1 github.com/beevik/etree v1.2.0 github.com/davecgh/go-spew v1.1.1 github.com/gertd/go-pluralize v0.2.1 @@ -158,6 +157,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.7 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.18.1 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.1 // indirect + github.com/aws/smithy-go v1.18.1 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect github.com/boombuler/barcode v1.0.1 // indirect github.com/bufbuild/protocompile v0.6.0 // indirect diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index ac5184bff25f..37d4a0c4244a 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -23,7 +23,6 @@ import ( "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" - smithy "github.com/aws/smithy-go" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" tfawserr_sdkv2 "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -1442,25 +1441,13 @@ func findBucket(ctx context.Context, conn *s3_sdkv2.Client, bucket string, optFn _, err := conn.HeadBucket(ctx, input, optFns...) 
- if tfawserr_sdkv2.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) || tfawserr_sdkv2.ErrCodeEquals(err, errCodeNoSuchBucket) { + if tfawserr_sdkv2.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) || tfawserr_sdkv2.ErrCodeEquals(err, errCodeNoSuchBucket) || errs.Contains(err, errCodeNoSuchBucket) { return &retry.NotFoundError{ LastError: err, LastRequest: input, } } - // FIXME Move to aws-sdk-go-base - // FIXME &smithy.OperationError{ServiceID:"S3", OperationName:"HeadBucket", Err:(*errors.errorString)(0xc00202bb60)} - // FIXME "operation error S3: HeadBucket, get identity: get credentials: operation error S3: CreateSession, https response error StatusCode: 404, RequestID: 0033eada6b00018c17de82890509d9eada65ba39, HostID: F31dBn, NoSuchBucket:" - if operationErr, ok := errs.As[*smithy.OperationError](err); ok { - if strings.Contains(operationErr.Err.Error(), errCodeNoSuchBucket) { - return &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - } - return err } diff --git a/internal/service/s3/delete.go b/internal/service/s3/delete.go index a85a39d36f60..c82485b4feae 100644 --- a/internal/service/s3/delete.go +++ b/internal/service/s3/delete.go @@ -13,16 +13,13 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" ) -const ( - listObjectVersionsMaxKeys = 1000 -) - -// emptyBucket empties the specified S3 bucket by deleting all object versions and delete markers. +// emptyBucket empties the specified S3 general purpose bucket by deleting all object versions and delete markers. // If `force` is `true` then S3 Object Lock governance mode restrictions are bypassed and // an attempt is made to remove any S3 Object Lock legal holds. -// Returns the number of objects deleted. +// Returns the number of object versions and delete markers deleted. 
func emptyBucket(ctx context.Context, conn *s3.Client, bucket string, force bool) (int64, error) { nObjects, err := forEachObjectVersionsPage(ctx, conn, bucket, func(ctx context.Context, conn *s3.Client, bucket string, page *s3.ListObjectVersionsOutput) (int64, error) { return deletePageOfObjectVersions(ctx, conn, bucket, force, page) @@ -38,13 +35,18 @@ func emptyBucket(ctx context.Context, conn *s3.Client, bucket string, force bool return nObjects, err } +// emptyDirectoryBucket empties the specified S3 directory bucket by deleting all objects. +// Returns the number of objects deleted. +func emptyDirectoryBucket(ctx context.Context, conn *s3.Client, bucket string) (int64, error) { + return forEachObjectsPage(ctx, conn, bucket, deletePageOfObjects) +} + // forEachObjectVersionsPage calls the specified function for each page returned from the S3 ListObjectVersionsPages API. func forEachObjectVersionsPage(ctx context.Context, conn *s3.Client, bucket string, fn func(ctx context.Context, conn *s3.Client, bucket string, page *s3.ListObjectVersionsOutput) (int64, error)) (int64, error) { var nObjects int64 input := &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucket), - MaxKeys: aws.Int32(listObjectVersionsMaxKeys), + Bucket: aws.String(bucket), } var lastErr error @@ -72,21 +74,52 @@ func forEachObjectVersionsPage(ctx context.Context, conn *s3.Client, bucket stri return nObjects, nil } +// forEachObjectsPage calls the specified function for each page returned from the S3 ListObjectsV2 API. 
+func forEachObjectsPage(ctx context.Context, conn *s3.Client, bucket string, fn func(ctx context.Context, conn *s3.Client, bucket string, page *s3.ListObjectsV2Output) (int64, error)) (int64, error) { + var nObjects int64 + + input := &s3.ListObjectsV2Input{ + Bucket: aws.String(bucket), + } + var lastErr error + + pages := s3.NewListObjectsV2Paginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nObjects, fmt.Errorf("listing S3 bucket (%s) objects: %w", bucket, err) + } + + n, err := fn(ctx, conn, bucket, page) + nObjects += n + + if err != nil { + lastErr = err + break + } + } + + if lastErr != nil { + return nObjects, lastErr + } + + return nObjects, nil +} + // deletePageOfObjectVersions deletes a page (<= 1000) of S3 object versions. // If `force` is `true` then S3 Object Lock governance mode restrictions are bypassed and // an attempt is made to remove any S3 Object Lock legal holds. // Returns the number of objects deleted. 
func deletePageOfObjectVersions(ctx context.Context, conn *s3.Client, bucket string, force bool, page *s3.ListObjectVersionsOutput) (int64, error) { - var nObjects int64 - - toDelete := make([]types.ObjectIdentifier, 0, len(page.Versions)) - for _, v := range page.Versions { - toDelete = append(toDelete, types.ObjectIdentifier{ + toDelete := tfslices.ApplyToAll(page.Versions, func(v types.ObjectVersion) types.ObjectIdentifier { + return types.ObjectIdentifier{ Key: v.Key, VersionId: v.VersionId, - }) - } + } + }) + var nObjects int64 if nObjects = int64(len(toDelete)); nObjects == 0 { return nObjects, nil } @@ -109,16 +142,14 @@ func deletePageOfObjectVersions(ctx context.Context, conn *s3.Client, bucket str } if err != nil { - return nObjects, fmt.Errorf("deleting S3 bucket (%s) objects: %w", bucket, err) + return nObjects, fmt.Errorf("deleting S3 bucket (%s) object versions: %w", bucket, err) } nObjects -= int64(len(output.Errors)) - var deleteErrs []error - + var errs []error for _, v := range output.Errors { code := aws.ToString(v.Code) - if code == errCodeNoSuchKey { continue } @@ -139,8 +170,8 @@ func deletePageOfObjectVersions(ctx context.Context, conn *s3.Client, bucket str if err != nil { // Add the original error and the new error. - deleteErrs = append(deleteErrs, newDeleteObjectVersionError(v)) - deleteErrs = append(deleteErrs, fmt.Errorf("removing legal hold: %w", newObjectVersionError(key, versionID, err))) + errs = append(errs, newDeleteObjectVersionError(v)) + errs = append(errs, fmt.Errorf("removing legal hold: %w", newObjectVersionError(key, versionID, err))) } else { // Attempt to delete the object once the legal hold has been removed. 
_, err := conn.DeleteObject(ctx, &s3.DeleteObjectInput{ @@ -150,18 +181,18 @@ func deletePageOfObjectVersions(ctx context.Context, conn *s3.Client, bucket str }) if err != nil { - deleteErrs = append(deleteErrs, fmt.Errorf("deleting: %w", newObjectVersionError(key, versionID, err))) + errs = append(errs, fmt.Errorf("deleting: %w", newObjectVersionError(key, versionID, err))) } else { nObjects++ } } } else { - deleteErrs = append(deleteErrs, newDeleteObjectVersionError(v)) + errs = append(errs, newDeleteObjectVersionError(v)) } } - if err := errors.Join(deleteErrs...); err != nil { - return nObjects, fmt.Errorf("deleting S3 bucket (%s) objects: %w", bucket, err) + if err := errors.Join(errs...); err != nil { + return nObjects, fmt.Errorf("deleting S3 bucket (%s) object versions: %w", bucket, err) } return nObjects, nil @@ -170,16 +201,14 @@ func deletePageOfObjectVersions(ctx context.Context, conn *s3.Client, bucket str // deletePageOfDeleteMarkers deletes a page (<= 1000) of S3 object delete markers. // Returns the number of delete markers deleted. 
func deletePageOfDeleteMarkers(ctx context.Context, conn *s3.Client, bucket string, page *s3.ListObjectVersionsOutput) (int64, error) { - var nObjects int64 - - toDelete := make([]types.ObjectIdentifier, 0, len(page.Versions)) - for _, v := range page.DeleteMarkers { - toDelete = append(toDelete, types.ObjectIdentifier{ + toDelete := tfslices.ApplyToAll(page.DeleteMarkers, func(v types.DeleteMarkerEntry) types.ObjectIdentifier { + return types.ObjectIdentifier{ Key: v.Key, VersionId: v.VersionId, - }) - } + } + }) + var nObjects int64 if nObjects = int64(len(toDelete)); nObjects == 0 { return nObjects, nil } @@ -204,19 +233,64 @@ func deletePageOfDeleteMarkers(ctx context.Context, conn *s3.Client, bucket stri nObjects -= int64(len(output.Errors)) - var deleteErrs []error - + var errs []error for _, v := range output.Errors { - deleteErrs = append(deleteErrs, newDeleteObjectVersionError(v)) + errs = append(errs, newDeleteObjectVersionError(v)) } - if err := errors.Join(deleteErrs...); err != nil { + if err := errors.Join(errs...); err != nil { return nObjects, fmt.Errorf("deleting S3 bucket (%s) delete markers: %w", bucket, err) } return nObjects, nil } +// deletePageOfObjects deletes a page (<= 1000) of S3 objects. +// Returns the number of objects deleted. +func deletePageOfObjects(ctx context.Context, conn *s3.Client, bucket string, page *s3.ListObjectsV2Output) (int64, error) { + toDelete := tfslices.ApplyToAll(page.Contents, func(v types.Object) types.ObjectIdentifier { + return types.ObjectIdentifier{ + Key: v.Key, + } + }) + + var nObjects int64 + if nObjects = int64(len(toDelete)); nObjects == 0 { + return nObjects, nil + } + + input := &s3.DeleteObjectsInput{ + Bucket: aws.String(bucket), + Delete: &types.Delete{ + Objects: toDelete, + Quiet: aws.Bool(true), // Only report errors. 
+ }, + } + + output, err := conn.DeleteObjects(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { + return nObjects, nil + } + + if err != nil { + return nObjects, fmt.Errorf("deleting S3 bucket (%s) objects: %w", bucket, err) + } + + nObjects -= int64(len(output.Errors)) + + var errs []error + for _, v := range output.Errors { + errs = append(errs, newDeleteObjectVersionError(v)) + } + + if err := errors.Join(errs...); err != nil { + return nObjects, fmt.Errorf("deleting S3 bucket (%s) objects: %w", bucket, err) + } + + return nObjects, nil +} + func newObjectVersionError(key, versionID string, err error) error { if err == nil { return nil @@ -235,20 +309,21 @@ func newDeleteObjectVersionError(err types.Error) error { return fmt.Errorf("deleting: %w", newObjectVersionError(aws.ToString(err.Key), aws.ToString(err.VersionId), s3Err)) } -// deleteAllObjectVersions deletes all versions of a specified key from an S3 bucket. +// deleteAllObjectVersions deletes all versions of a specified key from an S3 general purpose bucket. // If key is empty then all versions of all objects are deleted. -// Set force to true to override any S3 object lock protections on object lock enabled buckets. +// Set `force` to `true` to override any S3 object lock protections on object lock enabled buckets. // Returns the number of objects deleted. +// Use `emptyBucket` to delete all versions of all objects in a bucket. 
func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucket, key string, force, ignoreObjectErrors bool) (int64, error) { - var nObjects int64 + if key == "" { + return 0, errors.New("use `emptyBucket` to delete all versions of all objects in an S3 general purpose bucket") + } input := &s3.ListObjectVersionsInput{ - Bucket: aws.String(bucket), - MaxKeys: aws.Int32(listObjectVersionsMaxKeys), - } - if key != "" { - input.Prefix = aws.String(key) + Bucket: aws.String(bucket), + Prefix: aws.String(key), } + var nObjects int64 var lastErr error pages := s3.NewListObjectVersionsPaginator(conn, input) @@ -267,7 +342,7 @@ func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucket, key s objectKey := aws.ToString(objectVersion.Key) objectVersionID := aws.ToString(objectVersion.VersionId) - if key != "" && key != objectKey { + if key != objectKey { continue } @@ -358,7 +433,7 @@ func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucket, key s deleteMarkerKey := aws.ToString(deleteMarker.Key) deleteMarkerVersionID := aws.ToString(deleteMarker.VersionId) - if key != "" && key != deleteMarkerKey { + if key != deleteMarkerKey { continue } @@ -383,7 +458,7 @@ func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucket, key s } // deleteObjectVersion deletes a specific object version. -// Set force to true to override any S3 object lock protections. +// Set `force` to `true` to override any S3 object lock protections. 
func deleteObjectVersion(ctx context.Context, conn *s3.Client, b, k, v string, force bool) error { input := &s3.DeleteObjectInput{ Bucket: aws.String(b), diff --git a/internal/service/s3/directory_bucket.go b/internal/service/s3/directory_bucket.go index ecdc077bfc0c..a14857584236 100644 --- a/internal/service/s3/directory_bucket.go +++ b/internal/service/s3/directory_bucket.go @@ -254,7 +254,7 @@ func (r *directoryBucketResource) Delete(ctx context.Context, request resource.D if tfawserr.ErrCodeEquals(err, errCodeBucketNotEmpty) { if data.ForceDestroy.ValueBool() { // Empty the bucket and try again. - _, err = emptyBucket(ctx, conn, data.ID.ValueString(), false) + _, err = emptyDirectoryBucket(ctx, conn, data.ID.ValueString()) if err != nil { response.Diagnostics.AddError(fmt.Sprintf("emptying S3 Directory Bucket (%s)", data.ID.ValueString()), err.Error()) diff --git a/internal/service/s3/directory_bucket_test.go b/internal/service/s3/directory_bucket_test.go index 598f98530879..a8a3cc53cb03 100644 --- a/internal/service/s3/directory_bucket_test.go +++ b/internal/service/s3/directory_bucket_test.go @@ -75,6 +75,28 @@ func TestAccS3DirectoryBucket_disappears(t *testing.T) { }) } +func TestAccS3DirectoryBucket_forceDestroy(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_directory_bucket.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDirectoryBucketDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDirectoryBucketConfig_forceDestroy(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDirectoryBucketExists(ctx, resourceName), + testAccCheckBucketAddObjects(ctx, resourceName, "data.txt", "prefix/more_data.txt"), + ), + }, + }, + }) +} + func 
testAccCheckDirectoryBucketDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) @@ -139,3 +161,17 @@ resource "aws_s3_directory_bucket" "test" { } `) } + +func testAccDirectoryBucketConfig_forceDestroy(rName string) string { + return acctest.ConfigCompose(testAccDirectoryBucketConfig_base(rName), ` +resource "aws_s3_directory_bucket" "test" { + bucket = local.bucket + + location { + name = local.location_name + } + + force_destroy = true +} +`) +} diff --git a/internal/service/s3/object_test.go b/internal/service/s3/object_test.go index d7f207591465..f1b8c0bebb09 100644 --- a/internal/service/s3/object_test.go +++ b/internal/service/s3/object_test.go @@ -164,6 +164,54 @@ func TestAccS3Object_basic(t *testing.T) { }) } +func TestAccS3Object_disappears(t *testing.T) { + ctx := acctest.Context(t) + var obj s3.GetObjectOutput + resourceName := "aws_s3_object.object" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckObjectDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccObjectConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckObjectExists(ctx, resourceName, &obj), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfs3.ResourceObject(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccS3Object_Disappears_bucket(t *testing.T) { + ctx := acctest.Context(t) + var obj s3.GetObjectOutput + resourceName := "aws_s3_object.object" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckObjectDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccObjectConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckObjectExists(ctx, resourceName, &obj), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfs3.ResourceBucket(), "aws_s3_bucket.test"), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccS3Object_upgradeFromV4(t *testing.T) { ctx := acctest.Context(t) var obj s3.GetObjectOutput @@ -1724,6 +1772,32 @@ func TestAccS3Object_directoryBucket(t *testing.T) { }) } +func TestAccS3Object_DirectoryBucket_disappears(t *testing.T) { // nosemgrep:ci.acceptance-test-naming-parent-disappears + ctx := acctest.Context(t) + var obj s3.GetObjectOutput + resourceName := "aws_s3_object.object" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + // FIXME "Error running post-test destroy, there may be dangling resources: operation error S3: HeadObject, https response error StatusCode: 403, RequestID: 0033eada6b00018c1804fda905093646dd76f12a, HostID: SfKUL8OB, api error Forbidden: Forbidden" + // CheckDestroy: testAccCheckObjectDestroy(ctx), + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccObjectConfig_directoryBucket(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckObjectExists(ctx, resourceName, &obj), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfs3.ResourceObject(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccS3Object_DirectoryBucket_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) var obj s3.GetObjectOutput @@ -1958,7 +2032,8 @@ func testAccCheckObjectCheckTags(ctx context.Context, 
n string, expectedTags map func testAccObjectConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { - bucket = %[1]q + bucket = %[1]q + force_destroy = true } resource "aws_s3_object" "object" { @@ -2631,6 +2706,8 @@ resource "aws_s3_directory_bucket" "test" { location { name = local.location_name } + + force_destroy = true } resource "aws_s3_object" "object" { diff --git a/internal/service/s3/sweep.go b/internal/service/s3/sweep.go index a86164dfc424..ed903d53d7b8 100644 --- a/internal/service/s3/sweep.go +++ b/internal/service/s3/sweep.go @@ -58,7 +58,7 @@ func sweepObjects(region string) error { } conn := client.S3Client(ctx) - // General-purpose buckets. + // General purpose buckets. output, err := conn.ListBuckets(ctx, &s3.ListBucketsInput{}) if awsv2.SkipSweepError(err) { @@ -114,7 +114,7 @@ func sweepObjects(region string) error { continue } - sweepables = append(sweepables, objectSweeper{ + sweepables = append(sweepables, directoryBucketObjectSweeper{ conn: conn, bucket: aws.ToString(v.Name), }) @@ -138,13 +138,26 @@ type objectSweeper struct { func (os objectSweeper) Delete(ctx context.Context, timeout time.Duration, optFns ...tfresource.OptionsFunc) error { // Delete everything including locked objects. 
- _, err := deleteAllObjectVersions(ctx, os.conn, os.bucket, "", os.locked, true) + _, err := emptyBucket(ctx, os.conn, os.bucket, os.locked) if err != nil { return fmt.Errorf("deleting S3 Bucket (%s) objects: %w", os.bucket, err) } return nil } +type directoryBucketObjectSweeper struct { + conn *s3.Client + bucket string +} + +func (os directoryBucketObjectSweeper) Delete(ctx context.Context, timeout time.Duration, optFns ...tfresource.OptionsFunc) error { + _, err := emptyDirectoryBucket(ctx, os.conn, os.bucket) + if err != nil { + return fmt.Errorf("deleting S3 Directory Bucket (%s) objects: %w", os.bucket, err) + } + return nil +} + func sweepBuckets(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) diff --git a/website/docs/r/s3_object.html.markdown b/website/docs/r/s3_object.html.markdown index 2ebd54609169..152fd236427a 100644 --- a/website/docs/r/s3_object.html.markdown +++ b/website/docs/r/s3_object.html.markdown @@ -134,7 +134,7 @@ resource "aws_s3_object" "examplebucket_object" { S3 objects support a [maximum of 10 tags](https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html). If the resource's own `tags` and the provider-level `default_tags` would together lead to more than 10 tags on an S3 object, use the `override_provider` configuration block to suppress any provider-level `default_tags`. --> S3 objects stored in Amazon S3 Express directory buckets do not support tags, so any provider-level `default_tags` must be ignored. +-> S3 objects stored in Amazon S3 Express directory buckets do not support tags, so any provider-level `default_tags` must be suppressed. ```terraform resource "aws_s3_bucket" "examplebucket" {