From a85f96de6abcf4cf55ee2186b488d263462b9e01 Mon Sep 17 00:00:00 2001 From: Angie Pinilla Date: Wed, 26 Jan 2022 17:34:44 -0500 Subject: [PATCH] r/s3_bucket_logging: new resource --- .changelog/22608.txt | 3 + internal/provider/provider.go | 1 + internal/service/s3/bucket.go | 4 +- internal/service/s3/bucket_logging.go | 402 +++++++++++++ internal/service/s3/bucket_logging_test.go | 533 ++++++++++++++++++ .../docs/r/s3_bucket_logging.html.markdown | 78 +++ 6 files changed, 1019 insertions(+), 2 deletions(-) create mode 100644 .changelog/22608.txt create mode 100644 internal/service/s3/bucket_logging.go create mode 100644 internal/service/s3/bucket_logging_test.go create mode 100644 website/docs/r/s3_bucket_logging.html.markdown diff --git a/.changelog/22608.txt b/.changelog/22608.txt new file mode 100644 index 00000000000..06299d87d00 --- /dev/null +++ b/.changelog/22608.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_s3_bucket_logging +``` diff --git a/internal/provider/provider.go b/internal/provider/provider.go index cb1f54193f0..711438209b6 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -1625,6 +1625,7 @@ func Provider() *schema.Provider { "aws_s3_bucket_cors_configuration": s3.ResourceBucketCorsConfiguration(), "aws_s3_bucket_intelligent_tiering_configuration": s3.ResourceBucketIntelligentTieringConfiguration(), "aws_s3_bucket_inventory": s3.ResourceBucketInventory(), + "aws_s3_bucket_logging": s3.ResourceBucketLogging(), "aws_s3_bucket_metric": s3.ResourceBucketMetric(), "aws_s3_bucket_notification": s3.ResourceBucketNotification(), "aws_s3_bucket_ownership_controls": s3.ResourceBucketOwnershipControls(), diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 4c5f8836b78..f419816d319 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -806,7 +806,7 @@ func resourceBucketUpdate(d *schema.ResourceData, meta interface{}) error { } if d.HasChange("logging") { - if err := resourceBucketLoggingUpdate(conn, d); err != nil { + if err := resourceBucketInternalLoggingUpdate(conn, d); err != nil { return err } } @@ -1868,7 +1868,7 @@ func resourceBucketInternalVersioningUpdate(conn *s3.S3, bucket string, versioni return nil } -func resourceBucketLoggingUpdate(conn *s3.S3, d *schema.ResourceData) error { +func resourceBucketInternalLoggingUpdate(conn *s3.S3, d *schema.ResourceData) error { logging := d.Get("logging").(*schema.Set).List() bucket := d.Get("bucket").(string) loggingStatus := &s3.BucketLoggingStatus{} diff --git a/internal/service/s3/bucket_logging.go b/internal/service/s3/bucket_logging.go new file mode 100644 index 00000000000..4393a786f13 --- /dev/null +++ b/internal/service/s3/bucket_logging.go @@ -0,0 +1,402 @@ +package s3 + +import ( + "context" + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/verify" +) + +func ResourceBucketLogging() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceBucketLoggingCreate, + ReadContext: resourceBucketLoggingRead, + UpdateContext: resourceBucketLoggingUpdate, + DeleteContext: resourceBucketLoggingDelete, + Importer: 
&schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 63), + }, + "expected_bucket_owner": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: verify.ValidAccountID, + }, + "target_bucket": { + Type: schema.TypeString, + Required: true, + }, + "target_grant": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "grantee": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Computed: true, + }, + "email_address": { + Type: schema.TypeString, + Optional: true, + }, + "id": { + Type: schema.TypeString, + Optional: true, + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.Type_Values(), false), + }, + "uri": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "permission": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(s3.BucketLogsPermission_Values(), false), + }, + }, + }, + }, + "target_prefix": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceBucketLoggingCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*conns.AWSClient).S3Conn + + bucket := d.Get("bucket").(string) + expectedBucketOwner := d.Get("expected_bucket_owner").(string) + + loggingEnabled := &s3.LoggingEnabled{ + TargetBucket: aws.String(d.Get("target_bucket").(string)), + TargetPrefix: aws.String(d.Get("target_prefix").(string)), + } + + if v, ok := d.GetOk("target_grant"); ok && v.(*schema.Set).Len() > 0 { + loggingEnabled.TargetGrants = expandBucketLoggingTargetGrants(v.(*schema.Set).List()) + } + + input := &s3.PutBucketLoggingInput{ + Bucket: aws.String(bucket), + BucketLoggingStatus: &s3.BucketLoggingStatus{ + LoggingEnabled: loggingEnabled, + }, + } + + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + _, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.PutBucketLoggingWithContext(ctx, input) + }) + + if err != nil { + return diag.FromErr(fmt.Errorf("error putting S3 bucket (%s) logging: %w", bucket, err)) + } + + d.SetId(resourceBucketLoggingCreateResourceID(bucket, expectedBucketOwner)) + + return resourceBucketLoggingRead(ctx, d, meta) +} + +func resourceBucketLoggingRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + conn := meta.(*conns.AWSClient).S3Conn + + bucket, expectedBucketOwner, err := resourceBucketLoggingParseResourceID(d.Id()) + if err != nil { + return diag.FromErr(err) + } + + input := &s3.GetBucketLoggingInput{ + Bucket: aws.String(bucket), + } + + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + resp, err := verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) { + return conn.GetBucketLoggingWithContext(ctx, input) + }) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + log.Printf("[WARN] S3 Bucket Logging (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return diag.FromErr(fmt.Errorf("error reading S3 Bucket (%s) Logging: %w", 
d.Id(), err))
+    }
+
+    output, ok := resp.(*s3.GetBucketLoggingOutput)
+
+    if !ok || output.LoggingEnabled == nil {
+        if d.IsNewResource() {
+            return diag.FromErr(fmt.Errorf("error reading S3 Bucket (%s) Logging: empty output", d.Id()))
+        }
+        log.Printf("[WARN] S3 Bucket Logging (%s) not found, removing from state", d.Id())
+        d.SetId("")
+        return nil
+    }
+
+    loggingEnabled := output.LoggingEnabled
+
+    d.Set("bucket", bucket)
+    d.Set("expected_bucket_owner", expectedBucketOwner)
+    d.Set("target_bucket", loggingEnabled.TargetBucket)
+    d.Set("target_prefix", loggingEnabled.TargetPrefix)
+
+    if err := d.Set("target_grant", flattenBucketLoggingTargetGrants(loggingEnabled.TargetGrants)); err != nil {
+        return diag.FromErr(fmt.Errorf("error setting target_grant: %w", err))
+    }
+
+    return nil
+}
+
+func resourceBucketLoggingUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+    conn := meta.(*conns.AWSClient).S3Conn
+
+    bucket, expectedBucketOwner, err := resourceBucketLoggingParseResourceID(d.Id())
+    if err != nil {
+        return diag.FromErr(err)
+    }
+
+    loggingEnabled := &s3.LoggingEnabled{
+        TargetBucket: aws.String(d.Get("target_bucket").(string)),
+        TargetPrefix: aws.String(d.Get("target_prefix").(string)),
+    }
+
+    if v, ok := d.GetOk("target_grant"); ok && v.(*schema.Set).Len() > 0 {
+        loggingEnabled.TargetGrants = expandBucketLoggingTargetGrants(v.(*schema.Set).List())
+    }
+
+    input := &s3.PutBucketLoggingInput{
+        Bucket: aws.String(bucket),
+        BucketLoggingStatus: &s3.BucketLoggingStatus{
+            LoggingEnabled: loggingEnabled,
+        },
+    }
+
+    if expectedBucketOwner != "" {
+        input.ExpectedBucketOwner = aws.String(expectedBucketOwner)
+    }
+
+    _, err = verify.RetryOnAWSCode(s3.ErrCodeNoSuchBucket, func() (interface{}, error) {
+        return conn.PutBucketLoggingWithContext(ctx, input)
+    })
+
+    if err != nil {
+        return diag.FromErr(fmt.Errorf("error updating S3 bucket (%s) logging: %w", d.Id(), err))
+    }
+
+    return resourceBucketLoggingRead(ctx, d, meta)
+}
+
+func resourceBucketLoggingDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+    conn := meta.(*conns.AWSClient).S3Conn
+
+    bucket, expectedBucketOwner, err := resourceBucketLoggingParseResourceID(d.Id())
+    if err != nil {
+        return diag.FromErr(err)
+    }
+
+    input := &s3.PutBucketLoggingInput{
+        Bucket:              aws.String(bucket),
+        BucketLoggingStatus: &s3.BucketLoggingStatus{},
+    }
+
+    if expectedBucketOwner != "" {
+        input.ExpectedBucketOwner = aws.String(expectedBucketOwner)
+    }
+
+    _, err = conn.PutBucketLoggingWithContext(ctx, input)
+
+    if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) {
+        return nil
+    }
+
+    if err != nil {
+        return diag.FromErr(fmt.Errorf("error deleting S3 Bucket (%s) Logging: %w", d.Id(), err))
+    }
+
+    return nil
+}
+
+func resourceBucketLoggingCreateResourceID(bucket, expectedBucketOwner string) string {
+    if bucket == "" {
+        return expectedBucketOwner
+    }
+
+    if expectedBucketOwner == "" {
+        return bucket
+    }
+
+    parts := []string{bucket, expectedBucketOwner}
+    id := strings.Join(parts, ",")
+
+    return id
+}
+
+func resourceBucketLoggingParseResourceID(id string) (string, string, error) {
+    parts := strings.Split(id, ",")
+
+    if len(parts) == 1 && parts[0] != "" {
+        return parts[0], "", nil
+    }
+
+    if len(parts) == 2 && parts[0] != "" && parts[1] != "" {
+        return parts[0], parts[1], nil
+    }
+
+    return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected BUCKET or BUCKET,EXPECTED_BUCKET_OWNER", id)
+}
+
+func expandBucketLoggingTargetGrants(l
[]interface{}) []*s3.TargetGrant { + var grants []*s3.TargetGrant + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + + grant := &s3.TargetGrant{} + + if v, ok := tfMap["grantee"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + grant.Grantee = expandBucketLoggingTargetGrantGrantee(v) + } + + if v, ok := tfMap["permission"].(string); ok && v != "" { + grant.Permission = aws.String(v) + } + + grants = append(grants, grant) + + } + + return grants +} + +func expandBucketLoggingTargetGrantGrantee(l []interface{}) *s3.Grantee { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } + + grantee := &s3.Grantee{} + + if v, ok := tfMap["display_name"].(string); ok && v != "" { + grantee.DisplayName = aws.String(v) + } + + if v, ok := tfMap["email_address"].(string); ok && v != "" { + grantee.EmailAddress = aws.String(v) + } + + if v, ok := tfMap["id"].(string); ok && v != "" { + grantee.ID = aws.String(v) + } + + if v, ok := tfMap["type"].(string); ok && v != "" { + grantee.Type = aws.String(v) + } + + if v, ok := tfMap["uri"].(string); ok && v != "" { + grantee.URI = aws.String(v) + } + + return grantee +} + +func flattenBucketLoggingTargetGrants(grants []*s3.TargetGrant) []interface{} { + var results []interface{} + + for _, grant := range grants { + if grant == nil { + continue + } + + m := make(map[string]interface{}) + + if grant.Grantee != nil { + m["grantee"] = flattenBucketLoggingTargetGrantGrantee(grant.Grantee) + } + + if grant.Permission != nil { + m["permission"] = aws.StringValue(grant.Permission) + } + + results = append(results, m) + } + + return results +} + +func flattenBucketLoggingTargetGrantGrantee(g *s3.Grantee) []interface{} { + if g == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + + if g.DisplayName != nil { + m["display_name"] = aws.StringValue(g.DisplayName) + } + + if g.EmailAddress != nil { + m["email_address"] = aws.StringValue(g.EmailAddress) + } + + if g.ID != nil { + m["id"] = aws.StringValue(g.ID) + } + + if g.Type != nil { + m["type"] = aws.StringValue(g.Type) + } + + if g.URI != nil { + m["uri"] = aws.StringValue(g.URI) + } + + return []interface{}{m} +} diff --git a/internal/service/s3/bucket_logging_test.go b/internal/service/s3/bucket_logging_test.go new file mode 100644 index 00000000000..74bef941bd4 --- /dev/null +++ b/internal/service/s3/bucket_logging_test.go @@ -0,0 +1,533 @@ +package s3_test + +import ( + "fmt" + "os" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" +) + +func TestAccS3BucketLogging_basic(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_logging.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketLoggingDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccBucketLoggingBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "bucket", rName), + resource.TestCheckResourceAttrPair(resourceName, "target_bucket", "aws_s3_bucket.log_bucket", "bucket"), + resource.TestCheckResourceAttr(resourceName, "target_prefix", "log/"), + resource.TestCheckResourceAttr(resourceName, "target_grant.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccS3BucketLogging_disappears(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_logging.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketLoggingDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketLoggingBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + acctest.CheckResourceDisappears(acctest.Provider, tfs3.ResourceBucketLogging(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccS3BucketLogging_update(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + targetBucketName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_logging.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketLoggingDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketLoggingBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + ), + }, + { + // Test updating "target_prefix" + Config: testAccBucketLoggingUpdateConfig(rName, rName, "tmp/"), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "bucket", rName), + resource.TestCheckResourceAttrPair(resourceName, "target_bucket", "aws_s3_bucket.log_bucket", "bucket"), + resource.TestCheckResourceAttr(resourceName, "target_prefix", "tmp/"), + resource.TestCheckResourceAttr(resourceName, "target_grant.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + // Test updating "target_bucket" and "target_prefix" + Config: testAccBucketLoggingUpdateConfig(rName, targetBucketName, "log/"), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "bucket", rName), + resource.TestCheckResourceAttrPair(resourceName, "target_bucket", "aws_s3_bucket.log_bucket", "bucket"), + resource.TestCheckResourceAttr(resourceName, "target_prefix", "log/"), + resource.TestCheckResourceAttr(resourceName, "target_grant.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccS3BucketLogging_TargetGrantByID(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_logging.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: 
acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketLoggingDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketLoggingTargetGrantConfig_ById(rName, s3.BucketLogsPermissionFullControl), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "target_grant.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "target_grant.*", map[string]string{ + "grantee.#": "1", + "grantee.0.type": s3.TypeCanonicalUser, + "permission": s3.BucketLogsPermissionFullControl, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "target_grant.*.grantee.0.id", "data.aws_canonical_user_id.current", "id"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "target_grant.*.grantee.0.display_name", "data.aws_canonical_user_id.current", "display_name"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBucketLoggingTargetGrantConfig_ById(rName, s3.BucketLogsPermissionRead), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "target_grant.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "target_grant.*", map[string]string{ + "grantee.#": "1", + "grantee.0.type": s3.TypeCanonicalUser, + "permission": s3.BucketLogsPermissionRead, + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "target_grant.*.grantee.0.display_name", "data.aws_canonical_user_id.current", "display_name"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBucketLoggingBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "target_grant.#", "0"), + ), + }, + }, + }) +} + +func TestAccS3BucketLogging_TargetGrantByEmail(t *testing.T) { + rEmail, ok := os.LookupEnv("AWS_S3_BUCKET_LOGGING_AMAZON_CUSTOMER_BY_EMAIL") + + if !ok { + acctest.Skip(t, "'AWS_S3_BUCKET_LOGGING_AMAZON_CUSTOMER_BY_EMAIL' not set, skipping test.") + } + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_logging.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketLoggingDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketLoggingTargetGrantConfig_ByEmail(rName, rEmail, s3.BucketLogsPermissionFullControl), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "target_grant.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "target_grant.*", map[string]string{ + "grantee.#": "1", + "grantee.0.email_address": rEmail, + "grantee.0.type": s3.TypeAmazonCustomerByEmail, + "permission": s3.BucketLogsPermissionFullControl, + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBucketLoggingTargetGrantConfig_ByEmail(rName, rEmail, s3.BucketLogsPermissionRead), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "target_grant.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "target_grant.*", map[string]string{ + "grantee.#": "1", + 
"grantee.0.email": rEmail, + "grantee.0.type": s3.TypeAmazonCustomerByEmail, + "permission": s3.BucketLogsPermissionRead, + }), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBucketLoggingBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "target_grant.#", "0"), + ), + }, + }, + }) +} + +func TestAccS3BucketLogging_TargetGrantByGroup(t *testing.T) { + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_bucket_logging.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ProviderFactories: acctest.ProviderFactories, + CheckDestroy: testAccCheckBucketLoggingDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBucketLoggingTargetGrantConfig_ByGroup(rName, s3.BucketLogsPermissionFullControl), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "target_grant.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "target_grant.*", map[string]string{ + "grantee.#": "1", + "grantee.0.type": s3.TypeGroup, + "permission": s3.BucketLogsPermissionFullControl, + }), + testAccCheckBucketLoggingTargetGrantGranteeUri(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBucketLoggingTargetGrantConfig_ByGroup(rName, s3.BucketLogsPermissionRead), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "target_grant.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "target_grant.*", map[string]string{ + "grantee.#": "1", + "grantee.0.type": s3.TypeGroup, + "permission": s3.BucketLogsPermissionRead, + }), + testAccCheckBucketLoggingTargetGrantGranteeUri(resourceName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBucketLoggingBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckBucketLoggingExists(resourceName), + resource.TestCheckResourceAttr(resourceName, "target_grant.#", "0"), + ), + }, + }, + }) +} + +func testAccCheckBucketLoggingDestroy(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_s3_bucket_logging" { + continue + } + + input := &s3.GetBucketLoggingInput{ + Bucket: aws.String(rs.Primary.ID), + } + + output, err := conn.GetBucketLogging(input) + + if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + continue + } + + if err != nil { + return fmt.Errorf("error getting S3 Bucket Logging (%s): %w", rs.Primary.ID, err) + } + + if output != nil && output.LoggingEnabled != nil { + return fmt.Errorf("S3 Bucket Logging (%s) still exists", rs.Primary.ID) + } + } + + return nil +} + +func testAccCheckBucketLoggingExists(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("Resource (%s) ID not set", resourceName) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn + + input := &s3.GetBucketLoggingInput{ + Bucket: 
aws.String(rs.Primary.ID), + } + + output, err := conn.GetBucketLogging(input) + + if err != nil { + return fmt.Errorf("error getting S3 Bucket Logging (%s): %w", rs.Primary.ID, err) + } + + if output == nil || output.LoggingEnabled == nil { + return fmt.Errorf("S3 Bucket Logging (%s) not found", rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckBucketLoggingTargetGrantGranteeUri(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + uri := fmt.Sprintf("http://acs.%s/groups/s3/LogDelivery", acctest.PartitionDNSSuffix()) + return resource.TestCheckTypeSetElemNestedAttrs(resourceName, "target_grant.*", map[string]string{ + "grantee.0.uri": uri, + })(s) + } +} + +func testAccBucketLoggingBasicConfig(rName string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "log_bucket" { + bucket = "%[1]s-log" + acl = "log-delivery-write" +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + + lifecycle { + ignore_changes = [ + logging + ] + } +} + +resource "aws_s3_bucket_logging" "test" { + bucket = aws_s3_bucket.test.id + + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" +} +`, rName) +} + +func testAccBucketLoggingUpdateConfig(rName, targetBucketName, targetPrefix string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + + lifecycle { + ignore_changes = [ + logging + ] + } +} + +resource "aws_s3_bucket" "log_bucket" { + bucket = "%[2]s-log" + acl = "log-delivery-write" +} + +resource "aws_s3_bucket_logging" "test" { + bucket = aws_s3_bucket.test.id + + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = %[3]q +} +`, rName, targetBucketName, targetPrefix) +} + +func testAccBucketLoggingTargetGrantConfig_ById(rName, permission string) string { + return fmt.Sprintf(` +data "aws_canonical_user_id" "current" {} + +resource "aws_s3_bucket" "log_bucket" { + bucket = "%[1]s-log" + acl = "log-delivery-write" +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + + lifecycle { + ignore_changes = [ + logging + ] + } +} + +resource "aws_s3_bucket_logging" "test" { + bucket = aws_s3_bucket.test.id + + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" + + target_grant { + grantee { + id = data.aws_canonical_user_id.current.id + type = "CanonicalUser" + } + permission = %[2]q + } +} +`, rName, permission) +} + +func testAccBucketLoggingTargetGrantConfig_ByEmail(rName, email, permission string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "log_bucket" { + bucket = "%[1]s-log" + acl = "log-delivery-write" +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + + lifecycle { + ignore_changes = [ + logging + ] + } +} + +resource "aws_s3_bucket_logging" "test" { + bucket = aws_s3_bucket.test.id + + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" + + target_grant { + grantee { + email_address = %[2]q + type = "AmazonCustomerByEmail" + } + permission = %[3]q + } +} +`, rName, email, permission) +} + +func testAccBucketLoggingTargetGrantConfig_ByGroup(rName, permission string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_s3_bucket" "log_bucket" { + bucket = "%[1]s-log" + acl = "log-delivery-write" +} + +resource "aws_s3_bucket" "test" { + bucket = %[1]q + acl = "private" + + lifecycle { + ignore_changes = [ + logging + ] + } +} + +resource "aws_s3_bucket_logging" "test" { + bucket = aws_s3_bucket.test.id + + target_bucket = 
aws_s3_bucket.log_bucket.id + target_prefix = "log/" + + target_grant { + grantee { + type = "Group" + # Test with S3 log delivery group + uri = "http://acs.${data.aws_partition.current.dns_suffix}/groups/s3/LogDelivery" + } + permission = %[2]q + } +} +`, rName, permission) +} diff --git a/website/docs/r/s3_bucket_logging.html.markdown b/website/docs/r/s3_bucket_logging.html.markdown new file mode 100644 index 00000000000..7be2102e33e --- /dev/null +++ b/website/docs/r/s3_bucket_logging.html.markdown @@ -0,0 +1,78 @@ +--- +subcategory: "S3" +layout: "aws" +page_title: "AWS: aws_s3_bucket_logging" +description: |- + Provides a S3 bucket logging resource. +--- + +# Resource: aws_s3_bucket_logging + +Provides a S3 bucket logging resource. + +## Example Usage + +```terraform +resource "aws_s3_bucket" "example" { + bucket = "my-tf-example-bucket" + acl = "private" +} + +resource "aws_s3_bucket" "log_bucket" { + bucket = "my-tf-log-bucket" + acl = "log-delivery-write" +} + +resource "aws_s3_bucket_logging" "example" { + bucket = aws_s3_bucket.example.id + + target_bucket = aws_s3_bucket.log_bucket.id + target_prefix = "log/" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `bucket` - (Required, Forces new resource) The name of the bucket. +* `expected_bucket_owner` - (Optional, Forces new resource) The account ID of the expected bucket owner. +* `target_bucket` - (Required) The bucket where you want Amazon S3 to store server access logs. +* `target_prefix` - (Required) A prefix for all log object keys. +* `target_grant` - (Optional) Set of configuration blocks with information for granting permissions [documented below](#target_grant). + +### target_grant + +The `target_grant` configuration block supports the following arguments: + +* `grantee` - (Required) A configuration block for the person being granted permissions [documented below](#grantee). +* `permission` - (Required) Logging permissions assigned to the grantee for the bucket. Valid values: `FULL_CONTROL`, `READ`, `WRITE`. + +### grantee + +The `grantee` configuration block supports the following arguments: + +* `email_address` - (Optional) Email address of the grantee. See [Regions and Endpoints](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) for supported AWS regions where this argument can be specified. +* `id` - (Optional) The canonical user ID of the grantee. +* `type` - (Required) Type of grantee. Valid values: `CanonicalUser`, `AmazonCustomerByEmail`, `Group`. +* `uri` - (Optional) URI of the grantee group. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `id` - The `bucket` or `bucket` and `expected_bucket_owner` separated by a comma (`,`) if the latter is provided. + +## Import + +S3 bucket logging can be imported using the `bucket` e.g., + +``` +$ terraform import aws_s3_bucket_logging.example bucket-name +``` + +In addition, S3 bucket logging can be imported using the `bucket` and `expected_bucket_owner` separated by a comma (`,`) e.g., + +``` +$ terraform import aws_s3_bucket_logging.example bucket-name,123456789012 +```
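+
+## Example Usage with Target Grants
+
+A minimal sketch building on the example above — it assumes the `aws_s3_bucket.example` and `aws_s3_bucket.log_bucket` resources shown earlier, and uses the `aws_canonical_user_id` data source to look up the caller's canonical user ID for the `grantee` block:
+
+```terraform
+data "aws_canonical_user_id" "current" {}
+
+resource "aws_s3_bucket_logging" "example" {
+  bucket = aws_s3_bucket.example.id
+
+  target_bucket = aws_s3_bucket.log_bucket.id
+  target_prefix = "log/"
+
+  # Grant the current account's canonical user READ access to the log objects.
+  target_grant {
+    grantee {
+      id   = data.aws_canonical_user_id.current.id
+      type = "CanonicalUser"
+    }
+    permission = "READ"
+  }
+}
+```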