From e8c0d62236a923b3f25e2c48cbb7fe7111714552 Mon Sep 17 00:00:00 2001 From: Anthony Wat Date: Sun, 31 Mar 2024 00:56:18 -0400 Subject: [PATCH] feat: Add s3_storage_options configuration block for aws_transfer_server --- .changelog/36664.txt | 3 ++ internal/service/transfer/server.go | 57 ++++++++++++++++++++ internal/service/transfer/server_test.go | 57 +++++++++++++++++++- internal/service/transfer/transfer_test.go | 1 + website/docs/r/transfer_server.html.markdown | 46 ++++++++++++---- 5 files changed, 152 insertions(+), 12 deletions(-) create mode 100644 .changelog/36664.txt diff --git a/.changelog/36664.txt b/.changelog/36664.txt new file mode 100644 index 00000000000..156c2326e9f --- /dev/null +++ b/.changelog/36664.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_transfer_server: Add `s3_storage_options` configuration block +``` \ No newline at end of file diff --git a/internal/service/transfer/server.go b/internal/service/transfer/server.go index 3fdc5b1ecc1..010c0b72b2a 100644 --- a/internal/service/transfer/server.go +++ b/internal/service/transfer/server.go @@ -221,6 +221,22 @@ func resourceServer() *schema.Resource { ValidateFunc: validation.StringInSlice(transfer.Protocol_Values(), false), }, }, + "s3_storage_options": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "directory_listing_optimization": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(transfer.DirectoryListingOptimization_Values(), false), + }, + }, + }, + }, "security_policy_name": { Type: schema.TypeString, Optional: true, @@ -376,6 +392,10 @@ func resourceServerCreate(ctx context.Context, d *schema.ResourceData, meta inte input.Protocols = flex.ExpandStringSet(v.(*schema.Set)) } + if v, ok := d.GetOk("s3_storage_options"); ok && len(v.([]interface{})) > 0 { + input.S3StorageOptions = 
expandS3StorageOptions(v.([]interface{})) + } + if v, ok := d.GetOk("security_policy_name"); ok { input.SecurityPolicyName = aws.String(v.(string)) } @@ -501,6 +521,9 @@ func resourceServerRead(ctx context.Context, d *schema.ResourceData, meta interf return sdkdiag.AppendErrorf(diags, "setting protocol_details: %s", err) } d.Set("protocols", aws.StringValueSlice(output.Protocols)) + if err := d.Set("s3_storage_options", flattenS3StorageOptions(output.S3StorageOptions)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting s3_storage_options: %s", err) + } d.Set("security_policy_name", output.SecurityPolicyName) d.Set("structured_log_destinations", aws.StringValueSlice(output.StructuredLogDestinations)) if output.IdentityProviderDetails != nil { @@ -676,6 +699,10 @@ func resourceServerUpdate(ctx context.Context, d *schema.ResourceData, meta inte input.Protocols = flex.ExpandStringSet(d.Get("protocols").(*schema.Set)) } + if d.HasChange("s3_storage_options") { + input.S3StorageOptions = expandS3StorageOptions(d.Get("s3_storage_options").([]interface{})) + } + if d.HasChange("security_policy_name") { input.SecurityPolicyName = aws.String(d.Get("security_policy_name").(string)) } @@ -914,6 +941,36 @@ func flattenProtocolDetails(apiObject *transfer.ProtocolDetails) []interface{} { return []interface{}{tfMap} } +func expandS3StorageOptions(m []interface{}) *transfer.S3StorageOptions { + if len(m) < 1 || m[0] == nil { + return nil + } + + tfMap := m[0].(map[string]interface{}) + + apiObject := &transfer.S3StorageOptions{} + + if v, ok := tfMap["directory_listing_optimization"].(string); ok && len(v) > 0 { + apiObject.DirectoryListingOptimization = aws.String(v) + } + + return apiObject +} + +func flattenS3StorageOptions(apiObject *transfer.S3StorageOptions) []interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.DirectoryListingOptimization; v != nil { + tfMap["directory_listing_optimization"] = 
aws.StringValue(v) + } + + return []interface{}{tfMap} +} + func expandWorkflowDetails(tfMap []interface{}) *transfer.WorkflowDetails { apiObject := &transfer.WorkflowDetails{ OnPartialUpload: []*transfer.WorkflowDetail{}, diff --git a/internal/service/transfer/server_test.go b/internal/service/transfer/server_test.go index 3b048440523..e43255f821a 100644 --- a/internal/service/transfer/server_test.go +++ b/internal/service/transfer/server_test.go @@ -72,6 +72,8 @@ func testAccServer_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "protocol_details.0.tls_session_resumption_mode", "ENFORCED"), resource.TestCheckResourceAttr(resourceName, "protocols.#", "1"), resource.TestCheckTypeSetElemAttr(resourceName, "protocols.*", "SFTP"), + resource.TestCheckResourceAttr(resourceName, "s3_storage_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "s3_storage_options.0.directory_listing_optimization", "DISABLED"), resource.TestCheckResourceAttr(resourceName, "security_policy_name", "TransferSecurityPolicy-2018-11"), resource.TestCheckResourceAttr(resourceName, "structured_log_destinations.#", "0"), resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), @@ -942,6 +944,49 @@ func testAccServer_protocolDetails(t *testing.T) { }) } +func testAccServer_s3StorageOptions(t *testing.T) { + ctx := acctest.Context(t) + var s transfer.DescribedServer + resourceName := "aws_transfer_server.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.TransferServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckServerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccServerConfig_s3StorageOptions("ENABLED"), + Check: resource.ComposeTestCheckFunc( + testAccCheckServerExists(ctx, resourceName, &s), + resource.TestCheckResourceAttr(resourceName, "s3_storage_options.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "s3_storage_options.0.directory_listing_optimization", "ENABLED"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccServerConfig_s3StorageOptions("DISABLED"), + Check: resource.ComposeTestCheckFunc( + testAccCheckServerExists(ctx, resourceName, &s), + resource.TestCheckResourceAttr(resourceName, "s3_storage_options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "s3_storage_options.0.directory_listing_optimization", "DISABLED"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + func testAccServer_apiGateway(t *testing.T) { ctx := acctest.Context(t) var conf transfer.DescribedServer @@ -2078,6 +2123,16 @@ resource "aws_transfer_server" "test" { `, rName, domain)) } +func testAccServerConfig_s3StorageOptions(directoryListingOptimization string) string { + return fmt.Sprintf(` +resource "aws_transfer_server" "test" { + s3_storage_options { + directory_listing_optimization = %[1]q + } +} +`, directoryListingOptimization) +} + func testAccServerConfig_lambdaFunctionIdentityProviderType(rName string, forceDestroy bool) string { return acctest.ConfigCompose( acctest.ConfigLambdaBase(rName, rName, rName), @@ -2088,7 +2143,7 @@ resource "aws_lambda_function" "test" { function_name = %[1]q role = aws_iam_role.iam_for_lambda.arn handler = "index.handler" - runtime = "nodejs14.x" + runtime = "nodejs20.x" } resource "aws_transfer_server" "test" { diff --git a/internal/service/transfer/transfer_test.go b/internal/service/transfer/transfer_test.go index de67d78bb3f..06ab4673cbf 100644 --- a/internal/service/transfer/transfer_test.go +++ b/internal/service/transfer/transfer_test.go @@ -41,6 +41,7 @@ func TestAccTransfer_serial(t *testing.T) { "LambdaFunction": testAccServer_lambdaFunction, 
"Protocols": testAccServer_protocols, "ProtocolDetails": testAccServer_protocolDetails, + "S3StorageOptions": testAccServer_s3StorageOptions, "SecurityPolicy": testAccServer_securityPolicy, "SecurityPolicyFIPS": testAccServer_securityPolicyFIPS, "StructuredLogDestinations": testAccServer_structuredLogDestinations, diff --git a/website/docs/r/transfer_server.html.markdown b/website/docs/r/transfer_server.html.markdown index 3b81313ccb3..bf114fe8316 100644 --- a/website/docs/r/transfer_server.html.markdown +++ b/website/docs/r/transfer_server.html.markdown @@ -132,7 +132,7 @@ This resource supports the following arguments: * `SFTP`: File transfer over SSH * `FTPS`: File transfer with TLS encryption * `FTP`: Unencrypted file transfer -* `endpoint_details` - (Optional) The virtual private cloud (VPC) endpoint settings that you want to configure for your SFTP server. Fields documented below. +* `endpoint_details` - (Optional) The virtual private cloud (VPC) endpoint settings that you want to configure for your SFTP server. See [`endpoint_details` block](#endpoint_details-block) below for details. * `endpoint_type` - (Optional) The type of endpoint that you want your SFTP server connect to. If you connect to a `VPC` (or `VPC_ENDPOINT`), your SFTP server isn't accessible over the public internet. If you want to connect your SFTP server via public internet, set `PUBLIC`. Defaults to `PUBLIC`. * `invocation_role` - (Optional) Amazon Resource Name (ARN) of the IAM role used to authenticate the user account with an `identity_provider_type` of `API_GATEWAY`. * `host_key` - (Optional) RSA, ECDSA, or ED25519 private key (e.g., as generated by the `ssh-keygen -t rsa -b 2048 -N "" -m PEM -f my-new-server-key`, `ssh-keygen -t ecdsa -b 256 -N "" -m PEM -f my-new-server-key` or `ssh-keygen -t ed25519 -N "" -f my-new-server-key` commands). 
@@ -144,7 +144,8 @@ This resource supports the following arguments: * `force_destroy` - (Optional) A boolean that indicates all users associated with the server should be deleted so that the Server can be destroyed without error. The default value is `false`. This option only applies to servers configured with a `SERVICE_MANAGED` `identity_provider_type`. * `post_authentication_login_banner`- (Optional) Specify a string to display when users connect to a server. This string is displayed after the user authenticates. The SFTP protocol does not support post-authentication display banners. * `pre_authentication_login_banner`- (Optional) Specify a string to display when users connect to a server. This string is displayed before the user authenticates. -* `protocol_details`- (Optional) The protocol settings that are configured for your server. +* `protocol_details`- (Optional) The protocol settings that are configured for your server. See [`protocol_details` block](#protocol_details-block) below for details. +* `s3_storage_options`- (Optional) Specifies whether or not performance for your Amazon S3 directories is optimized. This is disabled by default. See [`s3_storage_options` block](#s3_storage_options-block) below for details. * `security_policy_name` - (Optional) Specifies the name of the security policy that is attached to the server. Default value is: `TransferSecurityPolicy-2018-11`. The available values are: * `TransferSecurityPolicy-2024-01` * `TransferSecurityPolicy-2023-05` @@ -158,9 +159,11 @@ This resource supports the following arguments: * `TransferSecurityPolicy-PQ-SSH-FIPS-Experimental-2023-04` * `structured_log_destinations` - (Optional) A set of ARNs of destinations that will receive structured logs from the transfer server such as CloudWatch Log Group ARNs. If provided this enables the transfer server to emit structured logs to the specified locations. * `tags` - (Optional) A map of tags to assign to the resource. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `workflow_details` - (Optional) Specifies the workflow details. See Workflow Details below. +* `workflow_details` - (Optional) Specifies the workflow details. See [`workflow_details` block](#workflow_details-block) below for details. -### Endpoint Details +### `endpoint_details` block + +The `endpoint_details` configuration block supports the following arguments: * `address_allocation_ids` - (Optional) A list of address allocation IDs that are required to attach an Elastic IP address to your SFTP server's endpoint. This property can only be used when `endpoint_type` is set to `VPC`. * `security_group_ids` - (Optional) A list of security groups IDs that are available to attach to your server's endpoint. If no security groups are specified, the VPC's default security groups are automatically assigned to your endpoint. This property can only be used when `endpoint_type` is set to `VPC`. @@ -168,22 +171,43 @@ This resource supports the following arguments: * `vpc_endpoint_id` - (Optional) The ID of the VPC endpoint. This property can only be used when `endpoint_type` is set to `VPC_ENDPOINT` * `vpc_id` - (Optional) The VPC ID of the virtual private cloud in which the SFTP server's endpoint will be hosted. This property can only be used when `endpoint_type` is set to `VPC`. -### Protocol Details +### `protocol_details` block + +The `protocol_details` configuration block supports the following arguments: * `as2_transports` - (Optional) Indicates the transport method for the AS2 messages. Currently, only `HTTP` is supported. * `passive_ip` - (Optional) Indicates passive mode, for FTP and FTPS protocols. Enter a single IPv4 address, such as the public IP address of a firewall, router, or load balancer. 
* `set_stat_option` - (Optional) Use to ignore the error that is generated when the client attempts to use `SETSTAT` on a file you are uploading to an S3 bucket. Valid values: `DEFAULT`, `ENABLE_NO_OP`. * `tls_session_resumption_mode` - (Optional) A property used with Transfer Family servers that use the FTPS protocol. Provides a mechanism to resume or share a negotiated secret key between the control and data connection for an FTPS session. Valid values: `DISABLED`, `ENABLED`, `ENFORCED`. - -### Workflow Details -* `on_upload` - (Optional) A trigger that starts a workflow: the workflow begins to execute after a file is uploaded. See Workflow Detail below. -* `on_partial_upload` - (Optional) A trigger that starts a workflow if a file is only partially uploaded. See Workflow Detail below. +### `s3_storage_options` block +
+The `s3_storage_options` configuration block supports the following arguments:
+
+* `directory_listing_optimization` - (Optional) Specifies whether or not performance for your Amazon S3 directories is optimized. Valid values are `DISABLED`, `ENABLED`.
+
+  By default, home directory mappings have a `TYPE` of `DIRECTORY`. If you enable this option, you would then need to explicitly set the `HomeDirectoryMapEntry` Type to `FILE` if you want a mapping to have a file target. See [Using logical directories to simplify your Transfer Family directory structures](https://docs.aws.amazon.com/transfer/latest/userguide/logical-dir-mappings.html) for details.
+
+### `workflow_details` block
+
+The `workflow_details` configuration block supports the following arguments:
+
+* `on_upload` - (Optional) A trigger that starts a workflow: the workflow begins to execute after a file is uploaded. See [`on_upload` block](#on_upload-block) below for details.
+* `on_partial_upload` - (Optional) A trigger that starts a workflow if a file is only partially uploaded. See [`on_partial_upload` block](#on_partial_upload-block) below for details. 
+ +#### `on_upload` block + +The `on_upload` configuration block supports the following arguments: + +* `execution_role` - (Required) Includes the necessary permissions for S3, EFS, and Lambda operations that Transfer can assume, so that all workflow steps can operate on the required resources. +* `workflow_id` - (Required) A unique identifier for the workflow. + +#### `on_partial_upload` block -#### Workflow Detail +The `on_partial_upload` configuration block supports the following arguments: * `execution_role` - (Required) Includes the necessary permissions for S3, EFS, and Lambda operations that Transfer can assume, so that all workflow steps can operate on the required resources. -* `workflow_id` - (Required) A unique identifier for the workflow. +* `workflow_id` - (Required) A unique identifier for the workflow. ## Attribute Reference