diff --git a/CHANGELOG.md b/CHANGELOG.md index 9aff376e7d2..2c05a55f16d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +Release v1.54.13 (2024-07-02) +=== + +### Service Client Updates +* `service/ec2`: Updates service API and documentation + * Documentation updates for Elastic Compute Cloud (EC2). +* `service/fms`: Updates service API +* `service/s3`: Updates service API, documentation, and examples + * Added response overrides to Head Object requests. + Release v1.54.12 (2024-07-01) === diff --git a/aws/version.go b/aws/version.go index c0219fd8f6d..f5727092231 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.54.12" +const SDKVersion = "1.54.13" diff --git a/models/apis/ec2/2016-11-15/api-2.json b/models/apis/ec2/2016-11-15/api-2.json index a1e569d1b9f..3ee66003f4f 100755 --- a/models/apis/ec2/2016-11-15/api-2.json +++ b/models/apis/ec2/2016-11-15/api-2.json @@ -25182,6 +25182,7 @@ "HostTenancy":{ "type":"string", "enum":[ + "default", "dedicated", "host" ] diff --git a/models/apis/ec2/2016-11-15/docs-2.json b/models/apis/ec2/2016-11-15/docs-2.json index 902cc703ccf..304e5cc1da2 100755 --- a/models/apis/ec2/2016-11-15/docs-2.json +++ b/models/apis/ec2/2016-11-15/docs-2.json @@ -1930,7 +1930,7 @@ "BlobAttributeValue": { "base": null, "refs": { - "ModifyInstanceAttributeRequest$UserData": "
Changes the instance's user data to the specified value. If you are using an Amazon Web Services SDK or command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text."
+ "ModifyInstanceAttributeRequest$UserData": "Changes the instance's user data to the specified value. User data must be base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might be performed for you. For more information, see Work with instance user data."
} }, "BlockDeviceMapping": { @@ -2064,7 +2064,7 @@ "CreateImageRequest$DryRun": "Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.",
"CreateImageRequest$NoReboot": "Indicates whether the instance should be automatically rebooted before creating the image. Specify one of the following values: true (the instance is not rebooted before creating the image, so the contents of the file system might not be in a consistent state) or false (the instance is rebooted before creating the image, which guarantees file system integrity).

Default: false

", "CreateInstanceConnectEndpointRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", - "CreateInstanceConnectEndpointRequest$PreserveClientIp": "

Indicates whether your client's IP address is preserved as the source. The value is true or false.

Default: true

", + "CreateInstanceConnectEndpointRequest$PreserveClientIp": "

Indicates whether the client IP address is preserved as the source. The following are the possible values: true (use the client IP address as the source) or false (use the network interface IP address as the source).

Default: false

", "CreateInstanceEventWindowRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "CreateInternetGatewayRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", "CreateIpamPoolRequest$DryRun": "

Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -3046,7 +3046,7 @@ "ByoipCidrState": { "base": null, "refs": { - "ByoipCidr$State": "

The state of the address pool.

" + "ByoipCidr$State": "

The state of the address range.

" } }, "CancelBatchErrorCode": { @@ -12289,10 +12289,10 @@ "InstanceNetworkInterfaceSpecification$Ipv6PrefixCount": "

The number of IPv6 delegated prefixes to be automatically assigned to the network interface. You cannot use this option if you use the Ipv6Prefix option.

", "InstanceRequirements$SpotMaxPricePercentageOverLowestPrice": "

[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.

This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.

Default: 100

", "InstanceRequirements$OnDemandMaxPricePercentageOverLowestPrice": "

[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

To turn off price protection, specify a high value, such as 999999.

This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.

Default: 20

", - "InstanceRequirements$MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": "

[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.

", + "InstanceRequirements$MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": "

[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.

", "InstanceRequirementsRequest$SpotMaxPricePercentageOverLowestPrice": "

[Price protection] The price protection threshold for Spot Instances, as a percentage higher than an identified Spot price. The identified Spot price is the Spot price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified Spot price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose Spot price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.

This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.

Default: 100

", "InstanceRequirementsRequest$OnDemandMaxPricePercentageOverLowestPrice": "

[Price protection] The price protection threshold for On-Demand Instances, as a percentage higher than an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

To indicate no price protection threshold, specify a high value, such as 999999.

This parameter is not supported for GetSpotPlacementScores and GetInstanceTypesFromInstanceRequirements.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price.

Default: 20

", - "InstanceRequirementsRequest$MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": "

[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.

", + "InstanceRequirementsRequest$MaxSpotPriceAsPercentageOfOptimalOnDemandPrice": "

[Price protection] The price protection threshold for Spot Instances, as a percentage of an identified On-Demand price. The identified On-Demand price is the price of the lowest priced current generation C, M, or R instance type with your specified attributes. If no current generation C, M, or R instance type matches your attributes, then the identified price is from the lowest priced current generation instance types, and failing that, from the lowest priced previous generation instance types that match your attributes. When Amazon EC2 selects instance types with your attributes, it will exclude instance types whose price exceeds your specified threshold.

The parameter accepts an integer, which Amazon EC2 interprets as a percentage.

If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection threshold is based on the per vCPU or per memory price instead of the per instance price.

Only one of SpotMaxPricePercentageOverLowestPrice or MaxSpotPriceAsPercentageOfOptimalOnDemandPrice can be specified. If you don't specify either, Amazon EC2 will automatically apply optimal price protection to consistently select from a wide range of instance types. To indicate no price protection threshold for Spot Instances, meaning you want to consider all instance types that match your attributes, include one of these parameters and specify a high value, such as 999999.
",
"InstanceState$Code": "
The state of the instance as a 16-bit unsigned integer.

The high byte is all of the bits between 2^8 and (2^16)-1, which equals decimal values between 256 and 65,535. These numerical values are used for internal purposes and should be ignored.

The low byte is all of the bits between 2^0 and (2^8)-1, which equals decimal values between 0 and 255.

The valid values for instance-state-code will all be in the range of the low byte, and they are: 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), and 80 (stopped).

You can ignore the high byte value by zeroing out all of the bits above 2^8 or 256 in decimal.
",
"InstanceUsage$UsedInstanceCount": "
The number of instances the Amazon Web Services account currently has in the Capacity Reservation.

", "IpPermission$FromPort": "

If the protocol is TCP or UDP, this is the start of the port range. If the protocol is ICMP or ICMPv6, this is the ICMP type or -1 (all ICMP types).

", @@ -18329,7 +18329,7 @@ "RunInstancesUserData": { "base": null, "refs": { - "RunInstancesRequest$UserData": "

The user data script to make available to the instance. For more information, see Run commands on your Amazon EC2 instance at launch in the Amazon EC2 User Guide. If you are using a command line tool, base64-encoding is performed for you, and you can load the text from a file. Otherwise, you must provide base64-encoded text. User data is limited to 16 KB.

" + "RunInstancesRequest$UserData": "

The user data to make available to the instance. User data must be base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might be performed for you. For more information, see Work with instance user data.

" } }, "RunScheduledInstancesRequest": { @@ -19771,7 +19771,7 @@ "CreateVerifiedAccessTrustProviderRequest$Description": "

A description for the Verified Access trust provider.

", "CreateVerifiedAccessTrustProviderRequest$ClientToken": "

A unique, case-sensitive token that you provide to ensure idempotency of your modification request. For more information, see Ensuring idempotency.

", "CreateVolumePermission$UserId": "

The ID of the Amazon Web Services account to be added or removed.

", - "CreateVolumeRequest$OutpostArn": "

The Amazon Resource Name (ARN) of the Outpost.

", + "CreateVolumeRequest$OutpostArn": "

The Amazon Resource Name (ARN) of the Outpost on which to create the volume.

If you intend to use a volume with an instance running on an Outpost, then you must create the volume on the same Outpost as the instance. You can't use a volume created in an Amazon Web Services Region with an instance on an Outpost, or the other way around.

", "CreateVolumeRequest$ClientToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensure Idempotency.

", "CreateVpcEndpointConnectionNotificationRequest$ConnectionNotificationArn": "

The ARN of the SNS topic for the notifications.

", "CreateVpcEndpointConnectionNotificationRequest$ClientToken": "

Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.

", @@ -23932,7 +23932,7 @@ "refs": { "DescribeVolumeStatusRequest$VolumeIds": "

The IDs of the volumes.

Default: Describes all your volumes.

", "DescribeVolumesModificationsRequest$VolumeIds": "

The IDs of the volumes.

", - "DescribeVolumesRequest$VolumeIds": "

The volume IDs.

", + "DescribeVolumesRequest$VolumeIds": "

The volume IDs. If not specified, then all volumes are included in the response.

", "InstanceSpecification$ExcludeDataVolumeIds": "

The IDs of the data (non-root) volumes to exclude from the multi-volume snapshot set. If you specify the ID of the root volume, the request fails. To exclude the root volume, use ExcludeBootVolume.

You can specify up to 40 volume IDs per request.

" } }, diff --git a/models/apis/fms/2018-01-01/api-2.json b/models/apis/fms/2018-01-01/api-2.json index 4f729cccb45..c9bfaa4df1b 100644 --- a/models/apis/fms/2018-01-01/api-2.json +++ b/models/apis/fms/2018-01-01/api-2.json @@ -11,7 +11,8 @@ "serviceId":"FMS", "signatureVersion":"v4", "targetPrefix":"AWSFMS_20180101", - "uid":"fms-2018-01-01" + "uid":"fms-2018-01-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AssociateAdminAccount":{ @@ -1741,7 +1742,7 @@ }, "ManagedServiceData":{ "type":"string", - "max":10000, + "max":30000, "min":1, "pattern":"^((?!\\\\[nr]).)+" }, diff --git a/models/apis/s3/2006-03-01/api-2.json b/models/apis/s3/2006-03-01/api-2.json index cf7aa8e0df0..840a222c1b5 100644 --- a/models/apis/s3/2006-03-01/api-2.json +++ b/models/apis/s3/2006-03-01/api-2.json @@ -11,7 +11,8 @@ "serviceFullName":"Amazon Simple Storage Service", "serviceId":"S3", "signatureVersion":"s3", - "uid":"s3-2006-03-01" + "uid":"s3-2006-03-01", + "auth":["aws.auth#sigv4"] }, "operations":{ "AbortMultipartUpload":{ @@ -1301,7 +1302,8 @@ }, "staticContextParams":{ "UseObjectLambdaEndpoint":{"value":true} - } + }, + "unsignedPayload":true } }, "shapes":{ @@ -4819,6 +4821,36 @@ "location":"header", "locationName":"Range" }, + "ResponseCacheControl":{ + "shape":"ResponseCacheControl", + "location":"querystring", + "locationName":"response-cache-control" + }, + "ResponseContentDisposition":{ + "shape":"ResponseContentDisposition", + "location":"querystring", + "locationName":"response-content-disposition" + }, + "ResponseContentEncoding":{ + "shape":"ResponseContentEncoding", + "location":"querystring", + "locationName":"response-content-encoding" + }, + "ResponseContentLanguage":{ + "shape":"ResponseContentLanguage", + "location":"querystring", + "locationName":"response-content-language" + }, + "ResponseContentType":{ + "shape":"ResponseContentType", + "location":"querystring", + "locationName":"response-content-type" + }, + "ResponseExpires":{ + "shape":"ResponseExpires", + "location":"querystring", + "locationName":"response-expires" + }, "VersionId":{ "shape":"ObjectVersionId", "location":"querystring", diff --git a/models/apis/s3/2006-03-01/docs-2.json b/models/apis/s3/2006-03-01/docs-2.json index 7c38f97c478..b6167257243 100644 --- a/models/apis/s3/2006-03-01/docs-2.json +++ b/models/apis/s3/2006-03-01/docs-2.json @@ -4,7 +4,7 @@ "operations": { "AbortMultipartUpload": "

This operation aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.

To verify that all parts have been removed and prevent getting charged for the part storage, you should call the ListParts API operation and ensure that the parts list is empty.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Permissions
HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to AbortMultipartUpload:
",
"CompleteMultipartUpload": "
Completes a multipart upload by assembling previously uploaded parts.

You first initiate the multipart upload and then upload all parts using the UploadPart operation or the UploadPartCopy operation. After successfully uploading all relevant parts of an upload, you call this CompleteMultipartUpload operation to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the CompleteMultipartUpload request, you must provide the parts list and ensure that the parts list is complete. The CompleteMultipartUpload API operation concatenates the parts that you provide in the list. For each part in the list, you must provide the PartNumber value and the ETag value that are returned after that part was uploaded.

The processing of a CompleteMultipartUpload request could take several minutes to finalize. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK response has been sent. This means that a 200 OK response can contain either a success or an error. The error response might be embedded in the 200 OK response. If you call this API operation directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).

Note that if CompleteMultipartUpload fails, applications should be prepared to retry any failed requests (including 500 error responses). For more information, see Amazon S3 Error Best Practices.

You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload can still return a 200 OK response.

For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Permissions
Special errors
HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to CompleteMultipartUpload:

", - "CopyObject": "

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Authentication and authorization

All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation.

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

Permissions

You must have read access to the source object and write access to the destination bucket.

Response and special errors

When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds. to keep the connection alive while we copy the data.

Charge

The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to CopyObject:

", + "CopyObject": "

Creates a copy of an object that is already stored in Amazon S3.

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Authentication and authorization

All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation.

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

Permissions

You must have read access to the source object and write access to the destination bucket.

Response and special errors

When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds.

Charge

The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to CopyObject:

", "CreateBucket": "

This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts bucket, see CreateBucket .

Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

There are two types of buckets: general purpose buckets and directory buckets. For more information about these bucket types, see Creating, configuring, and working with Amazon S3 buckets in the Amazon S3 User Guide.

Permissions
HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

The following operations are related to CreateBucket:

", "CreateMultipartUpload": "

This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide.

After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.

If you have configured a lifecycle rule to abort incomplete multipart uploads, the created multipart upload must be completed within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.

Request signing

For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the Amazon S3 User Guide.

Permissions
Encryption
HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to CreateMultipartUpload:

", "CreateSession": "

Creates a session that establishes temporary security credentials to support fast authentication and authorization for the Zonal endpoint APIs on directory buckets. For more information about Zonal endpoint APIs that include the Availability Zone in the request endpoint, see S3 Express One Zone APIs in the Amazon S3 User Guide.

To make Zonal endpoint API requests on a directory bucket, use the CreateSession API operation. Specifically, you grant s3express:CreateSession permission to a bucket in a bucket policy or an IAM identity-based policy. Then, you use IAM credentials to make the CreateSession API request on the bucket, which returns temporary security credentials that include the access key ID, secret access key, session token, and expiration. These credentials have associated permissions to access the Zonal endpoint APIs. After the session is created, you don’t need to use other policies to grant permissions to each Zonal endpoint API individually. Instead, in your Zonal endpoint API requests, you sign your requests by applying the temporary security credentials of the session to the request headers and following the SigV4 protocol for authentication. You also apply the session token to the x-amz-s3session-token request header for authorization. Temporary security credentials are scoped to the bucket and expire after 5 minutes. After the expiration time, any calls that you make with those credentials will fail. You must use IAM credentials again to make a CreateSession API request that generates a new set of temporary credentials for use. Temporary credentials cannot be extended or refreshed beyond the original specified interval.

If you use Amazon Web Services SDKs, SDKs handle the session token refreshes automatically to avoid service interruptions when a session expires. We recommend that you use the Amazon Web Services SDKs to initiate and manage requests to the CreateSession API. For more information, see Performance guidelines and design patterns in the Amazon S3 User Guide.

Permissions

To obtain temporary security credentials, you must create a bucket policy or an IAM identity-based policy that grants s3express:CreateSession permission to the bucket. In a policy, you can have the s3express:SessionMode condition key to control who can create a ReadWrite or ReadOnly session. For more information about ReadWrite or ReadOnly sessions, see x-amz-create-session-mode . For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.

To grant cross-account access to Zonal endpoint APIs, the bucket policy should also grant both accounts the s3express:CreateSession permission.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

", @@ -23,7 +23,7 @@ "DeleteBucketWebsite": "

This operation is not supported by directory buckets.

This action removes the website configuration for a bucket. Amazon S3 returns a 200 OK response upon successfully deleting a website configuration on the specified bucket. You will get a 200 OK response if the website configuration you are trying to delete does not exist on the bucket. Amazon S3 returns a 404 response if the bucket specified in the request does not exist.

This DELETE action requires the S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete the website configuration attached to a bucket. However, bucket owners can grant other users permission to delete the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite permission.

For more information about hosting websites, see Hosting Websites on Amazon S3.

The following operations are related to DeleteBucketWebsite:

", "DeleteObject": "

Removes an object from a bucket. The behavior depends on the bucket's versioning state:

To remove a specific version, you must use the versionId query parameter. Using this query parameter permanently deletes the version. If the object deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker to true.

If the object you want to delete is in a bucket where the bucket versioning configuration is MFA Delete enabled, you must include the x-amz-mfa request header in the DELETE versionId request. Requests that include x-amz-mfa must use HTTPS. For more information about MFA Delete, see Using MFA Delete in the Amazon S3 User Guide. To see sample requests that use versioning, see Sample Request.

Directory buckets - MFA delete is not supported by directory buckets.

You can delete objects by explicitly calling DELETE Object or calling (PutBucketLifecycle) to enable Amazon S3 to remove them for you. If you want to block users or accounts from removing or deleting objects from your bucket, you must deny them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration actions.

Directory buckets - S3 Lifecycle is not supported by directory buckets.

Permissions
HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following action is related to DeleteObject:

", "DeleteObjectTagging": "

This operation is not supported by directory buckets.

Removes the entire tag set from the specified object. For more information about managing object tags, see Object Tagging.

To use this operation, you must have permission to perform the s3:DeleteObjectTagging action.

To delete tags of a specific object version, add the versionId query parameter in the request. You will need permission for the s3:DeleteObjectVersionTagging action.

The following operations are related to DeleteObjectTagging:

", - "DeleteObjects": "

This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.

The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.

The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion in a quiet mode, the operation does not return any information about the delete in the response body.

When performing this action on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.

Directory buckets - MFA delete is not supported by directory buckets.

Permissions
Content-MD5 request header
HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to DeleteObjects:

", + "DeleteObjects": "

This operation enables you to delete multiple objects from a bucket using a single HTTP request. If you know the object keys that you want to delete, then this operation provides a suitable alternative to sending individual delete requests, reducing per-request overhead.

The request can contain a list of up to 1000 keys that you want to delete. In the XML, you provide the object key names, and optionally, version IDs if you want to delete a specific version of the object from a versioning-enabled bucket. For each key, Amazon S3 performs a delete operation and returns the result of that delete, success or failure, in the response. Note that if the object specified in the request is not found, Amazon S3 returns the result as deleted.

The operation supports two modes for the response: verbose and quiet. By default, the operation uses verbose mode in which the response includes the result of deletion of each key in your request. In quiet mode the response includes only keys where the delete operation encountered an error. For a successful deletion in a quiet mode, the operation does not return any information about the delete in the response body.

When performing this action on an MFA Delete enabled bucket, that attempts to delete any versioned objects, you must include an MFA token. If you do not provide one, the entire request will fail, even if there are non-versioned objects you are trying to delete. If you provide an invalid token, whether there are versioned keys in the request or not, the entire Multi-Object Delete request will fail. For information about MFA Delete, see MFA Delete in the Amazon S3 User Guide.

Directory buckets - MFA delete is not supported by directory buckets.

Permissions
Content-MD5 request header
HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to DeleteObjects:
",
"DeletePublicAccessBlock": "
This operation is not supported by directory buckets.

Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The following operations are related to DeletePublicAccessBlock:

", "GetBucketAccelerateConfiguration": "

This operation is not supported by directory buckets.

This implementation of the GET action uses the accelerate subresource to return the Transfer Acceleration state of a bucket, which is either Enabled or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that enables you to perform faster data transfers to and from Amazon S3.

To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.

You set the Transfer Acceleration state of an existing bucket to Enabled or Suspended by using the PutBucketAccelerateConfiguration operation.

A GET accelerate request does not return a state value for a bucket that has no transfer acceleration state. A bucket has no Transfer Acceleration state if a state has never been set on the bucket.

For more information about transfer acceleration, see Transfer Acceleration in the Amazon S3 User Guide.

The following operations are related to GetBucketAccelerateConfiguration:

", "GetBucketAcl": "

This operation is not supported by directory buckets.

This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have the READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.

When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes.

If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.

The following operations are related to GetBucketAcl:

", @@ -99,7 +99,7 @@ "RestoreObject": "

This operation is not supported by directory buckets.

Restores an archived copy of an object back into Amazon S3

This functionality is not supported for Amazon S3 on Outposts.

This action performs the following types of requests:

For more information about the S3 structure in the request body, see the following:

Permissions

To use this operation, you must have permissions to perform the s3:RestoreObject action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

Restoring objects

Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier.

To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.

When restoring an archived object, you can specify one of the following data access tier options in the Tier element of the request body: Expedited, Standard, or Bulk.

For more information about archive retrieval options and provisioned capacity for Expedited data access, see Restoring Archived Objects in the Amazon S3 User Guide.

You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide.

To get the status of object restoration, you can send a HEAD request. Operations return the x-amz-restore header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide.

After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.

If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in Amazon S3 User Guide.

Responses

A successful action returns either the 200 OK or 202 Accepted status code.

The following operations are related to RestoreObject:

", "SelectObjectContent": "

This operation is not supported by directory buckets.

This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.

This functionality is not supported for Amazon S3 on Outposts.

For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide.

Permissions

You must have the s3:GetObject permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide.

Object Data Formats

You can use Amazon S3 Select to query objects that have the following format properties:

Working with the Response Body

Given the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding header with chunked as its value in the response. For more information, see Appendix: SelectObjectContent Response.

GetObject Support

The SelectObjectContent action does not support the following GetObject functionality. For more information, see GetObject.

Special Errors

For a list of special errors for this operation, see List of SELECT Object Content Error Codes

The following operations are related to SelectObjectContent:

", "UploadPart": "

Uploads a part in a multipart upload.

In this operation, you provide new data as a part of an object in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.

You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier that you must include in your upload part request.

Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten.

For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.

After you initiate multipart upload and upload one or more parts, you must either complete or abort multipart upload in order to stop getting charged for storage of the uploaded parts. Only after you either complete or abort multipart upload, Amazon S3 frees up the parts storage and stops charging you for the parts storage.

For more information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 User Guide .

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Permissions
Data integrity

General purpose bucket - To ensure that data is not corrupted traversing the network, specify the Content-MD5 header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error. If the upload request is signed with Signature Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header as a checksum instead of Content-MD5. For more information see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).

Directory buckets - MD5 is not supported by directory buckets. You can use checksum algorithms to check object integrity.

Encryption

For more information, see Using Server-Side Encryption in the Amazon S3 User Guide.

Special errors
HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to UploadPart:
",
- "UploadPartCopy": "
Uploads a part by copying data from an existing object as data source. To specify the data source, you add the request header x-amz-copy-source in your request. To specify a byte range, you add the request header x-amz-copy-source-range in your request.

For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.

Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.

You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.

For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Authentication and authorization

All UploadPartCopy requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy API operation, instead of using the temporary security credentials through the CreateSession API operation.

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

Permissions

You must have READ access to the source object and WRITE access to the destination bucket.

Encryption
Special errors
HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to UploadPartCopy:

", + "UploadPartCopy": "

Uploads a part by copying data from an existing object as the data source. To specify the data source, add the request header x-amz-copy-source to your request. To specify a byte range, add the request header x-amz-copy-source-range to your request.
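A minimal sketch with this SDK, assuming placeholder bucket names, key, upload ID, and range; CopySource and CopySourceRange are sent as the two headers named above:

// Copy the first 5 MiB of an existing object as part 1 of the multipart upload.
part, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
	Bucket:          aws.String("destinationbucket"),
	Key:             aws.String("largeobject"),
	UploadId:        aws.String("exampleuploadid"),
	PartNumber:      aws.Int64(1),
	CopySource:      aws.String("sourcebucket/sourceobject"), // x-amz-copy-source
	CopySourceRange: aws.String("bytes=0-5242879"),           // x-amz-copy-source-range
})
// On success, part.CopyPartResult.ETag is used when completing the upload.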

For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.

Instead of copying data from an existing object as part data, you might use the UploadPart action to upload new data as a part of an object in your request.

You must initiate a multipart upload before you can upload any part. In response to your initiate request, Amazon S3 returns the upload ID, a unique identifier that you must include in your upload part request.

For conceptual information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide. For information about copying objects using a single atomic action vs. a multipart upload, see Operations on Objects in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name. Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User Guide.

Authentication and authorization

All UploadPartCopy requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

Directory buckets - You must use IAM credentials to authenticate and authorize your access to the UploadPartCopy API operation, instead of using the temporary security credentials through the CreateSession API operation.

The Amazon Web Services CLI and SDKs handle authentication and authorization on your behalf.

Permissions

You must have READ access to the source object and WRITE access to the destination bucket.

Encryption
Special errors
HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.

The following operations are related to UploadPartCopy:

", "WriteGetObjectResponse": "

This operation is not supported by directory buckets.

Passes transformed objects to a GetObject operation when using Object Lambda access points. For information about Object Lambda access points, see Transforming objects with Object Lambda access points in the Amazon S3 User Guide.

This operation supports metadata that can be returned by GetObject, in addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage. The GetObject response metadata is supported so that the WriteGetObjectResponse caller, typically a Lambda function, can provide the same metadata when it internally invokes GetObject. When WriteGetObjectResponse is called by a customer-owned Lambda function, the metadata returned to the end user's GetObject call might differ from what Amazon S3 would normally return.

You can include any number of metadata headers. When you include a metadata header, prefix it with x-amz-meta. For example, x-amz-meta-my-custom-header: MyCustomValue. The primary use case for this is to forward GetObject metadata.
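As an illustrative sketch (not one of AWS's prebuilt functions): requestRoute, requestToken, and transformed are placeholders that an Object Lambda handler would take from its invocation event, and the metadata key is hypothetical:

_, err := svc.WriteGetObjectResponse(&s3.WriteGetObjectResponseInput{
	RequestRoute: aws.String(requestRoute), // routing token from the event
	RequestToken: aws.String(requestToken), // opaque token from the event
	StatusCode:   aws.Int64(200),
	Metadata: map[string]*string{
		// delivered to the caller as x-amz-meta-my-custom-header
		"my-custom-header": aws.String("MyCustomValue"),
	},
	Body: aws.ReadSeekCloser(strings.NewReader(transformed)),
})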

Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to detect and redact personally identifiable information (PII) and decompress S3 objects. These Lambda functions are available in the Amazon Web Services Serverless Application Repository, and can be selected through the Amazon Web Services Management Console when you create your Object Lambda access point.

Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically detects personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.

Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically redacts personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.

Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression is equipped to decompress objects stored in S3 in one of six compressed file formats: bzip2, gzip, snappy, zlib, zstandard, and ZIP.

For information on how to view and use these functions, see Using Amazon Web Services built Lambda functions in the Amazon S3 User Guide.

" }, "shapes": { @@ -1404,10 +1404,10 @@ "ListMultipartUploadsRequest$EncodingType": null, "ListObjectVersionsOutput$EncodingType": "

Encoding type used by Amazon S3 to encode object key names in the XML response.

If you specify the encoding-type request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:

KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter.

", "ListObjectVersionsRequest$EncodingType": null, - "ListObjectsOutput$EncodingType": "

Encoding type used by Amazon S3 to encode object keys in the response. If using url, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png will appear as test_file%283%29.png.

", + "ListObjectsOutput$EncodingType": "

Encoding type used by Amazon S3 to encode object keys in the response. If you specify url, non-ASCII characters in an object's key name are URL-encoded. For example, the object test_file(3).png will appear as test_file%283%29.png.

", "ListObjectsRequest$EncodingType": null, "ListObjectsV2Output$EncodingType": "

Encoding type used by Amazon S3 to encode object key names in the XML response.

If you specify the encoding-type request parameter, Amazon S3 includes this element in the response, and returns encoded key name values in the following response elements:

Delimiter, Prefix, Key, and StartAfter.

", - "ListObjectsV2Request$EncodingType": "

Encoding type used by Amazon S3 to encode object keys in the response. If using url, non-ASCII characters used in an object's key name will be URL encoded. For example, the object test_file(3).png will appear as test_file%283%29.png.

" + "ListObjectsV2Request$EncodingType": "

Encoding type used by Amazon S3 to encode object keys in the response. If you specify url, non-ASCII characters in an object's key name are URL-encoded. For example, the object test_file(3).png will appear as test_file%283%29.png.
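A short sketch of opting in and decoding the keys afterward (bucket name is a placeholder; imports: net/url):

out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
	Bucket:       aws.String("examplebucket"),
	EncodingType: aws.String(s3.EncodingTypeUrl), // "url"
})
if err == nil {
	for _, obj := range out.Contents {
		// "test_file%283%29.png" decodes back to "test_file(3).png"
		key, _ := url.QueryUnescape(aws.StringValue(obj.Key))
		_ = key
	}
}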

" } }, "Encryption": { @@ -3784,37 +3784,43 @@ "ResponseCacheControl": { "base": null, "refs": { - "GetObjectRequest$ResponseCacheControl": "

Sets the Cache-Control header of the response.

" + "GetObjectRequest$ResponseCacheControl": "

Sets the Cache-Control header of the response.

", + "HeadObjectRequest$ResponseCacheControl": "

Sets the Cache-Control header of the response.

" } }, "ResponseContentDisposition": { "base": null, "refs": { - "GetObjectRequest$ResponseContentDisposition": "

Sets the Content-Disposition header of the response.

" + "GetObjectRequest$ResponseContentDisposition": "

Sets the Content-Disposition header of the response.

", + "HeadObjectRequest$ResponseContentDisposition": "

Sets the Content-Disposition header of the response.

" } }, "ResponseContentEncoding": { "base": null, "refs": { - "GetObjectRequest$ResponseContentEncoding": "

Sets the Content-Encoding header of the response.

" + "GetObjectRequest$ResponseContentEncoding": "

Sets the Content-Encoding header of the response.

", + "HeadObjectRequest$ResponseContentEncoding": "

Sets the Content-Encoding header of the response.

" } }, "ResponseContentLanguage": { "base": null, "refs": { - "GetObjectRequest$ResponseContentLanguage": "

Sets the Content-Language header of the response.

" + "GetObjectRequest$ResponseContentLanguage": "

Sets the Content-Language header of the response.

", + "HeadObjectRequest$ResponseContentLanguage": "

Sets the Content-Language header of the response.

" } }, "ResponseContentType": { "base": null, "refs": { - "GetObjectRequest$ResponseContentType": "

Sets the Content-Type header of the response.

" + "GetObjectRequest$ResponseContentType": "

Sets the Content-Type header of the response.

", + "HeadObjectRequest$ResponseContentType": "

Sets the Content-Type header of the response.

" } }, "ResponseExpires": { "base": null, "refs": { - "GetObjectRequest$ResponseExpires": "

Sets the Expires header of the response.

" + "GetObjectRequest$ResponseExpires": "

Sets the Expires header of the response.

", + "HeadObjectRequest$ResponseExpires": "

Sets the Expires header of the response.
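These six parameters are the Head Object response overrides added in this release; they mirror the long-standing GetObject overrides. A minimal sketch (bucket and key are placeholders):

out, err := svc.HeadObject(&s3.HeadObjectInput{
	Bucket:               aws.String("examplebucket"),
	Key:                  aws.String("exampleobject"),
	ResponseCacheControl: aws.String("no-cache"),
	ResponseContentType:  aws.String("application/octet-stream"),
})
// On success, out.CacheControl and out.ContentType carry the overridden values.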

" } }, "Restore": { @@ -4099,7 +4105,7 @@ "SessionCredentials": { "base": "

The established temporary security credentials of the session.

Directory buckets - These session credentials are only supported for the authentication and authorization of Zonal endpoint APIs on directory buckets.
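For illustration only, a direct call might look like this sketch (the directory bucket name is a placeholder; in practice the SDK usually creates and refreshes these credentials for you):

out, err := svc.CreateSession(&s3.CreateSessionInput{
	Bucket: aws.String("examplebucket--usw2-az1--x-s3"), // placeholder directory bucket
})
// out.Credentials holds the temporary AccessKeyId, SecretAccessKey,
// SessionToken, and Expiration used to sign Zonal endpoint requests.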

", "refs": { - "CreateSessionOutput$Credentials": "

The established temporary security credentials for the created session..

" + "CreateSessionOutput$Credentials": "

The established temporary security credentials for the created session.

" } }, "SessionExpiration": { @@ -4225,7 +4231,7 @@ "Suffix": { "base": null, "refs": { - "IndexDocument$Suffix": "

A suffix that is appended to a request that is for a directory on the website endpoint (for example,if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html) The suffix must not be empty and must not include a slash character.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" + "IndexDocument$Suffix": "

A suffix that is appended to a request that is for a directory on the website endpoint. (For example, if the suffix is index.html and you make a request to samplebucket/images/, the data that is returned will be for the object with the key name images/index.html.) The suffix must not be empty and must not include a slash character.

Replacement must be made for object keys containing special characters (such as carriage returns) when using XML requests. For more information, see XML related object key constraints.

" } }, "Tag": { @@ -4444,8 +4450,8 @@ "VersionCount": { "base": null, "refs": { - "NoncurrentVersionExpiration$NewerNoncurrentVersions": "

Specifies how many newer noncurrent versions must exist before Amazon S3 can perform the associated action on a given version. If there are this many more recent noncurrent versions, Amazon S3 will take the associated action. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.

", - "NoncurrentVersionTransition$NewerNoncurrentVersions": "

Specifies how many newer noncurrent versions must exist before Amazon S3 can perform the associated action on a given version. If there are this many more recent noncurrent versions, Amazon S3 will take the associated action. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.

" + "NoncurrentVersionExpiration$NewerNoncurrentVersions": "

Specifies how many noncurrent versions Amazon S3 will retain. You can specify up to 100 noncurrent versions to retain. Amazon S3 will permanently delete any additional noncurrent versions beyond the specified number to retain. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.

", + "NoncurrentVersionTransition$NewerNoncurrentVersions": "

Specifies how many noncurrent versions Amazon S3 will retain in the same storage class before transitioning objects. You can specify up to 100 noncurrent versions to retain. Amazon S3 will transition any additional noncurrent versions beyond the specified number to retain. For more information about noncurrent versions, see Lifecycle configuration elements in the Amazon S3 User Guide.
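A sketch of setting this on a lifecycle rule with this SDK (bucket name, rule ID, and the counts are placeholders; expiration is shown, and NoncurrentVersionTransition takes the same field):

// Keep the five newest noncurrent versions; expire the rest once they
// have been noncurrent for 30 days.
_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
	Bucket: aws.String("examplebucket"),
	LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
		Rules: []*s3.LifecycleRule{{
			ID:     aws.String("retain-newest-noncurrent"),
			Status: aws.String("Enabled"),
			Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("")},
			NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
				NoncurrentDays:          aws.Int64(30),
				NewerNoncurrentVersions: aws.Int64(5),
			},
		}},
	},
})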

" } }, "VersionIdMarker": { diff --git a/models/apis/s3/2006-03-01/examples-1.json b/models/apis/s3/2006-03-01/examples-1.json index e2c4125fd6d..e8572583fc3 100644 --- a/models/apis/s3/2006-03-01/examples-1.json +++ b/models/apis/s3/2006-03-01/examples-1.json @@ -84,10 +84,13 @@ "CreateBucket": [ { "input": { - "Bucket": "examplebucket" + "Bucket": "examplebucket", + "CreateBucketConfiguration": { + "LocationConstraint": "eu-west-1" + } }, "output": { - "Location": "/examplebucket" + "Location": "http://examplebucket..s3.amazonaws.com/" }, "comments": { "input": { @@ -95,19 +98,16 @@ "output": { } }, - "description": "The following example creates a bucket.", - "id": "to-create-a-bucket--1472851826060", - "title": "To create a bucket " + "description": "The following example creates a bucket. The request specifies an AWS region where to create the bucket.", + "id": "to-create-a-bucket-in-a-specific-region-1483399072992", + "title": "To create a bucket in a specific region" }, { "input": { - "Bucket": "examplebucket", - "CreateBucketConfiguration": { - "LocationConstraint": "eu-west-1" - } + "Bucket": "examplebucket" }, "output": { - "Location": "http://examplebucket..s3.amazonaws.com/" + "Location": "/examplebucket" }, "comments": { "input": { @@ -115,9 +115,9 @@ "output": { } }, - "description": "The following example creates a bucket. The request specifies an AWS region where to create the bucket.", - "id": "to-create-a-bucket-in-a-specific-region-1483399072992", - "title": "To create a bucket in a specific region" + "description": "The following example creates a bucket.", + "id": "to-create-a-bucket--1472851826060", + "title": "To create a bucket " } ], "CreateMultipartUpload": [ @@ -257,8 +257,10 @@ "DeleteObject": [ { "input": { - "Bucket": "ExampleBucket", - "Key": "HappyFace.jpg" + "Bucket": "examplebucket", + "Key": "objectkey.jpg" + }, + "output": { }, "comments": { "input": { @@ -266,16 +268,14 @@ "output": { } }, - "description": "The following example deletes an object from a non-versioned bucket.", - "id": "to-delete-an-object-from-a-non-versioned-bucket-1481588533089", - "title": "To delete an object (from a non-versioned bucket)" + "description": "The following example deletes an object from an S3 bucket.", + "id": "to-delete-an-object-1472850136595", + "title": "To delete an object" }, { "input": { - "Bucket": "examplebucket", - "Key": "objectkey.jpg" - }, - "output": { + "Bucket": "ExampleBucket", + "Key": "HappyFace.jpg" }, "comments": { "input": { @@ -283,9 +283,9 @@ "output": { } }, - "description": "The following example deletes an object from an S3 bucket.", - "id": "to-delete-an-object-1472850136595", - "title": "To delete an object" + "description": "The following example deletes an object from a non-versioned bucket.", + "id": "to-delete-an-object-from-a-non-versioned-bucket-1481588533089", + "title": "To delete an object (from a non-versioned bucket)" } ], "DeleteObjectTagging": [ @@ -334,12 +334,10 @@ "Delete": { "Objects": [ { - "Key": "HappyFace.jpg", - "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" + "Key": "objectkey1" }, { - "Key": "HappyFace.jpg", - "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" + "Key": "objectkey2" } ], "Quiet": false @@ -348,12 +346,14 @@ "output": { "Deleted": [ { - "Key": "HappyFace.jpg", - "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" + "DeleteMarker": "true", + "DeleteMarkerVersionId": "A._w1z6EFiCF5uhtQMDal9JDkID9tQ7F", + "Key": "objectkey1" }, { - "Key": "HappyFace.jpg", - "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" + 
"DeleteMarker": "true", + "DeleteMarkerVersionId": "iOd_ORxhkKe_e8G8_oSGxt2PjsCZKlkt", + "Key": "objectkey2" } ] }, @@ -363,9 +363,9 @@ "output": { } }, - "description": "The following example deletes objects from a bucket. The request specifies object versions. S3 deletes specific object versions and returns the key and versions of deleted objects in the response.", - "id": "to-delete-multiple-object-versions-from-a-versioned-bucket-1483147087737", - "title": "To delete multiple object versions from a versioned bucket" + "description": "The following example deletes objects from a bucket. The bucket is versioned, and the request does not specify the object version to delete. In this case, all versions remain in the bucket and S3 adds a delete marker.", + "id": "to-delete-multiple-objects-from-a-versioned-bucket-1483146248805", + "title": "To delete multiple objects from a versioned bucket" }, { "input": { @@ -373,10 +373,12 @@ "Delete": { "Objects": [ { - "Key": "objectkey1" + "Key": "HappyFace.jpg", + "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" }, { - "Key": "objectkey2" + "Key": "HappyFace.jpg", + "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" } ], "Quiet": false @@ -385,14 +387,12 @@ "output": { "Deleted": [ { - "DeleteMarker": "true", - "DeleteMarkerVersionId": "A._w1z6EFiCF5uhtQMDal9JDkID9tQ7F", - "Key": "objectkey1" + "Key": "HappyFace.jpg", + "VersionId": "yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd" }, { - "DeleteMarker": "true", - "DeleteMarkerVersionId": "iOd_ORxhkKe_e8G8_oSGxt2PjsCZKlkt", - "Key": "objectkey2" + "Key": "HappyFace.jpg", + "VersionId": "2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b" } ] }, @@ -402,9 +402,9 @@ "output": { } }, - "description": "The following example deletes objects from a bucket. The bucket is versioned, and the request does not specify the object version to delete. In this case, all versions remain in the bucket and S3 adds a delete marker.", - "id": "to-delete-multiple-objects-from-a-versioned-bucket-1483146248805", - "title": "To delete multiple objects from a versioned bucket" + "description": "The following example deletes objects from a bucket. The request specifies object versions. S3 deletes specific object versions and returns the key and versions of deleted objects in the response.", + "id": "to-delete-multiple-object-versions-from-a-versioned-bucket-1483147087737", + "title": "To delete multiple object versions from a versioned bucket" } ], "GetBucketCors": [ @@ -840,20 +840,17 @@ { "input": { "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "exampleobject", + "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" }, "output": { "TagSet": [ { - "Key": "Key4", - "Value": "Value4" - }, - { - "Key": "Key3", - "Value": "Value3" + "Key": "Key1", + "Value": "Value1" } ], - "VersionId": "null" + "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" }, "comments": { "input": { @@ -861,24 +858,27 @@ "output": { } }, - "description": "The following example retrieves tag set of an object.", - "id": "to-retrieve-tag-set-of-an-object-1481833847896", - "title": "To retrieve tag set of an object" + "description": "The following example retrieves tag set of an object. 
The request specifies object version.", + "id": "to-retrieve-tag-set-of-a-specific-object-version-1483400283663", + "title": "To retrieve tag set of a specific object version" }, { "input": { "Bucket": "examplebucket", - "Key": "exampleobject", - "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" + "Key": "HappyFace.jpg" }, "output": { "TagSet": [ { - "Key": "Key1", - "Value": "Value1" + "Key": "Key4", + "Value": "Value4" + }, + { + "Key": "Key3", + "Value": "Value3" } ], - "VersionId": "ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI" + "VersionId": "null" }, "comments": { "input": { @@ -886,9 +886,9 @@ "output": { } }, - "description": "The following example retrieves tag set of an object. The request specifies object version.", - "id": "to-retrieve-tag-set-of-a-specific-object-version-1483400283663", - "title": "To retrieve tag set of a specific object version" + "description": "The following example retrieves tag set of an object.", + "id": "to-retrieve-tag-set-of-an-object-1481833847896", + "title": "To retrieve tag set of an object" } ], "GetObjectTorrent": [ @@ -1567,16 +1567,13 @@ "PutObject": [ { "input": { - "Body": "filetoupload", + "Body": "HappyFace.jpg", "Bucket": "examplebucket", - "Key": "exampleobject", - "ServerSideEncryption": "AES256", - "Tagging": "key1=value1&key2=value2" + "Key": "HappyFace.jpg" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "ServerSideEncryption": "AES256", - "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt" + "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk" }, "comments": { "input": { @@ -1584,19 +1581,22 @@ "output": { } }, - "description": "The following example uploads an object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-an-object-and-specify-server-side-encryption-and-object-tags-1483398331831", - "title": "To upload an object and specify server-side encryption and object tags" + "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.", + "id": "to-upload-an-object-1481760101010", + "title": "To upload an object" }, { "input": { "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "objectkey" + "Key": "exampleobject", + "ServerSideEncryption": "AES256", + "Tagging": "key1=value1&key2=value2" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ" + "ServerSideEncryption": "AES256", + "VersionId": "Ri.vC6qVlA4dEnjgRV4ZHsHoFIjqEMNt" }, "comments": { "input": { @@ -1604,9 +1604,9 @@ "output": { } }, - "description": "The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-create-an-object-1483147613675", - "title": "To create an object." + "description": "The following example uploads an object. The request specifies the optional server-side encryption option. The request also specifies optional object tags. 
If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-an-object-and-specify-server-side-encryption-and-object-tags-1483398331831", + "title": "To upload an object and specify server-side encryption and object tags" }, { "input": { @@ -1633,14 +1633,17 @@ }, { "input": { - "Body": "c:\\HappyFace.jpg", + "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "HappyFace.jpg", - "Tagging": "key1=value1&key2=value2" + "Key": "exampleobject", + "Metadata": { + "metadata1": "value1", + "metadata2": "value2" + } }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a" + "VersionId": "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0" }, "comments": { "input": { @@ -1648,23 +1651,20 @@ "output": { } }, - "description": "The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore S3 returns version ID of the newly created object.", - "id": "to-upload-an-object-and-specify-optional-tags-1481762310955", - "title": "To upload an object and specify optional tags" + "description": "The following example creates an object. The request also specifies optional metadata. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-upload-object-and-specify-user-defined-metadata-1483396974757", + "title": "To upload object and specify user-defined metadata" }, { "input": { - "Body": "filetoupload", + "Body": "c:\\HappyFace.jpg", "Bucket": "examplebucket", - "Key": "exampleobject", - "Metadata": { - "metadata1": "value1", - "metadata2": "value2" - } + "Key": "HappyFace.jpg", + "Tagging": "key1=value1&key2=value2" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "pSKidl4pHBiNwukdbcPXAIs.sshFFOc0" + "VersionId": "psM2sYY4.o1501dSx8wMvnkOzSBB.V4a" }, "comments": { "input": { @@ -1672,9 +1672,9 @@ "output": { } }, - "description": "The following example creates an object. The request also specifies optional metadata. If the bucket is versioning enabled, S3 returns version ID in response.", - "id": "to-upload-object-and-specify-user-defined-metadata-1483396974757", - "title": "To upload object and specify user-defined metadata" + "description": "The following example uploads an object. The request specifies optional object tags. The bucket is versioned, therefore S3 returns version ID of the newly created object.", + "id": "to-upload-an-object-and-specify-optional-tags-1481762310955", + "title": "To upload an object and specify optional tags" }, { "input": { @@ -1699,13 +1699,13 @@ }, { "input": { - "Body": "HappyFace.jpg", + "Body": "filetoupload", "Bucket": "examplebucket", - "Key": "HappyFace.jpg" + "Key": "objectkey" }, "output": { "ETag": "\"6805f2cfc46c0f04559748bb039d69ae\"", - "VersionId": "tpf3zF08nBplQK1XLOefGskR7mGDwcDk" + "VersionId": "Bvq0EDKxOcXLJXNo_Lkz37eM3R4pfzyQ" }, "comments": { "input": { @@ -1713,9 +1713,9 @@ "output": { } }, - "description": "The following example uploads an object to a versioning-enabled bucket. The source file is specified using Windows file syntax. S3 returns VersionId of the newly created object.", - "id": "to-upload-an-object-1481760101010", - "title": "To upload an object" + "description": "The following example creates an object. If the bucket is versioning enabled, S3 returns version ID in response.", + "id": "to-create-an-object-1483147613675", + "title": "To create an object." 
} ], "PutObjectAcl": [ diff --git a/service/ec2/api.go b/service/ec2/api.go index b05879b01ac..5d51566f953 100644 --- a/service/ec2/api.go +++ b/service/ec2/api.go @@ -65051,7 +65051,30 @@ type ByoipCidr struct { // this time. NetworkBorderGroup *string `locationName:"networkBorderGroup" type:"string"` - // The state of the address pool. + // The state of the address range. + // + // * advertised: The address range is being advertised to the internet by + // Amazon Web Services. + // + // * deprovisioned: The address range is deprovisioned. + // + // * failed-deprovision: The request to deprovision the address range was + // unsuccessful. Ensure that all EIPs from the range have been deallocated + // and try again. + // + // * failed-provision: The request to provision the address range was unsuccessful. + // + // * pending-deprovision: You’ve submitted a request to deprovision an + // address range and it's pending. + // + // * pending-provision: You’ve submitted a request to provision an address + // range and it's pending. + // + // * provisioned: The address range is provisioned and can be advertised. + // The range is not currently advertised. + // + // * provisioned-not-publicly-advertisable: The address range is provisioned + // and cannot be advertised. State *string `locationName:"state" type:"string" enum:"ByoipCidrState"` // Upon success, contains the ID of the address pool. Otherwise, contains an @@ -73326,15 +73349,14 @@ type CreateInstanceConnectEndpointInput struct { // it is UnauthorizedOperation. DryRun *bool `type:"boolean"` - // Indicates whether your client's IP address is preserved as the source. The - // value is true or false. + // Indicates whether the client IP address is preserved as the source. The following + // are the possible values. // - // * If true, your client's IP address is used when you connect to a resource. + // * true - Use the client IP address as the source. // - // * If false, the elastic network interface IP address is used when you - // connect to a resource. + // * false - Use the network interface IP address as the source. // - // Default: true + // Default: false PreserveClientIp *bool `type:"boolean"` // One or more security groups to associate with the endpoint. If you don't @@ -81941,7 +81963,12 @@ type CreateVolumeInput struct { // in the Amazon EBS User Guide. MultiAttachEnabled *bool `type:"boolean"` - // The Amazon Resource Name (ARN) of the Outpost. + // The Amazon Resource Name (ARN) of the Outpost on which to create the volume. + // + // If you intend to use a volume with an instance running on an outpost, then + // you must create the volume on the same outpost as the instance. You can't + // use a volume created in an Amazon Web Services Region with an instance on + // an Amazon Web Services outpost, or the other way around. OutpostArn *string `type:"string"` // The size of the volume, in GiBs. You must specify either a snapshot ID or @@ -111423,7 +111450,7 @@ type DescribeVolumesInput struct { // from the end of the items returned by the previous request. NextToken *string `locationName:"nextToken" type:"string"` - // The volume IDs. + // The volume IDs. If not specified, then all volumes are included in the response. VolumeIds []*string `locationName:"VolumeId" locationNameList:"VolumeId" type:"list"` } @@ -138008,7 +138035,7 @@ type InstanceRequirements struct { // // The parameter accepts an integer, which Amazon EC2 interprets as a percentage. 
// - // If you set DesiredCapacityType to vcpu or memory-mib, the price protection + // If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection // threshold is based on the per vCPU or per memory price instead of the per // instance price. // @@ -138500,7 +138527,7 @@ type InstanceRequirementsRequest struct { // // The parameter accepts an integer, which Amazon EC2 interprets as a percentage. // - // If you set DesiredCapacityType to vcpu or memory-mib, the price protection + // If you set TargetCapacityUnitType to vcpu or memory-mib, the price protection // threshold is based on the per vCPU or per memory price instead of the per // instance price. // @@ -150273,10 +150300,10 @@ type ModifyInstanceAttributeInput struct { // a PV instance can make it unreachable. SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` - // Changes the instance's user data to the specified value. If you are using - // an Amazon Web Services SDK or command line tool, base64-encoding is performed - // for you, and you can load the text from a file. Otherwise, you must provide - // base64-encoded text. + // Changes the instance's user data to the specified value. User data must be + // base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding + // might be performed for you. For more information, see Work with instance + // user data (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-add-user-data.html). UserData *BlobAttributeValue `locationName:"userData" type:"structure"` // A new value for the attribute. Use only with the kernel, ramdisk, userData, @@ -173075,11 +173102,10 @@ type RunInstancesInput struct { // To tag a resource after it has been created, see CreateTags (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateTags.html). TagSpecifications []*TagSpecification `locationName:"TagSpecification" locationNameList:"item" type:"list"` - // The user data script to make available to the instance. For more information, - // see Run commands on your Amazon EC2 instance at launch (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) - // in the Amazon EC2 User Guide. If you are using a command line tool, base64-encoding - // is performed for you, and you can load the text from a file. Otherwise, you - // must provide base64-encoded text. User data is limited to 16 KB. + // The user data to make available to the instance. User data must be base64-encoded. + // Depending on the tool or SDK that you're using, the base64-encoding might + // be performed for you. For more information, see Work with instance user data + // (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-add-user-data.html). 
// // UserData is a sensitive parameter and its value will be // replaced with "sensitive" in string returned by RunInstancesInput's @@ -193898,6 +193924,9 @@ func HostRecovery_Values() []string { } const ( + // HostTenancyDefault is a HostTenancy enum value + HostTenancyDefault = "default" + // HostTenancyDedicated is a HostTenancy enum value HostTenancyDedicated = "dedicated" @@ -193908,6 +193937,7 @@ const ( // HostTenancy_Values returns all elements of the HostTenancy enum func HostTenancy_Values() []string { return []string{ + HostTenancyDefault, HostTenancyDedicated, HostTenancyHost, } diff --git a/service/s3/api.go b/service/s3/api.go index f472cdc8d2b..f1fa8dcf0cf 100644 --- a/service/s3/api.go +++ b/service/s3/api.go @@ -449,7 +449,7 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // When the request is an HTTP 1.1 request, the response is chunk encoded. When // the request is not an HTTP 1.1 request, the response would not contain the // Content-Length. You always need to read the entire response body to check -// if the copy succeeds. to keep the connection alive while we copy the data. +// if the copy succeeds. // // - If the copy is successful, you receive a response with information about // the copied object. @@ -2679,7 +2679,7 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque // in your policies when your DeleteObjects request includes specific headers. // s3:DeleteObject - To delete an object from a bucket, you must always specify // the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific -// version of an object from a versiong-enabled bucket, you must specify +// version of an object from a versioning-enabled bucket, you must specify // the s3:DeleteObjectVersion permission. // // - Directory bucket permissions - To grant access to this API operation @@ -12001,15 +12001,15 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // source object that is being copied. If the destination bucket is a general // purpose bucket, you must have the s3:PutObject permission to write the // object copy to the destination bucket. For information about permissions -// required to use the multipart upload API, see Multipart Upload and Permissions -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// required to use the multipart upload API, see Multipart upload API and +// permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) // in the Amazon S3 User Guide. // // - Directory bucket permissions - You must have permissions in a bucket // policy or an IAM identity-based policy based on the source and destination // bucket types in an UploadPartCopy operation. If the source object that // you want to copy is in a directory bucket, you must have the s3express:CreateSession -// permission in the Action element of a policy to read the object . By default, +// permission in the Action element of a policy to read the object. By default, // the session is in the ReadWrite mode. If you want to restrict the access, // you can explicitly set the s3express:SessionMode condition key to ReadOnly // on the copy source bucket. 
If the copy destination is a directory bucket, @@ -16940,7 +16940,7 @@ func (s CreateSessionInput) updateArnableField(v string) (interface{}, error) { type CreateSessionOutput struct { _ struct{} `type:"structure"` - // The established temporary security credentials for the created session.. + // The established temporary security credentials for the created session. // // Credentials is a required field Credentials *SessionCredentials `locationName:"Credentials" type:"structure" required:"true"` @@ -26467,6 +26467,24 @@ type HeadObjectInput struct { // This functionality is not supported for directory buckets. RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Sets the Cache-Control header of the response. + ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` + + // Sets the Content-Disposition header of the response. + ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` + + // Sets the Content-Type header of the response. + ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` + + // Sets the Expires header of the response. + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"` + // Specifies the algorithm to use when encrypting the object (for example, AES256). // // This functionality is not supported for directory buckets. @@ -26612,6 +26630,42 @@ func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { return s } +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *HeadObjectInput) SetResponseCacheControl(v string) *HeadObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *HeadObjectInput) SetResponseContentDisposition(v string) *HeadObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *HeadObjectInput) SetResponseContentEncoding(v string) *HeadObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *HeadObjectInput) SetResponseContentLanguage(v string) *HeadObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *HeadObjectInput) SetResponseContentType(v string) *HeadObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *HeadObjectInput) SetResponseExpires(v time.Time) *HeadObjectInput { + s.ResponseExpires = &v + return s +} + // SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. 
func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { s.SSECustomerAlgorithm = &v @@ -27155,9 +27209,9 @@ type IndexDocument struct { _ struct{} `type:"structure"` // A suffix that is appended to a request that is for a directory on the website - // endpoint (for example,if the suffix is index.html and you make a request - // to samplebucket/images/ the data that is returned will be for the object - // with the key name images/index.html) The suffix must not be empty and must + // endpoint. (For example, if the suffix is index.html and you make a request + // to samplebucket/images/, the data that is returned will be for the object + // with the key name images/index.html.) The suffix must not be empty and must // not include a slash character. // // Replacement must be made for object keys containing special characters (such @@ -32179,9 +32233,9 @@ func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { type NoncurrentVersionExpiration struct { _ struct{} `type:"structure"` - // Specifies how many newer noncurrent versions must exist before Amazon S3 - // can perform the associated action on a given version. If there are this many - // more recent noncurrent versions, Amazon S3 will take the associated action. + // Specifies how many noncurrent versions Amazon S3 will retain. You can specify + // up to 100 noncurrent versions to retain. Amazon S3 will permanently delete + // any additional noncurrent versions beyond the specified number to retain. // For more information about noncurrent versions, see Lifecycle configuration // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) // in the Amazon S3 User Guide. @@ -32235,11 +32289,11 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` - // Specifies how many newer noncurrent versions must exist before Amazon S3 - // can perform the associated action on a given version. If there are this many - // more recent noncurrent versions, Amazon S3 will take the associated action. - // For more information about noncurrent versions, see Lifecycle configuration - // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // Specifies how many noncurrent versions Amazon S3 will retain in the same + // storage class before transitioning objects. You can specify up to 100 noncurrent + // versions to retain. Amazon S3 will transition any additional noncurrent versions + // beyond the specified number to retain. For more information about noncurrent + // versions, see Lifecycle configuration elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) // in the Amazon S3 User Guide. NewerNoncurrentVersions *int64 `type:"integer"` diff --git a/service/s3/examples_test.go b/service/s3/examples_test.go index 0deb2f82d37..197d0bb8041 100644 --- a/service/s3/examples_test.go +++ b/service/s3/examples_test.go @@ -125,12 +125,16 @@ func ExampleS3_CopyObject_shared00() { fmt.Println(result) } -// To create a bucket -// The following example creates a bucket. +// To create a bucket in a specific region +// The following example creates a bucket. The request specifies an AWS region where +// to create the bucket. 
func ExampleS3_CreateBucket_shared00() { svc := s3.New(session.New()) input := &s3.CreateBucketInput{ Bucket: aws.String("examplebucket"), + CreateBucketConfiguration: &s3.CreateBucketConfiguration{ + LocationConstraint: aws.String("eu-west-1"), + }, } result, err := svc.CreateBucket(input) @@ -155,16 +159,12 @@ func ExampleS3_CreateBucket_shared00() { fmt.Println(result) } -// To create a bucket in a specific region -// The following example creates a bucket. The request specifies an AWS region where -// to create the bucket. +// To create a bucket +// The following example creates a bucket. func ExampleS3_CreateBucket_shared01() { svc := s3.New(session.New()) input := &s3.CreateBucketInput{ Bucket: aws.String("examplebucket"), - CreateBucketConfiguration: &s3.CreateBucketConfiguration{ - LocationConstraint: aws.String("eu-west-1"), - }, } result, err := svc.CreateBucket(input) @@ -398,13 +398,13 @@ func ExampleS3_DeleteBucketWebsite_shared00() { fmt.Println(result) } -// To delete an object (from a non-versioned bucket) -// The following example deletes an object from a non-versioned bucket. +// To delete an object +// The following example deletes an object from an S3 bucket. func ExampleS3_DeleteObject_shared00() { svc := s3.New(session.New()) input := &s3.DeleteObjectInput{ - Bucket: aws.String("ExampleBucket"), - Key: aws.String("HappyFace.jpg"), + Bucket: aws.String("examplebucket"), + Key: aws.String("objectkey.jpg"), } result, err := svc.DeleteObject(input) @@ -425,13 +425,13 @@ func ExampleS3_DeleteObject_shared00() { fmt.Println(result) } -// To delete an object -// The following example deletes an object from an S3 bucket. +// To delete an object (from a non-versioned bucket) +// The following example deletes an object from a non-versioned bucket. func ExampleS3_DeleteObject_shared01() { svc := s3.New(session.New()) input := &s3.DeleteObjectInput{ - Bucket: aws.String("examplebucket"), - Key: aws.String("objectkey.jpg"), + Bucket: aws.String("ExampleBucket"), + Key: aws.String("HappyFace.jpg"), } result, err := svc.DeleteObject(input) @@ -510,10 +510,10 @@ func ExampleS3_DeleteObjectTagging_shared01() { fmt.Println(result) } -// To delete multiple object versions from a versioned bucket -// The following example deletes objects from a bucket. The request specifies object -// versions. S3 deletes specific object versions and returns the key and versions of -// deleted objects in the response. +// To delete multiple objects from a versioned bucket +// The following example deletes objects from a bucket. The bucket is versioned, and +// the request does not specify the object version to delete. In this case, all versions +// remain in the bucket and S3 adds a delete marker. func ExampleS3_DeleteObjects_shared00() { svc := s3.New(session.New()) input := &s3.DeleteObjectsInput{ @@ -521,12 +521,10 @@ func ExampleS3_DeleteObjects_shared00() { Delete: &s3.Delete{ Objects: []*s3.ObjectIdentifier{ { - Key: aws.String("HappyFace.jpg"), - VersionId: aws.String("2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b"), + Key: aws.String("objectkey1"), }, { - Key: aws.String("HappyFace.jpg"), - VersionId: aws.String("yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd"), + Key: aws.String("objectkey2"), }, }, Quiet: aws.Bool(false), @@ -551,10 +549,10 @@ func ExampleS3_DeleteObjects_shared00() { fmt.Println(result) } -// To delete multiple objects from a versioned bucket -// The following example deletes objects from a bucket. The bucket is versioned, and -// the request does not specify the object version to delete. 
In this case, all versions -// remain in the bucket and S3 adds a delete marker. +// To delete multiple object versions from a versioned bucket +// The following example deletes objects from a bucket. The request specifies object +// versions. S3 deletes specific object versions and returns the key and versions of +// deleted objects in the response. func ExampleS3_DeleteObjects_shared01() { svc := s3.New(session.New()) input := &s3.DeleteObjectsInput{ @@ -562,10 +560,12 @@ func ExampleS3_DeleteObjects_shared01() { Delete: &s3.Delete{ Objects: []*s3.ObjectIdentifier{ { - Key: aws.String("objectkey1"), + Key: aws.String("HappyFace.jpg"), + VersionId: aws.String("2LWg7lQLnY41.maGB5Z6SWW.dcq0vx7b"), }, { - Key: aws.String("objectkey2"), + Key: aws.String("HappyFace.jpg"), + VersionId: aws.String("yoz3HB.ZhCS_tKVEmIOr7qYyyAaZSKVd"), }, }, Quiet: aws.Bool(false), @@ -996,13 +996,15 @@ func ExampleS3_GetObjectAcl_shared00() { fmt.Println(result) } -// To retrieve tag set of an object -// The following example retrieves tag set of an object. +// To retrieve tag set of a specific object version +// The following example retrieves tag set of an object. The request specifies object +// version. func ExampleS3_GetObjectTagging_shared00() { svc := s3.New(session.New()) input := &s3.GetObjectTaggingInput{ - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), + Bucket: aws.String("examplebucket"), + Key: aws.String("exampleobject"), + VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"), } result, err := svc.GetObjectTagging(input) @@ -1023,15 +1025,13 @@ func ExampleS3_GetObjectTagging_shared00() { fmt.Println(result) } -// To retrieve tag set of a specific object version -// The following example retrieves tag set of an object. The request specifies object -// version. +// To retrieve tag set of an object +// The following example retrieves tag set of an object. func ExampleS3_GetObjectTagging_shared01() { svc := s3.New(session.New()) input := &s3.GetObjectTaggingInput{ - Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), - VersionId: aws.String("ydlaNkwWm0SfKJR.T1b1fIdPRbldTYRI"), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), } result, err := svc.GetObjectTagging(input) @@ -1745,18 +1745,16 @@ func ExampleS3_PutBucketWebsite_shared00() { fmt.Println(result) } -// To upload an object and specify server-side encryption and object tags -// The following example uploads an object. The request specifies the optional server-side -// encryption option. The request also specifies optional object tags. If the bucket -// is versioning enabled, S3 returns version ID in response. +// To upload an object +// The following example uploads an object to a versioning-enabled bucket. The source +// file is specified using Windows file syntax. S3 returns VersionId of the newly created +// object. func ExampleS3_PutObject_shared00() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), - Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), - ServerSideEncryption: aws.String("AES256"), - Tagging: aws.String("key1=value1&key2=value2"), + Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), } result, err := svc.PutObject(input) @@ -1777,15 +1775,18 @@ func ExampleS3_PutObject_shared00() { fmt.Println(result) } -// To create an object. 
-// The following example creates an object. If the bucket is versioning enabled, S3 -// returns version ID in response. +// To upload an object and specify server-side encryption and object tags +// The following example uploads an object. The request specifies the optional server-side +// encryption option. The request also specifies optional object tags. If the bucket +// is versioning enabled, S3 returns version ID in response. func ExampleS3_PutObject_shared01() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), - Bucket: aws.String("examplebucket"), - Key: aws.String("objectkey"), + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Bucket: aws.String("examplebucket"), + Key: aws.String("exampleobject"), + ServerSideEncryption: aws.String("AES256"), + Tagging: aws.String("key1=value1&key2=value2"), } result, err := svc.PutObject(input) @@ -1837,16 +1838,19 @@ func ExampleS3_PutObject_shared02() { fmt.Println(result) } -// To upload an object and specify optional tags -// The following example uploads an object. The request specifies optional object tags. -// The bucket is versioned, therefore S3 returns version ID of the newly created object. +// To upload object and specify user-defined metadata +// The following example creates an object. The request also specifies optional metadata. +// If the bucket is versioning enabled, S3 returns version ID in response. func ExampleS3_PutObject_shared03() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("c:\\HappyFace.jpg")), - Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), - Tagging: aws.String("key1=value1&key2=value2"), + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), + Bucket: aws.String("examplebucket"), + Key: aws.String("exampleobject"), + Metadata: map[string]*string{ + "metadata1": aws.String("value1"), + "metadata2": aws.String("value2"), + }, } result, err := svc.PutObject(input) @@ -1867,19 +1871,16 @@ func ExampleS3_PutObject_shared03() { fmt.Println(result) } -// To upload object and specify user-defined metadata -// The following example creates an object. The request also specifies optional metadata. -// If the bucket is versioning enabled, S3 returns version ID in response. +// To upload an object and specify optional tags +// The following example uploads an object. The request specifies optional object tags. +// The bucket is versioned, therefore S3 returns version ID of the newly created object. func ExampleS3_PutObject_shared04() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), - Bucket: aws.String("examplebucket"), - Key: aws.String("exampleobject"), - Metadata: map[string]*string{ - "metadata1": aws.String("value1"), - "metadata2": aws.String("value2"), - }, + Body: aws.ReadSeekCloser(strings.NewReader("c:\\HappyFace.jpg")), + Bucket: aws.String("examplebucket"), + Key: aws.String("HappyFace.jpg"), + Tagging: aws.String("key1=value1&key2=value2"), } result, err := svc.PutObject(input) @@ -1931,16 +1932,15 @@ func ExampleS3_PutObject_shared05() { fmt.Println(result) } -// To upload an object -// The following example uploads an object to a versioning-enabled bucket. The source -// file is specified using Windows file syntax. S3 returns VersionId of the newly created -// object. +// To create an object. +// The following example creates an object. 
If the bucket is versioning enabled, S3 +// returns version ID in response. func ExampleS3_PutObject_shared06() { svc := s3.New(session.New()) input := &s3.PutObjectInput{ - Body: aws.ReadSeekCloser(strings.NewReader("HappyFace.jpg")), + Body: aws.ReadSeekCloser(strings.NewReader("filetoupload")), Bucket: aws.String("examplebucket"), - Key: aws.String("HappyFace.jpg"), + Key: aws.String("objectkey"), } result, err := svc.PutObject(input)