diff --git a/CHANGELOG.md b/CHANGELOG.md index c9903b477f1..48d7f311a2f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,19 @@ +Release v1.45.14 (2023-09-20) +=== + +### Service Client Updates +* `service/appconfig`: Updates service API, documentation, and paginators +* `service/apprunner`: Updates service API, documentation, and paginators +* `service/codeartifact`: Updates service API +* `service/kinesisvideo`: Updates service documentation + * Updated DescribeMediaStorageConfiguration, StartEdgeConfigurationUpdate, ImageGenerationConfiguration$SamplingInterval, and UpdateMediaStorageConfiguration to match AWS Docs. +* `service/logs`: Updates service API and documentation + * Add ClientToken to QueryDefinition CFN Handler in CWL +* `service/s3`: Updates service API, documentation, and examples + * Fix an issue where the SDK can fail to unmarshall response due to NumberFormatException +* `service/servicediscovery`: Updates service API and documentation +* `service/sso-oidc`: Adds new service + Release v1.45.13 (2023-09-19) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 2b7bdca86cf..5ea860c6b11 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -21192,6 +21192,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -21207,12 +21210,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -33685,6 +33694,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "states": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/aws/version.go b/aws/version.go index a19163bd2c7..5d06b42d9c4 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.45.13" +const SDKVersion = "1.45.14" diff --git a/models/apis/appconfig/2019-10-09/api-2.json b/models/apis/appconfig/2019-10-09/api-2.json index 5d31192326d..d6cd0c7438b 100644 --- a/models/apis/appconfig/2019-10-09/api-2.json +++ b/models/apis/appconfig/2019-10-09/api-2.json @@ -24,6 +24,7 @@ "output":{"shape":"Application"}, "errors":[ {"shape":"BadRequestException"}, + {"shape":"ServiceQuotaExceededException"}, {"shape":"InternalServerException"} ] }, @@ -39,7 +40,8 @@ "errors":[ {"shape":"BadRequestException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServerException"} + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} ] }, "CreateDeploymentStrategy":{ @@ -53,6 +55,7 @@ "output":{"shape":"DeploymentStrategy"}, "errors":[ {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"}, {"shape":"BadRequestException"} ] }, @@ -68,7 +71,8 @@ "errors":[ {"shape":"InternalServerException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"BadRequestException"} + {"shape":"BadRequestException"}, + {"shape":"ServiceQuotaExceededException"} ] }, "CreateExtension":{ @@ -1127,7 +1131,8 @@ 
"CompletedAt":{"shape":"Iso8601DateTime"}, "AppliedExtensions":{"shape":"AppliedExtensions"}, "KmsKeyArn":{"shape":"Arn"}, - "KmsKeyIdentifier":{"shape":"Identifier"} + "KmsKeyIdentifier":{"shape":"Identifier"}, + "VersionLabel":{"shape":"VersionLabel"} } }, "DeploymentEvent":{ @@ -1211,7 +1216,8 @@ "State":{"shape":"DeploymentState"}, "PercentageComplete":{"shape":"Percentage"}, "StartedAt":{"shape":"Iso8601DateTime"}, - "CompletedAt":{"shape":"Iso8601DateTime"} + "CompletedAt":{"shape":"Iso8601DateTime"}, + "VersionLabel":{"shape":"VersionLabel"} } }, "Deployments":{ diff --git a/models/apis/appconfig/2019-10-09/docs-2.json b/models/apis/appconfig/2019-10-09/docs-2.json index a5f63b73658..60fd072d702 100644 --- a/models/apis/appconfig/2019-10-09/docs-2.json +++ b/models/apis/appconfig/2019-10-09/docs-2.json @@ -6,7 +6,7 @@ "CreateConfigurationProfile": "
Creates a configuration profile, which is information that enables AppConfig to access the configuration source. Valid configuration sources include the following:
Configuration data in YAML, JSON, and other formats stored in the AppConfig hosted configuration store
Configuration data stored as objects in an Amazon Simple Storage Service (Amazon S3) bucket
Pipelines stored in CodePipeline
Secrets stored in Secrets Manager
Standard and secure string parameters stored in Amazon Web Services Systems Manager Parameter Store
Configuration data in SSM documents stored in the Systems Manager document store
A configuration profile includes the following information:
The URI location of the configuration data.
The Identity and Access Management (IAM) role that provides access to the configuration data.
A validator for the configuration data. Available validators include either a JSON Schema or an Amazon Web Services Lambda function.
For more information, see Create a Configuration and a Configuration Profile in the AppConfig User Guide.
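For illustration, a minimal aws-sdk-go (v1) sketch of creating a profile backed by the hosted configuration store; the Region, application ID, and profile name below are placeholders, not values from this release:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/appconfig"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := appconfig.New(sess)

	// "hosted" stores the configuration data in the AppConfig hosted configuration store.
	profile, err := svc.CreateConfigurationProfile(&appconfig.CreateConfigurationProfileInput{
		ApplicationId: aws.String("abc1234"), // placeholder application ID
		Name:          aws.String("my-profile"),
		LocationUri:   aws.String("hosted"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("profile ID:", aws.StringValue(profile.Id))
}
```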
", "CreateDeploymentStrategy": "Creates a deployment strategy that defines important criteria for rolling out your configuration to the designated targets. A deployment strategy includes the overall duration required, a percentage of targets to receive the deployment during each interval, an algorithm that defines how percentage grows, and bake time.
", "CreateEnvironment": "Creates an environment. For each application, you define one or more environments. An environment is a deployment group of AppConfig targets, such as applications in a Beta
or Production
environment. You can also define environments for application subcomponents such as the Web
, Mobile
, and Back-end
components for your application. You can configure Amazon CloudWatch alarms for each environment. The system monitors alarms during a configuration deployment. If an alarm is triggered, the system rolls back the configuration.
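A sketch of wiring a CloudWatch alarm into an environment (the ARNs are placeholders); if the alarm fires during a deployment, AppConfig rolls the configuration back:

```go
env, err := svc.CreateEnvironment(&appconfig.CreateEnvironmentInput{
	ApplicationId: aws.String("abc1234"),
	Name:          aws.String("Production"),
	Monitors: []*appconfig.Monitor{{
		AlarmArn:     aws.String("arn:aws:cloudwatch:us-east-1:111122223333:alarm:my-error-alarm"),
		AlarmRoleArn: aws.String("arn:aws:iam::111122223333:role/appconfig-alarm-role"),
	}},
})
```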
Creates an AppConfig extension. An extension augments your ability to inject logic or behavior at different points during the AppConfig workflow of creating or deploying a configuration.
You can create your own extensions or use the Amazon Web Services authored extensions provided by AppConfig. For most use cases, to create your own extension, you must create an Lambda function to perform any computation and processing defined in the extension. For more information about extensions, see Working with AppConfig extensions in the AppConfig User Guide.
", + "CreateExtension": "Creates an AppConfig extension. An extension augments your ability to inject logic or behavior at different points during the AppConfig workflow of creating or deploying a configuration.
You can create your own extensions or use the Amazon Web Services authored extensions provided by AppConfig. For an AppConfig extension that uses Lambda, you must create a Lambda function to perform any computation and processing defined in the extension. If you plan to create custom versions of the Amazon Web Services authored notification extensions, you only need to specify an Amazon Resource Name (ARN) in the Uri
field for the new extension version.
For a custom EventBridge notification extension, enter the ARN of the EventBridge default events in the Uri
field.
For a custom Amazon SNS notification extension, enter the ARN of an Amazon SNS topic in the Uri
field.
For a custom Amazon SQS notification extension, enter the ARN of an Amazon SQS message queue in the Uri
field.
For more information about extensions, see Working with AppConfig extensions in the AppConfig User Guide.
", "CreateExtensionAssociation": "When you create an extension or configure an Amazon Web Services authored extension, you associate the extension with an AppConfig application, environment, or configuration profile. For example, you can choose to run the AppConfig deployment events to Amazon SNS
Amazon Web Services authored extension and receive notifications on an Amazon SNS topic anytime a configuration deployment is started for a specific application. Defining which extension to associate with an AppConfig resource is called an extension association. An extension association is a specified relationship between an extension and an AppConfig resource, such as an application or a configuration profile. For more information about extensions and associations, see Working with AppConfig extensions in the AppConfig User Guide.
Creates a new configuration in the AppConfig hosted configuration store.
", "DeleteApplication": "Deletes an application. Deleting an application does not delete a configuration from a host.
", @@ -367,7 +367,7 @@ "CreateExtensionRequest$Description": "Information about the extension.
", "CreateHostedConfigurationVersionRequest$Description": "A description of the configuration.
", "Deployment$Description": "The description of the deployment.
", - "DeploymentEvent$Description": "A description of the deployment event. Descriptions include, but are not limited to, the user account or the Amazon CloudWatch alarm ARN that initiated a rollback, the percentage of hosts that received the deployment, or in the case of an internal error, a recommendation to attempt a new deployment.
", + "DeploymentEvent$Description": "A description of the deployment event. Descriptions include, but are not limited to, the following:
The Amazon Web Services account or the Amazon CloudWatch alarm ARN that initiated a rollback.
The percentage of hosts that received the deployment.
A recommendation to attempt a new deployment (in the case of an internal error).
The description of the deployment strategy.
", "Environment$Description": "The description of the environment.
", "Extension$Description": "Information about the extension.
", @@ -886,7 +886,7 @@ } }, "ServiceQuotaExceededException": { - "base": "The number of hosted configuration versions exceeds the limit for the AppConfig hosted configuration store. Delete one or more versions and try again.
", + "base": "The number of one more AppConfig resources exceeds the maximum allowed. Verify that your environment doesn't exceed the following service quotas:
Applications: 100 max
Deployment strategies: 20 max
Configuration profiles: 100 max per application
Environments: 20 max per application
To resolve this issue, you can delete one or more resources and try again. Or, you can request a quota increase. For more information about quotas and to request an increase, see Service quotas for AppConfig in the Amazon Web Services General Reference.
", "refs": { } }, @@ -1036,7 +1036,7 @@ "ActionInvocation$Uri": "The extension URI associated to the action point in the extension definition. The URI can be an Amazon Resource Name (ARN) for one of the following: an Lambda function, an Amazon Simple Queue Service queue, an Amazon Simple Notification Service topic, or the Amazon EventBridge default event bus.
", "ConfigurationProfile$LocationUri": "The URI location of the configuration.
", "ConfigurationProfileSummary$LocationUri": "The URI location of the configuration.
", - "CreateConfigurationProfileRequest$LocationUri": "A URI to locate the configuration. You can specify the following:
For the AppConfig hosted configuration store and for feature flags, specify hosted
.
For an Amazon Web Services Systems Manager Parameter Store parameter, specify either the parameter name in the format ssm-parameter://<parameter name>
or the ARN.
For an Secrets Manager secret, specify the URI in the following format: secrets-manager
://<secret name>.
For an Amazon S3 object, specify the URI in the following format: s3://<bucket>/<objectKey>
. Here is an example: s3://my-bucket/my-app/us-east-1/my-config.json
For an SSM document, specify either the document name in the format ssm-document://<document name>
or the Amazon Resource Name (ARN).
A URI to locate the configuration. You can specify the following:
For the AppConfig hosted configuration store and for feature flags, specify hosted
.
For an Amazon Web Services Systems Manager Parameter Store parameter, specify either the parameter name in the format ssm-parameter://<parameter name>
or the ARN.
For an Amazon Web Services CodePipeline pipeline, specify the URI in the following format: codepipeline
://<pipeline name>.
For a Secrets Manager secret, specify the URI in the following format: secretsmanager
://<secret name>.
For an Amazon S3 object, specify the URI in the following format: s3://<bucket>/<objectKey>
. Here is an example: s3://my-bucket/my-app/us-east-1/my-config.json
For an SSM document, specify either the document name in the format ssm-document://<document name>
or the Amazon Resource Name (ARN).
Information about the source location of the configuration.
" } }, @@ -1079,7 +1079,7 @@ "Deployment$ConfigurationVersion": "The configuration version that was deployed.
", "DeploymentSummary$ConfigurationVersion": "The version of the configuration.
", "GetConfigurationRequest$ClientConfigurationVersion": "The configuration version returned in the most recent GetConfiguration
response.
AppConfig uses the value of the ClientConfigurationVersion
parameter to identify the configuration version on your clients. If you don’t send ClientConfigurationVersion
with each call to GetConfiguration
, your clients receive the current configuration. You are charged each time your clients receive a configuration.
To avoid excess charges, we recommend you use the StartConfigurationSession and GetLatestConfiguration APIs, which track the client configuration version on your behalf. If you choose to continue using GetConfiguration
, we recommend that you include the ClientConfigurationVersion
value with every call to GetConfiguration
. The value to use for ClientConfigurationVersion
comes from the ConfigurationVersion
attribute returned by GetConfiguration
when there is new or updated data, and should be saved for subsequent calls to GetConfiguration
.
For more information about working with configurations, see Retrieving the Configuration in the AppConfig User Guide.
", - "StartDeploymentRequest$ConfigurationVersion": "The configuration version to deploy. If deploying an AppConfig hosted configuration version, you can specify either the version number or version label.
", + "StartDeploymentRequest$ConfigurationVersion": "The configuration version to deploy. If deploying an AppConfig hosted configuration version, you can specify either the version number or version label. For all other configurations, you must specify the version number.
", "ValidateConfigurationRequest$ConfigurationVersion": "The version of the configuration to validate.
" } }, @@ -1087,6 +1087,8 @@ "base": null, "refs": { "CreateHostedConfigurationVersionRequest$VersionLabel": "An optional, user-defined label for the AppConfig hosted configuration version. This value must contain at least one non-numeric character. For example, \"v2.2.0\".
", + "Deployment$VersionLabel": "A user-defined label for an AppConfig hosted configuration version.
", + "DeploymentSummary$VersionLabel": "A user-defined label for an AppConfig hosted configuration version.
", "HostedConfigurationVersion$VersionLabel": "A user-defined label for an AppConfig hosted configuration version.
", "HostedConfigurationVersionSummary$VersionLabel": "A user-defined label for an AppConfig hosted configuration version.
" } diff --git a/models/apis/appconfig/2019-10-09/endpoint-rule-set-1.json b/models/apis/appconfig/2019-10-09/endpoint-rule-set-1.json index a77bfa88091..dc7da8dca97 100644 --- a/models/apis/appconfig/2019-10-09/endpoint-rule-set-1.json +++ b/models/apis/appconfig/2019-10-09/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://appconfig-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://appconfig-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] } ], @@ -221,168 
+225,128 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "Region" + }, + "us-gov-east-1" ] } ], - "type": "tree", - "rules": [ + "endpoint": { + "url": "https://appconfig.us-gov-east-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-east-1" - ] - } - ], - "endpoint": { - "url": "https://appconfig.us-gov-east-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, + "fn": "stringEquals", + "argv": [ { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-west-1" - ] - } - ], - "endpoint": { - "url": "https://appconfig.us-gov-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "ref": "Region" }, - { - "conditions": [], - "endpoint": { - "url": "https://appconfig-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "us-gov-west-1" ] } - ] + ], + "endpoint": { + "url": "https://appconfig.us-gov-west-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://appconfig-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://appconfig.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://appconfig.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://appconfig.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://appconfig.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": 
"error" } ] } \ No newline at end of file diff --git a/models/apis/appconfig/2019-10-09/endpoint-tests-1.json b/models/apis/appconfig/2019-10-09/endpoint-tests-1.json index 513cab3cb6c..be05a360bde 100644 --- a/models/apis/appconfig/2019-10-09/endpoint-tests-1.json +++ b/models/apis/appconfig/2019-10-09/endpoint-tests-1.json @@ -9,8 +9,8 @@ }, "params": { "Region": "af-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -22,8 +22,8 @@ }, "params": { "Region": "ap-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -35,8 +35,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -48,8 +48,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -61,8 +61,8 @@ }, "params": { "Region": "ap-northeast-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -74,8 +74,8 @@ }, "params": { "Region": "ap-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -87,8 +87,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -100,8 +100,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -113,8 +113,8 @@ }, "params": { "Region": "ap-southeast-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -126,8 +126,8 @@ }, "params": { "Region": "ca-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -139,8 +139,8 @@ }, "params": { "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -152,8 +152,8 @@ }, "params": { "Region": "eu-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -165,8 +165,8 @@ }, "params": { "Region": "eu-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -178,8 +178,8 @@ }, "params": { "Region": "eu-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -191,8 +191,8 @@ }, "params": { "Region": "eu-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -204,8 +204,8 @@ }, "params": { "Region": "eu-west-3", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -217,8 +217,8 @@ }, "params": { "Region": "me-south-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -230,8 +230,8 @@ }, "params": { "Region": "sa-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -243,8 +243,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -256,8 +256,8 @@ }, "params": { "Region": "us-east-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -269,8 +269,8 @@ }, "params": { "Region": "us-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": 
false } }, { @@ -282,8 +282,8 @@ }, "params": { "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -295,8 +295,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -308,8 +308,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -321,8 +321,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -334,8 +334,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -347,8 +347,8 @@ }, "params": { "Region": "cn-northwest-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -360,8 +360,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -373,8 +373,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -386,8 +386,8 @@ }, "params": { "Region": "cn-north-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -399,8 +399,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -412,8 +412,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -425,8 +425,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -438,8 +438,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false } }, { @@ -451,8 +451,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": true } }, { @@ -464,8 +464,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": true } }, { @@ -477,8 +477,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false } }, { @@ -490,8 +490,19 @@ }, "params": { "Region": "us-iso-west-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -503,8 +514,19 @@ }, "params": { "Region": "us-iso-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -516,8 +538,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": 
"For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { @@ -529,8 +562,19 @@ }, "params": { "Region": "us-isob-east-1", - "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true } }, { @@ -542,8 +586,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -555,8 +599,8 @@ } }, "params": { - "UseDualStack": false, "UseFIPS": false, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -567,8 +611,8 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": false, "UseFIPS": true, + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -579,10 +623,16 @@ }, "params": { "Region": "us-east-1", - "UseDualStack": true, "UseFIPS": false, + "UseDualStack": true, "Endpoint": "https://example.com" } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } } ], "version": "1.0" diff --git a/models/apis/appconfig/2019-10-09/paginators-1.json b/models/apis/appconfig/2019-10-09/paginators-1.json index e1a03113253..f176babae35 100644 --- a/models/apis/appconfig/2019-10-09/paginators-1.json +++ b/models/apis/appconfig/2019-10-09/paginators-1.json @@ -3,42 +3,50 @@ "ListApplications": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "Items" }, "ListConfigurationProfiles": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "Items" }, "ListDeploymentStrategies": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "Items" }, "ListDeployments": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "Items" }, "ListEnvironments": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "Items" }, "ListExtensionAssociations": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "Items" }, "ListExtensions": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "Items" }, "ListHostedConfigurationVersions": { "input_token": "NextToken", "output_token": "NextToken", - "limit_key": "MaxResults" + "limit_key": "MaxResults", + "result_key": "Items" } } } diff --git a/models/apis/apprunner/2020-05-15/api-2.json b/models/apis/apprunner/2020-05-15/api-2.json index d62d42e4d27..3ee90a73c3e 100644 --- a/models/apis/apprunner/2020-05-15/api-2.json +++ b/models/apis/apprunner/2020-05-15/api-2.json @@ -363,6 +363,20 @@ {"shape":"InternalServiceErrorException"} ] }, + "ListServicesForAutoScalingConfiguration":{ + "name":"ListServicesForAutoScalingConfiguration", + "http":{ + 
"method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListServicesForAutoScalingConfigurationRequest"}, + "output":{"shape":"ListServicesForAutoScalingConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceErrorException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -478,6 +492,20 @@ {"shape":"InvalidStateException"} ] }, + "UpdateDefaultAutoScalingConfiguration":{ + "name":"UpdateDefaultAutoScalingConfiguration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDefaultAutoScalingConfigurationRequest"}, + "output":{"shape":"UpdateDefaultAutoScalingConfigurationResponse"}, + "errors":[ + {"shape":"InvalidRequestException"}, + {"shape":"InternalServiceErrorException"}, + {"shape":"ResourceNotFoundException"} + ] + }, "UpdateService":{ "name":"UpdateService", "http":{ @@ -569,14 +597,16 @@ "members":{ "AutoScalingConfigurationArn":{"shape":"AppRunnerResourceArn"}, "AutoScalingConfigurationName":{"shape":"AutoScalingConfigurationName"}, - "AutoScalingConfigurationRevision":{"shape":"Integer"}, - "Latest":{"shape":"Boolean"}, + "AutoScalingConfigurationRevision":{"shape":"AutoScalingConfigurationRevision"}, + "Latest":{"shape":"Latest"}, "Status":{"shape":"AutoScalingConfigurationStatus"}, - "MaxConcurrency":{"shape":"Integer"}, - "MinSize":{"shape":"Integer"}, - "MaxSize":{"shape":"Integer"}, + "MaxConcurrency":{"shape":"MaxConcurrency"}, + "MinSize":{"shape":"MinSize"}, + "MaxSize":{"shape":"MaxSize"}, "CreatedAt":{"shape":"Timestamp"}, - "DeletedAt":{"shape":"Timestamp"} + "DeletedAt":{"shape":"Timestamp"}, + "HasAssociatedService":{"shape":"HasAssociatedService"}, + "IsDefault":{"shape":"IsDefault"} } }, "AutoScalingConfigurationName":{ @@ -585,6 +615,7 @@ "min":4, "pattern":"[A-Za-z0-9][A-Za-z0-9\\-_]{3,31}" }, + "AutoScalingConfigurationRevision":{"type":"integer"}, "AutoScalingConfigurationStatus":{ "type":"string", "enum":[ @@ -597,7 +628,11 @@ "members":{ "AutoScalingConfigurationArn":{"shape":"AppRunnerResourceArn"}, "AutoScalingConfigurationName":{"shape":"AutoScalingConfigurationName"}, - "AutoScalingConfigurationRevision":{"shape":"Integer"} + "AutoScalingConfigurationRevision":{"shape":"Integer"}, + "Status":{"shape":"AutoScalingConfigurationStatus"}, + "CreatedAt":{"shape":"Timestamp"}, + "HasAssociatedService":{"shape":"HasAssociatedService"}, + "IsDefault":{"shape":"IsDefault"} } }, "AutoScalingConfigurationSummaryList":{ @@ -878,7 +913,8 @@ "type":"structure", "required":["AutoScalingConfigurationArn"], "members":{ - "AutoScalingConfigurationArn":{"shape":"AppRunnerResourceArn"} + "AutoScalingConfigurationArn":{"shape":"AppRunnerResourceArn"}, + "DeleteAllRevisions":{"shape":"Boolean"} } }, "DeleteAutoScalingConfigurationResponse":{ @@ -1118,6 +1154,7 @@ "type":"string", "max":600 }, + "HasAssociatedService":{"type":"boolean"}, "HealthCheckConfiguration":{ "type":"structure", "members":{ @@ -1238,12 +1275,14 @@ }, "exception":true }, + "IsDefault":{"type":"boolean"}, "KmsKeyArn":{ "type":"string", "max":256, "min":0, "pattern":"arn:aws(-[\\w]+)*:kms:[a-z\\-]+-[0-9]{1}:[0-9]{12}:key\\/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" }, + "Latest":{"type":"boolean"}, "ListAutoScalingConfigurationsRequest":{ "type":"structure", "members":{ @@ -1315,6 +1354,23 @@ "NextToken":{"shape":"String"} } }, + "ListServicesForAutoScalingConfigurationRequest":{ + "type":"structure", + 
"required":["AutoScalingConfigurationArn"], + "members":{ + "AutoScalingConfigurationArn":{"shape":"AppRunnerResourceArn"}, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "ListServicesForAutoScalingConfigurationResponse":{ + "type":"structure", + "required":["ServiceArnList"], + "members":{ + "ServiceArnList":{"shape":"ServiceArnList"}, + "NextToken":{"shape":"NextToken"} + } + }, "ListServicesRequest":{ "type":"structure", "members":{ @@ -1381,17 +1437,20 @@ "NextToken":{"shape":"NextToken"} } }, + "MaxConcurrency":{"type":"integer"}, "MaxResults":{ "type":"integer", "max":100, "min":1 }, + "MaxSize":{"type":"integer"}, "Memory":{ "type":"string", "max":6, "min":3, "pattern":"512|1024|2048|3072|4096|6144|8192|10240|12288|(0.5|1|2|3|4|6|8|10|12) GB" }, + "MinSize":{"type":"integer"}, "NetworkConfiguration":{ "type":"structure", "members":{ @@ -1616,6 +1675,10 @@ "ObservabilityConfiguration":{"shape":"ServiceObservabilityConfiguration"} } }, + "ServiceArnList":{ + "type":"list", + "member":{"shape":"AppRunnerResourceArn"} + }, "ServiceId":{ "type":"string", "max":32, @@ -1805,6 +1868,20 @@ "members":{ } }, + "UpdateDefaultAutoScalingConfigurationRequest":{ + "type":"structure", + "required":["AutoScalingConfigurationArn"], + "members":{ + "AutoScalingConfigurationArn":{"shape":"AppRunnerResourceArn"} + } + }, + "UpdateDefaultAutoScalingConfigurationResponse":{ + "type":"structure", + "required":["AutoScalingConfiguration"], + "members":{ + "AutoScalingConfiguration":{"shape":"AutoScalingConfiguration"} + } + }, "UpdateServiceRequest":{ "type":"structure", "required":["ServiceArn"], diff --git a/models/apis/apprunner/2020-05-15/docs-2.json b/models/apis/apprunner/2020-05-15/docs-2.json index 6236b3dcd1b..a6691d86ecd 100644 --- a/models/apis/apprunner/2020-05-15/docs-2.json +++ b/models/apis/apprunner/2020-05-15/docs-2.json @@ -9,7 +9,7 @@ "CreateService": "Create an App Runner service. After the service is created, the action also automatically starts a deployment.
This is an asynchronous operation. On a successful call, you can use the returned OperationId
and the ListOperations call to track the operation's progress.
Create an App Runner VPC connector resource. App Runner requires this resource when you want to associate your App Runner service to a custom Amazon Virtual Private Cloud (Amazon VPC).
", "CreateVpcIngressConnection": "Create an App Runner VPC Ingress Connection resource. App Runner requires this resource when you want to associate your App Runner service with an Amazon VPC endpoint.
", - "DeleteAutoScalingConfiguration": "Delete an App Runner automatic scaling configuration resource. You can delete a specific revision or the latest active revision. You can't delete a configuration that's used by one or more App Runner services.
", + "DeleteAutoScalingConfiguration": "Delete an App Runner automatic scaling configuration resource. You can delete a top level auto scaling configuration, a specific revision of one, or all revisions associated with the top level configuration. You can't delete the default auto scaling configuration or a configuration that's used by one or more App Runner services.
", "DeleteConnection": "Delete an App Runner connection. You must first ensure that there are no running App Runner services that use this connection. If there are any, the DeleteConnection
action fails.
Delete an App Runner observability configuration resource. You can delete a specific revision or the latest active revision. You can't delete a configuration that's used by one or more App Runner services.
", "DeleteService": "Delete an App Runner service.
This is an asynchronous operation. On a successful call, you can use the returned OperationId
and the ListOperations call to track the operation's progress.
Make sure that you don't have any active VPCIngressConnections associated with the service you want to delete.
Returns a list of active App Runner observability configurations in your Amazon Web Services account. You can query the revisions for a specific configuration name or the revisions for all active configurations in your account. You can optionally query only the latest revision of each requested name.
To retrieve a full description of a particular configuration revision, call and provide one of the ARNs returned by ListObservabilityConfigurations
.
Return a list of operations that occurred on an App Runner service.
The resulting list of OperationSummary objects is sorted in reverse chronological order. The first object on the list represents the last started operation.
", "ListServices": "Returns a list of running App Runner services in your Amazon Web Services account.
", + "ListServicesForAutoScalingConfiguration": "Returns a list of the associated App Runner services using an auto scaling configuration.
", "ListTagsForResource": "List tags that are associated with for an App Runner resource. The response contains a list of tag key-value pairs.
", "ListVpcConnectors": "Returns a list of App Runner VPC connectors in your Amazon Web Services account.
", "ListVpcIngressConnections": "Return a list of App Runner VPC Ingress Connections in your Amazon Web Services account.
", @@ -35,6 +36,7 @@ "StartDeployment": "Initiate a manual deployment of the latest commit in a source code repository or the latest image in a source image repository to an App Runner service.
For a source code repository, App Runner retrieves the commit and builds a Docker image. For a source image repository, App Runner retrieves the latest Docker image. In both cases, App Runner then deploys the new image to your service and starts a new container instance.
This is an asynchronous operation. On a successful call, you can use the returned OperationId
and the ListOperations call to track the operation's progress.
Add tags to, or update the tag values of, an App Runner resource. A tag is a key-value pair.
", "UntagResource": "Remove tags from an App Runner resource.
", + "UpdateDefaultAutoScalingConfiguration": "Update an auto scaling configuration to be the default. The existing default auto scaling configuration will be set to non-default automatically.
", "UpdateService": "Update an App Runner service. You can update the source configuration and instance configuration of the service. You can also update the ARN of the auto scaling configuration resource that's associated with the service. However, you can't change the name or the encryption configuration of the service. These can be set only when you create the service.
To update the tags applied to your service, use the separate actions TagResource and UntagResource.
This is an asynchronous operation. On a successful call, you can use the returned OperationId
and the ListOperations call to track the operation's progress.
Update an existing App Runner VPC Ingress Connection resource. The VPC Ingress Connection must be in one of the following states to be updated:
AVAILABLE
FAILED_CREATION
FAILED_UPDATE
The Amazon Resource Name (ARN) of the App Runner service that a custom domain name is disassociated from.
", "EgressConfiguration$VpcConnectorArn": "The Amazon Resource Name (ARN) of the App Runner VPC connector that you want to associate with your App Runner service. Only valid when EgressType = VPC
.
The Amazon Resource Name (ARN) of the App Runner service that you want a list of operations for.
", + "ListServicesForAutoScalingConfigurationRequest$AutoScalingConfigurationArn": "The Amazon Resource Name (ARN) of the App Runner auto scaling configuration that you want to list the services for.
The ARN can be a full auto scaling configuration ARN, or a partial ARN ending with either .../name
or .../name/revision
. If a revision isn't specified, the latest active revision is used.
The Amazon Resource Name (ARN) of the resource that a tag list is requested for.
It must be the ARN of an App Runner resource.
", "ListVpcIngressConnectionsFilter$ServiceArn": "The Amazon Resource Name (ARN) of a service to filter by.
", "ObservabilityConfiguration$ObservabilityConfigurationArn": "The Amazon Resource Name (ARN) of this observability configuration.
", @@ -94,11 +97,13 @@ "PauseServiceRequest$ServiceArn": "The Amazon Resource Name (ARN) of the App Runner service that you want to pause.
", "ResumeServiceRequest$ServiceArn": "The Amazon Resource Name (ARN) of the App Runner service that you want to resume.
", "Service$ServiceArn": "The Amazon Resource Name (ARN) of this service.
", + "ServiceArnList$member": null, "ServiceObservabilityConfiguration$ObservabilityConfigurationArn": "The Amazon Resource Name (ARN) of the observability configuration that is associated with the service. Specified only when ObservabilityEnabled
is true
.
Specify an ARN with a name and a revision number to associate that revision. For example: arn:aws:apprunner:us-east-1:123456789012:observabilityconfiguration/xray-tracing/3
Specify just the name to associate the latest revision. For example: arn:aws:apprunner:us-east-1:123456789012:observabilityconfiguration/xray-tracing
The Amazon Resource Name (ARN) of this service.
", "StartDeploymentRequest$ServiceArn": "The Amazon Resource Name (ARN) of the App Runner service that you want to manually deploy to.
", "TagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the resource that you want to update tags for.
It must be the ARN of an App Runner resource.
", "UntagResourceRequest$ResourceArn": "The Amazon Resource Name (ARN) of the resource that you want to remove tags from.
It must be the ARN of an App Runner resource.
", + "UpdateDefaultAutoScalingConfigurationRequest$AutoScalingConfigurationArn": "The Amazon Resource Name (ARN) of the App Runner auto scaling configuration that you want to set as the default.
The ARN can be a full auto scaling configuration ARN, or a partial ARN ending with either .../name
or .../name/revision
. If a revision isn't specified, the latest active revision is set as the default.
The Amazon Resource Name (ARN) of the App Runner service that you want to update.
", "UpdateServiceRequest$AutoScalingConfigurationArn": "The Amazon Resource Name (ARN) of an App Runner automatic scaling configuration resource that you want to associate with the App Runner service.
", "UpdateVpcIngressConnectionRequest$VpcIngressConnectionArn": "The Amazon Resource Name (Arn) for the App Runner VPC Ingress Connection resource that you want to update.
", @@ -131,7 +136,8 @@ "refs": { "CreateAutoScalingConfigurationResponse$AutoScalingConfiguration": "A description of the App Runner auto scaling configuration that's created by this request.
", "DeleteAutoScalingConfigurationResponse$AutoScalingConfiguration": "A description of the App Runner auto scaling configuration that this request just deleted.
", - "DescribeAutoScalingConfigurationResponse$AutoScalingConfiguration": "A full description of the App Runner auto scaling configuration that you specified in this request.
" + "DescribeAutoScalingConfigurationResponse$AutoScalingConfiguration": "A full description of the App Runner auto scaling configuration that you specified in this request.
", + "UpdateDefaultAutoScalingConfigurationResponse$AutoScalingConfiguration": "A description of the App Runner auto scaling configuration that was set as default.
" } }, "AutoScalingConfigurationName": { @@ -139,14 +145,21 @@ "refs": { "AutoScalingConfiguration$AutoScalingConfigurationName": "The customer-provided auto scaling configuration name. It can be used in multiple revisions of a configuration.
", "AutoScalingConfigurationSummary$AutoScalingConfigurationName": "The customer-provided auto scaling configuration name. It can be used in multiple revisions of a configuration.
", - "CreateAutoScalingConfigurationRequest$AutoScalingConfigurationName": "A name for the auto scaling configuration. When you use it for the first time in an Amazon Web Services Region, App Runner creates revision number 1
of this name. When you use the same name in subsequent calls, App Runner creates incremental revisions of the configuration.
The name DefaultConfiguration
is reserved (it's the configuration that App Runner uses if you don't provide a custome one). You can't use it to create a new auto scaling configuration, and you can't create a revision of it.
When you want to use your own auto scaling configuration for your App Runner service, create a configuration with a different name, and then provide it when you create or update your service.
A name for the auto scaling configuration. When you use it for the first time in an Amazon Web Services Region, App Runner creates revision number 1
of this name. When you use the same name in subsequent calls, App Runner creates incremental revisions of the configuration.
Prior to the release of Managing auto scaling, the name DefaultConfiguration
was reserved.
This restriction is no longer in place. You can now manage DefaultConfiguration
the same way you manage your custom auto scaling configurations. This means you can do the following with the DefaultConfiguration
that App Runner provides:
Create new revisions of the DefaultConfiguration
.
Delete the revisions of the DefaultConfiguration
.
Delete the auto scaling configuration for which the App Runner DefaultConfiguration
was created.
If you delete the auto scaling configuration you can create another custom auto scaling configuration with the same DefaultConfiguration
name. The original DefaultConfiguration
resource provided by App Runner remains in your account unless you make changes to it.
The name of the App Runner auto scaling configuration that you want to list. If specified, App Runner lists revisions that share this name. If not specified, App Runner returns revisions of all active configurations.
" } }, + "AutoScalingConfigurationRevision": { + "base": null, + "refs": { + "AutoScalingConfiguration$AutoScalingConfigurationRevision": "The revision of this auto scaling configuration. It's unique among all the active configurations (\"Status\": \"ACTIVE\"
) that share the same AutoScalingConfigurationName
.
The current state of the auto scaling configuration. If the status of a configuration revision is INACTIVE
, it was deleted and can't be used. Inactive configuration revisions are permanently removed some time after they are deleted.
The current state of the auto scaling configuration. If the status of a configuration revision is INACTIVE
, it was deleted and can't be used. Inactive configuration revisions are permanently removed some time after they are deleted.
The current state of the auto scaling configuration. If the status of a configuration revision is INACTIVE
, it was deleted and can't be used. Inactive configuration revisions are permanently removed some time after they are deleted.
It's set to true
for the configuration with the highest Revision
among all configurations that share the same AutoScalingConfigurationName
. It's set to false
otherwise.
Set to true
to delete all of the revisions associated with the AutoScalingConfigurationArn
parameter value.
When DeleteAllRevisions
is set to true
, the only valid value for the Amazon Resource Name (ARN) is a partial ARN ending with: .../name
.
Specifies whether your App Runner service is publicly accessible. To make the service publicly accessible set it to True
. To make the service privately accessible, from only within an Amazon VPC set it to False
.
Set to true
to list only the latest revision for each requested configuration name.
Set to false
to list all revisions for each requested configuration name.
Default: true
Set to true
to list only the latest revision for each requested configuration name.
Set to false
to list all revisions for each requested configuration name.
Default: true
Indicates if this auto scaling configuration has an App Runner service associated with it. A value of true
indicates one or more services are associated. A value of false
indicates no services are associated.
Indicates if this auto scaling configuration has an App Runner service associated with it. A value of true
indicates one or more services are associated. A value of false
indicates no services are associated.
Describes the settings for the health check that App Runner performs to monitor the health of a service.
", "refs": { @@ -616,10 +636,6 @@ "Integer": { "base": null, "refs": { - "AutoScalingConfiguration$AutoScalingConfigurationRevision": "The revision of this auto scaling configuration. It's unique among all the active configurations (\"Status\": \"ACTIVE\"
) that share the same AutoScalingConfigurationName
.
The maximum number of concurrent requests that an instance processes. If the number of concurrent requests exceeds this limit, App Runner scales the service up.
", - "AutoScalingConfiguration$MinSize": "The minimum number of instances that App Runner provisions for a service. The service always has at least MinSize
provisioned instances. Some of them actively serve traffic. The rest of them (provisioned and inactive instances) are a cost-effective compute capacity reserve and are ready to be quickly activated. You pay for memory usage of all the provisioned instances. You pay for CPU usage of only the active subset.
App Runner temporarily doubles the number of provisioned instances during deployments, to maintain the same capacity for both old and new code.
", - "AutoScalingConfiguration$MaxSize": "The maximum number of instances that a service scales up to. At most MaxSize
instances actively serve traffic for your service.
The revision of this auto scaling configuration. It's unique among all the active configurations (\"Status\": \"ACTIVE\"
) with the same AutoScalingConfigurationName
.
The revision of this observability configuration. It's unique among all the active configurations (\"Status\": \"ACTIVE\"
) that share the same ObservabilityConfigurationName
.
The revision of this observability configuration. It's unique among all the active configurations (\"Status\": \"ACTIVE\"
) that share the same ObservabilityConfigurationName
.
Indicates if this auto scaling configuration should be used as the default for a new App Runner service that does not have an auto scaling configuration ARN specified during creation. Each account can have only one default AutoScalingConfiguration
per region. The default AutoScalingConfiguration
can be any revision under the same AutoScalingConfigurationName
.
Indicates if this auto scaling configuration should be used as the default for a new App Runner service that does not have an auto scaling configuration ARN specified during creation. Each account can have only one default AutoScalingConfiguration
per region. The default AutoScalingConfiguration
can be any revision under the same AutoScalingConfigurationName
.
The ARN of the KMS key that's used for encryption.
" } }, + "Latest": { + "base": null, + "refs": { + "AutoScalingConfiguration$Latest": "It's set to true
for the configuration with the highest Revision
among all configurations that share the same AutoScalingConfigurationName
. It's set to false
otherwise.
The maximum number of concurrent requests that an instance processes. If the number of concurrent requests exceeds this limit, App Runner scales the service up.
" + } + }, "MaxResults": { "base": null, "refs": { "ListAutoScalingConfigurationsRequest$MaxResults": "The maximum number of results to include in each response (result page). It's used for a paginated request.
If you don't specify MaxResults
, the request retrieves all available results in a single response.
The maximum number of results to include in each response (result page). Used for a paginated request.
If you don't specify MaxResults
, the request retrieves all available results in a single response.
The maximum number of results to include in each response (result page). It's used for a paginated request.
If you don't specify MaxResults
, the request retrieves all available results in a single response.
The maximum number of results to include in each response (result page). It's used for a paginated request.
If you don't specify MaxResults
, the request retrieves all available results in a single response.
The maximum number of results to include in each response (result page). It's used for a paginated request.
If you don't specify MaxResults
, the request retrieves all available results in a single response.
The maximum number of results to include in each response (result page). It's used for a paginated request.
If you don't specify MaxResults
, the request retrieves all available results in a single response.
The maximum number of instances that a service scales up to. At most MaxSize
instances actively serve traffic for your service.
The amount of memory, in MB or GB, reserved for each instance of your App Runner service.
Default: 2 GB
The minimum number of instances that App Runner provisions for a service. The service always has at least MinSize
provisioned instances. Some of them actively serve traffic. The rest of them (provisioned and inactive instances) are a cost-effective compute capacity reserve and are ready to be quickly activated. You pay for memory usage of all the provisioned instances. You pay for CPU usage of only the active subset.
App Runner temporarily doubles the number of provisioned instances during deployments to maintain the same capacity for both old and new code.
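For reference, a minimal sketch of creating an auto scaling configuration that sets these three knobs with this SDK; the configuration name and the concrete values are hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apprunner"
)

func main() {
	svc := apprunner.New(session.Must(session.NewSession()))

	// MaxConcurrency caps requests per instance before App Runner scales out;
	// MinSize keeps a warm pool of provisioned instances; MaxSize bounds cost.
	out, err := svc.CreateAutoScalingConfiguration(&apprunner.CreateAutoScalingConfigurationInput{
		AutoScalingConfigurationName: aws.String("example-asc"), // hypothetical name
		MaxConcurrency:               aws.Int64(100),
		MinSize:                      aws.Int64(2),
		MaxSize:                      aws.Int64(10),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.AutoScalingConfiguration.AutoScalingConfigurationArn))
}
```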
" + } + }, "NetworkConfiguration": { "base": "Describes configuration settings related to network traffic of an App Runner service. Consists of embedded objects for each configurable network feature.
", "refs": { @@ -772,6 +830,8 @@ "ListConnectionsResponse$NextToken": "The token that you can pass in a subsequent request to get the next result page. Returned in a paginated request.
", "ListObservabilityConfigurationsRequest$NextToken": "A token from a previous result page. It's used for a paginated request. The request retrieves the next result page. All other parameter values must be identical to the ones that are specified in the initial request.
If you don't specify NextToken
, the request retrieves the first result page.
The token that you can pass in a subsequent request to get the next result page. It's returned in a paginated request.
", + "ListServicesForAutoScalingConfigurationRequest$NextToken": "A token from a previous result page. It's used for a paginated request. The request retrieves the next result page. All other parameter values must be identical to the ones specified in the initial request.
If you don't specify NextToken
, the request retrieves the first result page.
The token that you can pass in a subsequent request to get the next result page. It's returned in a paginated request.
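A minimal sketch of driving the new ListServicesForAutoScalingConfiguration operation by hand with NextToken; the ARN is hypothetical, and the ServiceArnList member name is taken from this model update:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/apprunner"
)

func main() {
	svc := apprunner.New(session.Must(session.NewSession()))

	input := &apprunner.ListServicesForAutoScalingConfigurationInput{
		// Hypothetical ARN; use one of your auto scaling configurations.
		AutoScalingConfigurationArn: aws.String("arn:aws:apprunner:us-east-1:123456789012:autoscalingconfiguration/example-asc/1/abc123"),
		MaxResults:                  aws.Int64(20),
	}
	for {
		page, err := svc.ListServicesForAutoScalingConfiguration(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, arn := range page.ServiceArnList {
			fmt.Println(aws.StringValue(arn))
		}
		if page.NextToken == nil {
			break // last page
		}
		input.NextToken = page.NextToken
	}
}
```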
", "ListVpcConnectorsRequest$NextToken": "A token from a previous result page. It's used for a paginated request. The request retrieves the next result page. All other parameter values must be identical to the ones that are specified in the initial request.
If you don't specify NextToken
, the request retrieves the first result page.
The token that you can pass in a subsequent request to get the next result page. It's returned in a paginated request.
", "ListVpcIngressConnectionsRequest$NextToken": "A token from a previous result page. It's used for a paginated request. The request retrieves the next result page. All other parameter values must be identical to the ones that are specified in the initial request.
If you don't specify NextToken
, the request retrieves the first result page.
A description of the App Runner service updated by this request. All configuration values in the returned Service
structure reflect configuration changes that are being applied by this request.
A list of service ARN records. In a paginated request, the request returns up to MaxResults
records for each call.
The time when the auto scaling configuration was created. It's in Unix time stamp format.
", "AutoScalingConfiguration$DeletedAt": "The time when the auto scaling configuration was deleted. It's in Unix time stamp format.
", + "AutoScalingConfigurationSummary$CreatedAt": "The time when the auto scaling configuration was created. It's in Unix time stamp format.
", "Connection$CreatedAt": "The App Runner connection creation time, expressed as a Unix time stamp.
", "ConnectionSummary$CreatedAt": "The App Runner connection creation time, expressed as a Unix time stamp.
", "ObservabilityConfiguration$CreatedAt": "The time when the observability configuration was created. It's in Unix time stamp format.
", @@ -1173,6 +1240,16 @@ "refs": { } }, + "UpdateDefaultAutoScalingConfigurationRequest": { + "base": null, + "refs": { + } + }, + "UpdateDefaultAutoScalingConfigurationResponse": { + "base": null, + "refs": { + } + }, "UpdateServiceRequest": { "base": null, "refs": { diff --git a/models/apis/apprunner/2020-05-15/paginators-1.json b/models/apis/apprunner/2020-05-15/paginators-1.json index e360ded2057..33d0f0f8cfa 100644 --- a/models/apis/apprunner/2020-05-15/paginators-1.json +++ b/models/apis/apprunner/2020-05-15/paginators-1.json @@ -30,6 +30,11 @@ "output_token": "NextToken", "limit_key": "MaxResults" }, + "ListServicesForAutoScalingConfiguration": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults" + }, "ListVpcConnectors": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/models/apis/codeartifact/2018-09-22/api-2.json b/models/apis/codeartifact/2018-09-22/api-2.json index 41a3c060c1e..be8f8362e27 100644 --- a/models/apis/codeartifact/2018-09-22/api-2.json +++ b/models/apis/codeartifact/2018-09-22/api-2.json @@ -2119,7 +2119,8 @@ "pypi", "maven", "nuget", - "generic" + "generic", + "swift" ] }, "PackageName":{ diff --git a/models/apis/codeartifact/2018-09-22/endpoint-rule-set-1.json b/models/apis/codeartifact/2018-09-22/endpoint-rule-set-1.json index 19c036173a9..a2bfb01a9e1 100644 --- a/models/apis/codeartifact/2018-09-22/endpoint-rule-set-1.json +++ b/models/apis/codeartifact/2018-09-22/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,224 +140,175 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - 
"supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://codeartifact-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://codeartifact-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://codeartifact-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://codeartifact-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://codeartifact.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://codeartifact.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://codeartifact.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": 
"https://codeartifact.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/models/apis/kinesisvideo/2017-09-30/docs-2.json b/models/apis/kinesisvideo/2017-09-30/docs-2.json index 04f926105d9..bd0a48de875 100644 --- a/models/apis/kinesisvideo/2017-09-30/docs-2.json +++ b/models/apis/kinesisvideo/2017-09-30/docs-2.json @@ -10,7 +10,7 @@ "DescribeEdgeConfiguration": "Describes a stream’s edge configuration that was set using the StartEdgeConfigurationUpdate
API and the latest status of the edge agent's recorder and uploader jobs. Use this API to get the status of the configuration, to determine whether it is in sync with the Edge Agent, and to evaluate the health of the Edge Agent.
Gets the ImageGenerationConfiguration
for a given Kinesis video stream.
Returns the most current information about the stream. The streamName
or streamARN
should be provided in the input.
Returns the most current information about the channel. Specify the ChannelName
or ChannelARN
in the input.
This API is related to WebRTC Ingestion and is only available in the us-west-2
region.
Returns the most current information about the channel. Specify the ChannelName
or ChannelARN
in the input.
Gets the NotificationConfiguration
for a given Kinesis video stream.
Returns the most current information about the signaling channel. You must specify either the name or the Amazon Resource Name (ARN) of the channel that you want to describe.
", "DescribeStream": "Returns the most current information about the specified stream. You must specify either the StreamName
or the StreamARN
.
Returns an array of StreamInfo
objects. Each object describes a stream. To retrieve only streams that satisfy a specific condition, you can specify a StreamNameCondition
.
Returns a list of tags associated with the specified signaling channel.
", "ListTagsForStream": "Returns a list of tags associated with the specified stream.
In the request, you must specify either the StreamName
or the StreamARN
.
An asynchronous API that updates a stream’s existing edge configuration. The Kinesis Video Stream will sync the stream’s edge configuration with the Edge Agent IoT Greengrass component that runs on an IoT Hub Device, set up at your premises. The time to sync can vary and depends on the connectivity of the Hub Device. The SyncStatus
will be updated as the edge configuration is acknowledged and synced with the Edge Agent.
If this API is invoked for the first time, a new edge configuration will be created for the stream, and the sync status will be set to SYNCING
. You will have to wait for the sync status to reach a terminal state such as: IN_SYNC
, or SYNC_FAILED
, before using this API again. If you invoke this API during the syncing process, a ResourceInUseException
will be thrown. The connectivity of the stream’s edge configuration and the Edge Agent will be retried for 15 minutes. After 15 minutes, the status will transition into the SYNC_FAILED
state.
An asynchronous API that updates a stream’s existing edge configuration. The Kinesis Video Stream will sync the stream’s edge configuration with the Edge Agent IoT Greengrass component that runs on an IoT Hub Device, set up at your premises. The time to sync can vary and depends on the connectivity of the Hub Device. The SyncStatus
will be updated as the edge configuration is acknowledged and synced with the Edge Agent.
If this API is invoked for the first time, a new edge configuration will be created for the stream, and the sync status will be set to SYNCING
. You will have to wait for the sync status to reach a terminal state such as: IN_SYNC
, or SYNC_FAILED
, before using this API again. If you invoke this API during the syncing process, a ResourceInUseException
will be thrown. The connectivity of the stream’s edge configuration and the Edge Agent will be retried for 15 minutes. After 15 minutes, the status will transition into the SYNC_FAILED
state.
To move an edge configuration from one device to another, use DeleteEdgeConfiguration to delete the current edge configuration. You can then invoke StartEdgeConfigurationUpdate with an updated Hub Device ARN.
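A sketch of the polling loop this sync-status description implies, assuming a hypothetical stream name and comparing SyncStatus against the state strings named above:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisvideo"
)

func main() {
	svc := kinesisvideo.New(session.Must(session.NewSession()))

	// Poll DescribeEdgeConfiguration until the edge configuration leaves the
	// syncing states and reaches a terminal state (IN_SYNC or SYNC_FAILED).
	for {
		out, err := svc.DescribeEdgeConfiguration(&kinesisvideo.DescribeEdgeConfigurationInput{
			StreamName: aws.String("example-stream"), // hypothetical stream
		})
		if err != nil {
			log.Fatal(err)
		}
		status := aws.StringValue(out.SyncStatus)
		fmt.Println("SyncStatus:", status)
		if status != "SYNCING" && status != "ACKNOWLEDGED" {
			break
		}
		time.Sleep(15 * time.Second)
	}
}
```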
", "TagResource": "Adds one or more tags to a signaling channel. A tag is a key-value pair (the value is optional) that you can define and assign to Amazon Web Services resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. For more information, see Using Cost Allocation Tags in the Billing and Cost Management and Cost Management User Guide.
", "TagStream": "Adds one or more tags to a stream. A tag is a key-value pair (the value is optional) that you can define and assign to Amazon Web Services resources. If you specify a tag that already exists, the tag value is replaced with the value that you specify in the request. For more information, see Using Cost Allocation Tags in the Billing and Cost Management and Cost Management User Guide.
You must provide either the StreamName
or the StreamARN
.
This operation requires permission for the KinesisVideo:TagStream
action.
A Kinesis video stream can support up to 50 tags.
", "UntagResource": "Removes one or more tags from a signaling channel. In the request, specify only a tag key or keys; don't specify the value. If you specify a tag key that does not exist, it's ignored.
", "UntagStream": "Removes one or more tags from a stream. In the request, specify only a tag key or keys; don't specify the value. If you specify a tag key that does not exist, it's ignored.
In the request, you must provide the StreamName
or StreamARN
.
Increases or decreases the stream's data retention period by the value that you specify. To indicate whether you want to increase or decrease the data retention period, specify the Operation
parameter in the request body. In the request, you must specify either the StreamName
or the StreamARN
.
The retention period that you specify replaces the current value.
This operation requires permission for the KinesisVideo:UpdateDataRetention
action.
Changing the data retention period affects the data in the stream as follows:
If the data retention period is increased, existing data is retained for the new retention period. For example, if the data retention period is increased from one hour to seven hours, all existing data is retained for seven hours.
If the data retention period is decreased, existing data is retained for the new retention period. For example, if the data retention period is decreased from seven hours to one hour, all existing data is retained for one hour, and any data older than one hour is deleted immediately.
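A minimal sketch of an increase, with a hypothetical stream name; DescribeStream supplies the CurrentVersion that UpdateDataRetention requires:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisvideo"
)

func main() {
	svc := kinesisvideo.New(session.Must(session.NewSession()))

	// Fetch the stream's current version; UpdateDataRetention requires it.
	desc, err := svc.DescribeStream(&kinesisvideo.DescribeStreamInput{
		StreamName: aws.String("example-stream"), // hypothetical stream
	})
	if err != nil {
		log.Fatal(err)
	}

	// Increase retention by six hours relative to the current setting.
	_, err = svc.UpdateDataRetention(&kinesisvideo.UpdateDataRetentionInput{
		StreamName:                 aws.String("example-stream"),
		CurrentVersion:             desc.StreamInfo.Version,
		Operation:                  aws.String(kinesisvideo.UpdateDataRetentionOperationIncreaseDataRetention),
		DataRetentionChangeInHours: aws.Int64(6),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```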
Updates the StreamInfo
and ImageProcessingConfiguration
fields.
Associates a SignalingChannel
to a stream to store the media. There are two signaling modes that can be specified:
If the StorageStatus
is disabled, no data will be stored, and the StreamARN
parameter will not be needed.
If the StorageStatus
is enabled, the data will be stored in the StreamARN
provided.
If StorageStatus
is enabled, direct peer-to-peer (master-viewer) connections no longer occur. Peers connect directly to the storage session. You must call the JoinStorageSession
API to trigger the sending of an SDP offer and establish a connection between a peer and the storage session.
This API is related to WebRTC Ingestion and is only available in the us-west-2
region.
Associates a SignalingChannel
to a stream to store the media. There are two signaling modes that can be specified:
If the StorageStatus
is disabled, no data will be stored, and the StreamARN
parameter will not be needed.
If the StorageStatus
is enabled, the data will be stored in the StreamARN
provided.
If StorageStatus
is enabled, direct peer-to-peer (master-viewer) connections no longer occur. Peers connect directly to the storage session. You must call the JoinStorageSession
API to trigger the sending of an SDP offer and establish a connection between a peer and the storage session.
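A sketch of enabling storage for a channel; the channel and stream ARNs are hypothetical, and once enabled, peers must use JoinStorageSession as described above:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesisvideo"
)

func main() {
	svc := kinesisvideo.New(session.Must(session.NewSession()))

	_, err := svc.UpdateMediaStorageConfiguration(&kinesisvideo.UpdateMediaStorageConfigurationInput{
		ChannelARN: aws.String("arn:aws:kinesisvideo:us-west-2:123456789012:channel/example-channel/1234"), // hypothetical
		MediaStorageConfiguration: &kinesisvideo.MediaStorageConfiguration{
			Status:    aws.String(kinesisvideo.MediaStorageConfigurationStatusEnabled),
			StreamARN: aws.String("arn:aws:kinesisvideo:us-west-2:123456789012:stream/example-stream/1234"), // hypothetical
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```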
Updates the notification information for a stream.
", "UpdateSignalingChannel": "Updates the existing signaling channel. This is an asynchronous operation and takes time to complete.
If the MessageTtlSeconds
value is updated (either increased or reduced), it only applies to new messages sent via this channel after it's been updated. Existing messages are still expired as per the previous MessageTtlSeconds
value.
Updates stream metadata, such as the device name and media type.
You must provide the stream name or the Amazon Resource Name (ARN) of the stream.
To make sure that you have the latest version of the stream before updating it, you can specify the stream version. Kinesis Video Streams assigns a version to each stream. When you update a stream, Kinesis Video Streams assigns a new version number. To get the latest stream version, use the DescribeStream
API.
UpdateStream
is an asynchronous operation and takes time to complete.
The time interval in milliseconds (ms) at which the images need to be generated from the stream. The minimum value that can be provided is 33 ms, because a camera that generates content at 30 FPS would create a frame every 33.3 ms. If the timestamp range is less than the sampling interval, the Image from the StartTimestamp
will be returned if available.
The time interval in milliseconds (ms) at which the images need to be generated from the stream. The minimum value that can be provided is 200 ms. If the timestamp range is less than the sampling interval, the Image from the StartTimestamp
will be returned if available.
Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account.
When you use AssociateKmsKey
, you specify either the logGroupName
parameter or the resourceIdentifier
parameter. You can't specify both of those parameters in the same operation.
Specify the logGroupName
parameter to cause all log events stored in the log group to be encrypted with that key. Only the log events ingested after the key is associated are encrypted with that key.
Associating a KMS key with a log group overrides any existing associations between the log group and a KMS key. After a KMS key is associated with a log group, all newly ingested data for the log group is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.
Associating a key with a log group does not cause the results of queries of that log group to be encrypted with that key. To have query results encrypted with a KMS key, you must use an AssociateKmsKey
operation with the resourceIdentifier
parameter that specifies a query-result
resource.
Specify the resourceIdentifier
parameter with a query-result
resource, to use that key to encrypt the stored results of all future StartQuery operations in the account. The response from a GetQueryResults operation will still return the query results in plain text.
Even if you have not associated a key with your query results, the query results are encrypted when stored, using the default CloudWatch Logs method.
If you run a query from a monitoring account that queries logs in a source account, the query results key from the monitoring account, if any, is used.
If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable.
CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group or query results. For more information, see Using Symmetric and Asymmetric Keys.
It can take up to 5 minutes for this operation to take effect.
If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException
error.
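A minimal sketch of the log-group form of the association; the log group name and key ARN are hypothetical, and the query-result form would instead set the ResourceIdentifier parameter as described above:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	svc := cloudwatchlogs.New(session.Must(session.NewSession()))

	// Newly ingested events in this log group will be encrypted with the key.
	_, err := svc.AssociateKmsKey(&cloudwatchlogs.AssociateKmsKeyInput{
		LogGroupName: aws.String("/example/app"), // hypothetical log group
		KmsKeyId:     aws.String("arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```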
Cancels the specified export task.
The task must be in the PENDING
or RUNNING
state.
Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask
operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination.
Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported.
Exporting to S3 buckets that are encrypted with AES-256 is supported.
This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING
or PENDING
) export task at a time. To cancel an export task, use CancelExportTask.
You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects.
Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities.
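A sketch of exporting the last 24 hours of a log group; the names are hypothetical, From and To are epoch milliseconds, and the prefix keeps this task's objects separate:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	svc := cloudwatchlogs.New(session.Must(session.NewSession()))

	now := time.Now()
	out, err := svc.CreateExportTask(&cloudwatchlogs.CreateExportTaskInput{
		LogGroupName:      aws.String("/example/app"),          // hypothetical log group
		Destination:       aws.String("example-export-bucket"), // hypothetical S3 bucket
		DestinationPrefix: aws.String("app-logs"),
		From:              aws.Int64(now.Add(-24 * time.Hour).UnixMilli()),
		To:                aws.Int64(now.UnixMilli()),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Poll DescribeExportTasks with this ID to track completion.
	fmt.Println("task id:", aws.StringValue(out.TaskId))
}
```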
Creates a log group with the specified name. You can create up to 20,000 log groups per account.
You must use the following guidelines when naming a log group:
Log group names must be unique within a Region for an Amazon Web Services account.
Log group names can be between 1 and 512 characters long.
Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign)
When you create a log group, by default the log events in the log group do not expire. To set a retention policy so that events expire and are deleted after a specified time, use PutRetentionPolicy.
If you associate a KMS key with the log group, ingested data is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.
If you attempt to associate a KMS key with the log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException
error.
CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see Using Symmetric and Asymmetric Keys.
Creates a log group with the specified name. You can create up to 1,000,000 log groups per Region per account.
You must use the following guidelines when naming a log group:
Log group names must be unique within a Region for an Amazon Web Services account.
Log group names can be between 1 and 512 characters long.
Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign)
When you create a log group, by default the log events in the log group do not expire. To set a retention policy so that events expire and are deleted after a specified time, use PutRetentionPolicy.
If you associate a KMS key with the log group, ingested data is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested.
If you attempt to associate a KMS key with the log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException
error.
CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see Using Symmetric and Asymmetric Keys.
Creates a log stream for the specified log group. A log stream is a sequence of log events that originate from a single source, such as an application instance or a resource that is being monitored.
There is no limit on the number of log streams that you can create for a log group. There is a limit of 50 TPS on CreateLogStream
operations, after which transactions are throttled.
You must use the following guidelines when naming a log stream:
Log stream names must be unique within the log group.
Log stream names can be between 1 and 512 characters long.
Don't use ':' (colon) or '*' (asterisk) characters.
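A minimal sketch that applies both sets of naming rules, using hypothetical names:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	svc := cloudwatchlogs.New(session.Must(session.NewSession()))

	// Both names are hypothetical and follow the guidelines above.
	if _, err := svc.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
		LogGroupName: aws.String("/example/app"),
	}); err != nil {
		log.Fatal(err)
	}
	if _, err := svc.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{
		LogGroupName:  aws.String("/example/app"),
		LogStreamName: aws.String("instance-1"),
	}); err != nil {
		log.Fatal(err)
	}
}
```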
Deletes a CloudWatch Logs account policy.
To use this operation, you must be signed on with the logs:DeleteDataProtectionPolicy
and logs:DeleteAccountPolicy
permissions.
Deletes the data protection policy from the specified log group.
For more information about data protection policies, see PutDataProtectionPolicy.
", @@ -41,10 +41,10 @@ "PutDestination": "Creates or updates a destination. This operation is used only to create destinations for cross-account subscriptions.
A destination encapsulates a physical resource (such as an Amazon Kinesis stream). With a destination, you can subscribe to a real-time stream of log events for a different account, ingested using PutLogEvents.
Through an access policy, a destination controls what is written to it. By default, PutDestination
does not set any access policy with the destination, which means a cross-account user cannot call PutSubscriptionFilter against this destination. To enable this, the destination owner must call PutDestinationPolicy after PutDestination
.
To perform a PutDestination
operation, you must also have the iam:PassRole
permission.
Creates or updates an access policy associated with an existing destination. An access policy is an IAM policy document that is used to authorize claims to register a subscription filter against a given destination.
", "PutLogEvents": "Uploads a batch of log events to the specified log stream.
The sequence token is now ignored in PutLogEvents
actions. PutLogEvents
actions are always accepted and never return InvalidSequenceTokenException
or DataAlreadyAcceptedException
even if the sequence token is not valid. You can use parallel PutLogEvents
actions on the same log stream.
The batch of events must satisfy the following constraints:
The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event.
None of the log events in the batch can be more than 2 hours in the future.
None of the log events in the batch can be more than 14 days in the past. Also, none of the log events can be from earlier than the retention period of the log group.
The log events in the batch must be in chronological order by their timestamp. The timestamp is the time that the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC
. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss
. For example, 2017-09-15T13:45:30
.)
A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails.
Each log event can be no larger than 256 KB.
The maximum number of log events in a batch is 10,000.
The quota of five requests per second per log stream has been removed. Instead, PutLogEvents
actions are throttled based on a per-second per-account quota. You can request an increase to the per-second throttling quota by using the Service Quotas service.
If a call to PutLogEvents
returns \"UnrecognizedClientException\" the most likely cause is a non-valid Amazon Web Services access key ID or secret key.
Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents.
The maximum number of metric filters that can be associated with a log group is 100.
When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created.
Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress
or requestID
as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.
CloudWatch Logs disables a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within a certain amount of time. This helps to prevent accidental high charges.
You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.
Creates or updates a metric filter and associates it with the specified log group. With metric filters, you can configure rules to extract metric data from log events ingested through PutLogEvents.
The maximum number of metric filters that can be associated with a log group is 100.
When you create a metric filter, you can also optionally assign a unit and dimensions to the metric that is created.
Metrics extracted from log events are charged as custom metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as IPAddress
or requestID
as dimensions. Each different value found for a dimension is treated as a separate metric and accrues charges as a separate custom metric.
CloudWatch Logs might disable a metric filter if it generates 1,000 different name/value pairs for your specified dimensions within one hour.
You can also set up a billing alarm to alert you if your charges are higher than expected. For more information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services Charges.
Creates or updates a query definition for CloudWatch Logs Insights. For more information, see Analyzing Log Data with CloudWatch Logs Insights.
To update a query definition, specify its queryDefinitionId
in your request. The values of name
, queryString
, and logGroupNames
are changed to the values that you specify in your update operation. No current values are retained from the current query definition. For example, imagine updating a current query definition that includes log groups. If you don't specify the logGroupNames
parameter in your update operation, the query definition changes to contain no log groups.
You must have the logs:PutQueryDefinition
permission to be able to perform this operation.
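A sketch of creating a query definition that also passes the clientToken this release introduces; the names, query, and token value are hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	svc := cloudwatchlogs.New(session.Must(session.NewSession()))

	out, err := svc.PutQueryDefinition(&cloudwatchlogs.PutQueryDefinitionInput{
		Name:          aws.String("recent-errors"), // hypothetical definition name
		LogGroupNames: aws.StringSlice([]string{"/example/app"}),
		QueryString:   aws.String("fields @timestamp, @message | filter @message like /ERROR/ | sort @timestamp desc | limit 20"),
		// Idempotency token (new in this release): retries of the same
		// request after a network error won't create duplicates.
		ClientToken: aws.String("6f9f1a2b-example-token"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("queryDefinitionId:", aws.StringValue(out.QueryDefinitionId))
}
```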
Creates or updates a resource policy allowing other Amazon Web Services services to put log events to this account, such as Amazon Route 53. An account can have up to 10 resource policies per Amazon Web Services Region.
", - "PutRetentionPolicy": "Sets the retention of the specified log group. With a retention policy, you can configure the number of days for which to retain log events in the specified log group.
CloudWatch Logs doesn’t immediately delete log events when they reach their retention setting. It typically takes up to 72 hours after that before log events are deleted, but in rare situations might take longer.
To illustrate, imagine that you change a log group to have a longer retention setting when it contains log events that are past the expiration date, but haven’t been deleted. Those log events will take up to 72 hours to be deleted after the new retention date is reached. To make sure that log data is deleted permanently, keep a log group at its lower retention setting until 72 hours after the previous retention period ends. Alternatively, wait to change the retention setting until you confirm that the earlier log events are deleted.
Sets the retention of the specified log group. With a retention policy, you can configure the number of days for which to retain log events in the specified log group.
CloudWatch Logs doesn’t immediately delete log events when they reach their retention setting. It typically takes up to 72 hours after that before log events are deleted, but in rare situations might take longer.
To illustrate, imagine that you change a log group to have a longer retention setting when it contains log events that are past the expiration date, but haven’t been deleted. Those log events will take up to 72 hours to be deleted after the new retention date is reached. To make sure that log data is deleted permanently, keep a log group at its lower retention setting until 72 hours after the previous retention period ends. Alternatively, wait to change the retention setting until you confirm that the earlier log events are deleted.
When log events reach their retention setting they are marked for deletion. After they are marked for deletion, they do not add to your archival storage costs anymore, even if they are not actually deleted until later. These log events marked for deletion are also not included when you use an API to retrieve the storedBytes
value to see how many bytes a log group is storing.
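A one-call sketch with a hypothetical log group; RetentionInDays must be one of the fixed values the service accepts (for example 1, 7, 30, or 365):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	svc := cloudwatchlogs.New(session.Must(session.NewSession()))

	// Events older than 30 days are marked for deletion (actual deletion
	// can lag by up to 72 hours, as described above).
	_, err := svc.PutRetentionPolicy(&cloudwatchlogs.PutRetentionPolicyInput{
		LogGroupName:    aws.String("/example/app"), // hypothetical log group
		RetentionInDays: aws.Int64(30),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```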
Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
The following destinations are supported for subscription filters:
An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery.
A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Kinesis Data Firehose as logical destinations.
An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery.
A Lambda function that belongs to the same account as the subscription filter, for same-account delivery.
Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName
.
To perform a PutSubscriptionFilter
operation for any destination except a Lambda function, you must also have the iam:PassRole
permission.
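A sketch of subscribing a log group to a same-account Kinesis data stream; every ARN and name here is hypothetical, and the role must be passable (iam:PassRole) as noted above:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	svc := cloudwatchlogs.New(session.Must(session.NewSession()))

	_, err := svc.PutSubscriptionFilter(&cloudwatchlogs.PutSubscriptionFilterInput{
		LogGroupName:   aws.String("/example/app"),
		FilterName:     aws.String("errors-to-kinesis"),
		FilterPattern:  aws.String("ERROR"), // an empty pattern would match every event
		DestinationArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/example-stream"),
		RoleArn:        aws.String("arn:aws:iam::123456789012:role/CWLToKinesisRole"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```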
Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use.
For more information, see CloudWatch Logs Insights Query Syntax.
After you run a query using StartQuery
, the query results are stored by CloudWatch Logs. You can use GetQueryResults to retrieve the results of a query, using the queryId
that StartQuery
returns.
If you have associated a KMS key with the query results in this account, then StartQuery uses that key to encrypt the results when it stores them. If no key is associated with query results, the query results are encrypted with the default CloudWatch Logs encryption method.
Queries time out after 60 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries.
If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see CloudWatch cross-account observability. For a cross-account StartQuery
operation, the query definition must be defined in the monitoring account.
You can have up to 30 concurrent CloudWatch Logs Insights queries, including queries that have been added to dashboards.
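A sketch of the StartQuery/GetQueryResults round trip described above; the log group and query are hypothetical, and StartTime/EndTime are epoch seconds:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	svc := cloudwatchlogs.New(session.Must(session.NewSession()))

	start, err := svc.StartQuery(&cloudwatchlogs.StartQueryInput{
		LogGroupName: aws.String("/example/app"), // hypothetical log group
		StartTime:    aws.Int64(time.Now().Add(-time.Hour).Unix()),
		EndTime:      aws.Int64(time.Now().Unix()),
		QueryString:  aws.String("fields @timestamp, @message | limit 10"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Results are stored by CloudWatch Logs; poll until the query finishes.
	for {
		res, err := svc.GetQueryResults(&cloudwatchlogs.GetQueryResultsInput{
			QueryId: start.QueryId,
		})
		if err != nil {
			log.Fatal(err)
		}
		if s := aws.StringValue(res.Status); s != "Running" && s != "Scheduled" {
			fmt.Println("status:", s, "rows:", len(res.Results))
			break
		}
		time.Sleep(2 * time.Second)
	}
}
```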
", "StopQuery": "Stops a CloudWatch Logs Insights query that is in progress. If the query has already ended, the operation returns an error indicating that the specified query is not running.
", @@ -123,6 +123,12 @@ "refs": { } }, + "ClientToken": { + "base": null, + "refs": { + "PutQueryDefinitionRequest$clientToken": "Used as an idempotency token, to avoid returning an exception if the service receives the same request twice because of a network error.
" + } + }, "CreateExportTaskRequest": { "base": null, "refs": { diff --git a/models/apis/logs/2014-03-28/endpoint-rule-set-1.json b/models/apis/logs/2014-03-28/endpoint-rule-set-1.json index 29f4692677a..e54c13da834 100644 --- a/models/apis/logs/2014-03-28/endpoint-rule-set-1.json +++ b/models/apis/logs/2014-03-28/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,83 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://logs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" + "endpoint": { + "url": "https://logs-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] 
+ } ] } ], @@ -221,168 +225,128 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } + "ref": "Region" + }, + "us-gov-east-1" ] } ], - "type": "tree", - "rules": [ + "endpoint": { + "url": "https://logs.us-gov-east-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-east-1" - ] - } - ], - "endpoint": { - "url": "https://logs.us-gov-east-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - }, + "fn": "stringEquals", + "argv": [ { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "Region" - }, - "us-gov-west-1" - ] - } - ], - "endpoint": { - "url": "https://logs.us-gov-west-1.amazonaws.com", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "ref": "Region" }, - { - "conditions": [], - "endpoint": { - "url": "https://logs-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "us-gov-west-1" ] } - ] + ], + "endpoint": { + "url": "https://logs.us-gov-west-1.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" + "endpoint": { + "url": "https://logs-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ { - "conditions": [], - "endpoint": { - "url": "https://logs.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsDualStack" ] } ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" } - ] - }, - { - "conditions": [], + ], "type": "tree", "rules": [ { "conditions": [], "endpoint": { - "url": "https://logs.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://logs.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://logs.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at 
end of file diff --git a/models/apis/s3/2006-03-01/api-2.json b/models/apis/s3/2006-03-01/api-2.json index 0b74e51e202..ad5aec592e1 100644 --- a/models/apis/s3/2006-03-01/api-2.json +++ b/models/apis/s3/2006-03-01/api-2.json @@ -7843,7 +7843,7 @@ "flattened":true }, "Setting":{"type":"boolean"}, - "Size":{"type":"integer"}, + "Size":{"type":"long"}, "SkipValidation":{"type":"boolean"}, "SourceSelectionCriteria":{ "type":"structure", diff --git a/models/apis/s3/2006-03-01/docs-2.json b/models/apis/s3/2006-03-01/docs-2.json index 3a1c6c202fe..b4595d24f2a 100644 --- a/models/apis/s3/2006-03-01/docs-2.json +++ b/models/apis/s3/2006-03-01/docs-2.json @@ -4,8 +4,8 @@ "operations": { "AbortMultipartUpload": "This action aborts a multipart upload. After a multipart upload is aborted, no additional parts can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. However, if any part uploads are currently in progress, those part uploads might or might not succeed. As a result, it might be necessary to abort a given multipart upload multiple times in order to completely free all storage consumed by all parts.
To verify that all parts have been removed, so you don't get charged for the part storage, you should call the ListParts action and ensure that the parts list is empty.
For information about permissions required to use the multipart upload, see Multipart Upload and Permissions.
The following operations are related to AbortMultipartUpload
:
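A sketch of the abort-then-verify pattern described above, with hypothetical bucket, key, and upload ID:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	bucket, key, uploadID := "example-bucket", "example-key", "example-upload-id" // hypothetical

	if _, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	}); err != nil {
		log.Fatal(err)
	}

	// Verify no parts remain so no part storage is still billed.
	parts, err := svc.ListParts(&s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	})
	if err != nil {
		log.Println(err) // a NoSuchUpload error also means the upload is gone
		return
	}
	fmt.Println("parts remaining:", len(parts.Parts))
}
```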
Completes a multipart upload by assembling previously uploaded parts.
You first initiate the multipart upload and then upload all parts using the UploadPart operation. After successfully uploading all relevant parts of an upload, you call this action to complete the upload. Upon receiving this request, Amazon S3 concatenates all the parts in ascending order by part number to create a new object. In the Complete Multipart Upload request, you must provide the parts list. You must ensure that the parts list is complete. This action concatenates the parts that you provide in the list. For each part in the list, you must provide the part number and the ETag
value, returned after that part was uploaded.
Processing of a Complete Multipart Upload request could take several minutes to complete. After Amazon S3 begins processing the request, it sends an HTTP response header that specifies a 200 OK response. While processing is in progress, Amazon S3 periodically sends white space characters to keep the connection from timing out. A request could fail after the initial 200 OK response has been sent. This means that a 200 OK
response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, the SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return the error).
Note that if CompleteMultipartUpload
fails, applications should be prepared to retry the failed requests. For more information, see Amazon S3 Error Best Practices.
You cannot use Content-Type: application/x-www-form-urlencoded
with Complete Multipart Upload requests. Also, if you do not provide a Content-Type
header, CompleteMultipartUpload
returns a 200 OK response.
For more information about multipart uploads, see Uploading Objects Using Multipart Upload.
For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions.
CompleteMultipartUpload
has the following special errors:
Error code: EntityTooSmall
Description: Your proposed upload is smaller than the minimum allowed object size. Each part must be at least 5 MB in size, except the last part.
400 Bad Request
Error code: InvalidPart
Description: One or more of the specified parts could not be found. The part might not have been uploaded, or the specified entity tag might not have matched the part's entity tag.
400 Bad Request
Error code: InvalidPartOrder
Description: The list of parts was not in ascending order. The parts list must be specified in order by part number.
400 Bad Request
Error code: NoSuchUpload
Description: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
404 Not Found
The following operations are related to CompleteMultipartUpload
:
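A sketch of completing an upload with an ordered parts list; the ETags and upload ID would come from earlier UploadPart calls and are hypothetical here:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   aws.String("example-bucket"), // hypothetical values throughout
		Key:      aws.String("example-key"),
		UploadId: aws.String("example-upload-id"),
		MultipartUpload: &s3.CompletedMultipartUpload{
			// Parts must be in ascending order by part number, each with the
			// ETag returned by the matching UploadPart call.
			Parts: []*s3.CompletedPart{
				{PartNumber: aws.Int64(1), ETag: aws.String("\"etag-1\"")},
				{PartNumber: aws.Int64(2), ETag: aws.String("\"etag-2\"")},
			},
		},
	})
	if err != nil {
		log.Fatal(err) // retry on failure, per the guidance above
	}
	fmt.Println("location:", aws.StringValue(out.Location))
}
```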
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. This means that a 200 OK
response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, the SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return the error).
If the copy is successful, you receive a response with information about the copied object.
If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body.
The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. For pricing information, see Amazon S3 pricing.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request
error. For more information, see Transfer Acceleration.
When copying an object, you can preserve all metadata (the default) or specify new metadata. However, the access control list (ACL) is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.
To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive
header. When you grant permissions, you can use the s3:x-amz-metadata-directive
condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.
x-amz-website-redirect-location
is unique to each object and must be specified in the request headers to copy the value.
To only copy an object under certain conditions, such as whether the Etag
matches or whether the object was modified before or after a specified date, use the following request parameters:
x-amz-copy-source-if-match
x-amz-copy-source-if-none-match
x-amz-copy-source-if-unmodified-since
x-amz-copy-source-if-modified-since
If both the x-amz-copy-source-if-match
and x-amz-copy-source-if-unmodified-since
headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK
and copies the data:
x-amz-copy-source-if-match
condition evaluates to true
x-amz-copy-source-if-unmodified-since
condition evaluates to false
If both the x-amz-copy-source-if-none-match
and x-amz-copy-source-if-modified-since
headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed
response code:
x-amz-copy-source-if-none-match
condition evaluates to false
x-amz-copy-source-if-modified-since
condition evaluates to true
All headers with the x-amz-
prefix, including x-amz-copy-source
, must be signed.
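A sketch of a conditional copy using two of the headers above, via the SDK's corresponding input fields; the buckets, keys, ETag, and cutoff time are hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Copy only if the source still has this ETag and has not been modified
	// since the cutoff; otherwise S3 returns 412 Precondition Failed.
	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:                      aws.String("destination-bucket"),
		Key:                         aws.String("copied-key"),
		CopySource:                  aws.String("source-bucket/source-key"),
		CopySourceIfMatch:           aws.String("\"9b2cf535f27731c974343645a3985328\""),
		CopySourceIfUnmodifiedSince: aws.Time(time.Date(2023, 9, 1, 0, 0, 0, 0, time.UTC)),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("new etag:", aws.StringValue(out.CopyObjectResult.ETag))
}
```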
Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt the target object copy.
When you perform a CopyObject
operation, if you want to use a different type of encryption setting for the target object, you can use other appropriate encryption-related headers to encrypt the target object with a KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying. For more information about server-side encryption, see Using Server-Side Encryption.
If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups that are defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.
If the bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT
requests that don't specify an ACL or PUT
requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control
canned ACL or an equivalent form of this ACL expressed in the XML format.
For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.
If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.
When copying an object, if it has a checksum, that checksum will be copied to the new object by default. When you copy the object over, you can optionally specify a different checksum algorithm to use with the x-amz-checksum-algorithm
header.
You can use the CopyObject
action to change the storage class of an object that is already stored in Amazon S3 by using the StorageClass
parameter. For more information, see Storage Classes in the Amazon S3 User Guide.
If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject. For more information, see Copying Objects.
By default, x-amz-copy-source
header identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId
subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id
response header in the response.
If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.
The following operations are related to CopyObject
:
Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.
Not every string is an acceptable bucket name. For information about bucket naming restrictions, see Bucket naming rules.
If you want to create an Amazon S3 on Outposts bucket, see Create Bucket.
By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the request body. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region. For more information, see Accessing a bucket.
If you send your create bucket request to the s3.amazonaws.com
endpoint, the request goes to the us-east-1
Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1
as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets.
In addition to s3:CreateBucket
, the following permissions are required when your CreateBucket
request includes specific headers:
Access control lists (ACLs) - If your CreateBucket
request specifies access control list (ACL) permissions and the ACL is public-read, public-read-write, authenticated-read, or if you specify access permissions explicitly through any other ACL, both s3:CreateBucket
and s3:PutBucketAcl
permissions are needed. If the ACL for the CreateBucket
request is private or if the request doesn't specify any ACLs, only s3:CreateBucket
permission is needed.
Object Lock - If ObjectLockEnabledForBucket
is set to true in your CreateBucket
request, s3:PutBucketObjectLockConfiguration
and s3:PutBucketVersioning
permissions are required.
S3 Object Ownership - If your CreateBucket
request includes the x-amz-object-ownership
header, then the s3:PutBucketOwnershipControls
permission is required. By default, ObjectOwnership
is set to BucketOWnerEnforced
and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon use cases where you must control access for each object individually. If you want to change the ObjectOwnership
setting, you can use the x-amz-object-ownership
header in your CreateBucket
request to set the ObjectOwnership
setting of your choice. For more information about S3 Object Ownership, see Controlling object ownership in the Amazon S3 User Guide.
S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. You can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock
API. To use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. By default, all Block Public Access settings are enabled for new buckets. To avoid inadvertent exposure of your resources, we recommend keeping the S3 Block Public Access settings enabled. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.
If your CreateBucket
request sets BucketOwnerEnforced
for Amazon S3 Object Ownership and specifies a bucket ACL that provides access to an external Amazon Web Services account, your request fails with a 400
error and returns the InvalidBucketAcLWithObjectOwnership
error code. For more information, see Setting Object Ownership on an existing bucket in the Amazon S3 User Guide.
The following operations are related to CreateBucket
:
Creates a copy of an object that is already stored in Amazon S3.
You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.
All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account.
A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK
response. This means that a 200 OK
response can contain either a success or an error. If you call the S3 API directly, make sure to design your application to parse the contents of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for SDKs that don't use exceptions, return the error).
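As a minimal aws-sdk-go (v1) sketch of that behavior, the bucket and key names below are placeholders; because the SDK surfaces an error embedded in a 200 OK through the normal error return, one awserr check covers both failure modes:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Bucket and key names are placeholders.
	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("destination-bucket"),
		Key:        aws.String("destination-key"),
		CopySource: aws.String("source-bucket/source-key"),
	})
	if err != nil {
		// An error embedded in a 200 OK response surfaces through the same
		// error return as an ordinary request failure.
		if aerr, ok := err.(awserr.Error); ok {
			log.Fatalf("copy failed: %s: %s", aerr.Code(), aerr.Message())
		}
		log.Fatal(err)
	}
	log.Printf("copied object, ETag: %s", aws.StringValue(out.CopyObjectResult.ETag))
}
```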
If the copy is successful, you receive a response with information about the copied object.
If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the Content-Length header, and you would need to read the entire body.
The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. For pricing information, see Amazon S3 pricing.
Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.
When copying an object, you can preserve all metadata (the default) or specify new metadata. However, the access control list (ACL) is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs.
To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3.
x-amz-website-redirect-location is unique to each object and must be specified in the request headers to copy the value.
To copy an object only under certain conditions, such as whether the ETag matches or whether the object was modified before or after a specified date, use the following request parameters:
x-amz-copy-source-if-match
x-amz-copy-source-if-none-match
x-amz-copy-source-if-unmodified-since
x-amz-copy-source-if-modified-since
If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data:
x-amz-copy-source-if-match condition evaluates to true
x-amz-copy-source-if-unmodified-since condition evaluates to false
If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code:
x-amz-copy-source-if-none-match condition evaluates to false
x-amz-copy-source-if-modified-since condition evaluates to true
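A minimal aws-sdk-go (v1) sketch of a conditional copy follows; the ETag, cutoff date, and bucket and key names are all hypothetical, and the CopySourceIfMatch and CopySourceIfUnmodifiedSince fields map to the headers described above:

```go
package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Hypothetical ETag and cutoff date; bucket and key names are placeholders.
	etag := `"5d41402abc4b2a76b9719d911017c592"`
	cutoff := time.Date(2023, time.September, 1, 0, 0, 0, 0, time.UTC)

	_, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("destination-bucket"),
		Key:        aws.String("destination-key"),
		CopySource: aws.String("source-bucket/source-key"),
		// Copy only if the source still carries this ETag and has not been
		// modified since the cutoff; when both evaluate as described above,
		// Amazon S3 returns 200 OK and copies the data.
		CopySourceIfMatch:           aws.String(etag),
		CopySourceIfUnmodifiedSince: aws.Time(cutoff),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```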
All headers with the x-amz- prefix, including x-amz-copy-source, must be signed.
Amazon S3 automatically encrypts all new objects that are copied to an S3 bucket. When copying an object, if you don't specify encryption information in your copy request, the encryption setting of the target object is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS key or customer-provided key to encrypt the target object copy.
When you perform a CopyObject operation, if you want to use a different type of encryption setting for the target object, you can use other appropriate encryption-related headers to encrypt the target object with a KMS key, an Amazon S3 managed key, or a customer-provided key. With server-side encryption, Amazon S3 encrypts your data as it writes your data to disks in its data centers and decrypts the data when you access it. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If the source object for the copy is stored in Amazon S3 using SSE-C, you must provide the necessary encryption information in your request so that Amazon S3 can decrypt the object for copying. For more information about server-side encryption, see Using Server-Side Encryption.
If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups that are defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.
If the bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format.
For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.
If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.
When copying an object, if it has a checksum, that checksum is copied to the new object by default. You can optionally specify a different checksum algorithm to use with the x-amz-checksum-algorithm header.
You can use the CopyObject action to change the storage class of an object that is already stored in Amazon S3 by using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 User Guide.
If the source object's storage class is GLACIER or DEEP_ARCHIVE, or the object's storage class is INTELLIGENT_TIERING and its S3 Intelligent-Tiering access tier is Archive Access or Deep Archive Access, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject and Copying Objects.
By default, the x-amz-copy-source header identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource.
If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header.
If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null.
The following operations are related to CopyObject:
Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.
Not every string is an acceptable bucket name. For information about bucket naming restrictions, see Bucket naming rules.
If you want to create an Amazon S3 on Outposts bucket, see Create Bucket.
By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the request body. To constrain the bucket creation to a specific Region, you can use the LocationConstraint condition key. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region. For more information, see Accessing a bucket.
If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle a 307 redirect. For more information, see Virtual hosting of buckets.
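A minimal aws-sdk-go (v1) sketch of creating a bucket outside us-east-1 follows; the bucket name and Region are placeholders, and the client Region is aligned with the location constraint so signing works as described above:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// The bucket name is a placeholder and must be globally unique.
	bucket := aws.String("example-bucket-name")

	// Point the client at the target Region and pass a matching location
	// constraint so the bucket is not created in us-east-1 by default.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("eu-west-1")}))
	svc := s3.New(sess)

	if _, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: bucket,
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String("eu-west-1"),
		},
	}); err != nil {
		log.Fatal(err)
	}
	// Optionally block until the new bucket is visible.
	if err := svc.WaitUntilBucketExists(&s3.HeadBucketInput{Bucket: bucket}); err != nil {
		log.Fatal(err)
	}
}
```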
In addition to s3:CreateBucket, the following permissions are required when your CreateBucket request includes specific headers:
Access control lists (ACLs) - If your CreateBucket request specifies access control list (ACL) permissions and the ACL is public-read, public-read-write, authenticated-read, or if you specify access permissions explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL for the CreateBucket request is private or if the request doesn't specify any ACLs, only s3:CreateBucket permission is needed.
Object Lock - If ObjectLockEnabledForBucket is set to true in your CreateBucket request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.
S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership header, then the s3:PutBucketOwnershipControls permission is required. By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon use cases where you must control access for each object individually. If you want to change the ObjectOwnership setting, you can use the x-amz-object-ownership header in your CreateBucket request to set the ObjectOwnership setting of your choice. For more information about S3 Object Ownership, see Controlling object ownership in the Amazon S3 User Guide.
S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. You can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock API. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. By default, all Block Public Access settings are enabled for new buckets. To avoid inadvertent exposure of your resources, we recommend keeping the S3 Block Public Access settings enabled. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.
If your CreateBucket request sets BucketOwnerEnforced for Amazon S3 Object Ownership and specifies a bucket ACL that provides access to an external Amazon Web Services account, your request fails with a 400 error and returns the InvalidBucketAclWithObjectOwnership error code. For more information, see Setting Object Ownership on an existing bucket in the Amazon S3 User Guide.
The following operations are related to CreateBucket:
This action initiates a multipart upload and returns an upload ID. This upload ID is used to associate all of the parts in the specific multipart upload. You specify this upload ID in each of your subsequent upload part requests (see UploadPart). You also include this upload ID in the final request to either complete or abort the multipart upload request.
For more information about multipart uploads, see Multipart Upload Overview.
If you have configured a lifecycle rule to abort incomplete multipart uploads, the upload must complete within the number of days specified in the bucket lifecycle configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort action and Amazon S3 aborts the multipart upload. For more information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration.
For information about the permissions required to use the multipart upload API, see Multipart Upload and Permissions.
For request signing, multipart upload is just a series of regular requests. You initiate a multipart upload, send one or more requests to upload parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4).
After you initiate a multipart upload and upload one or more parts, to stop being charged for storing the uploaded parts, you must either complete or abort the multipart upload. Amazon S3 frees up the space used to store the parts and stops charging you for storing them only after you either complete or abort a multipart upload.
Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. Amazon S3 automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload, if you don't specify encryption information in your request, the encryption setting of the uploaded parts is set to the default encryption configuration of the destination bucket. By default, all buckets have a base level of encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default encryption configuration that uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS) or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding KMS key or customer-provided key to encrypt the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use a different type of encryption setting for the uploaded parts, you can request that Amazon S3 encrypt the object with a KMS key, an Amazon S3 managed key, or a customer-provided key. If the encryption setting in your request is different from the default encryption configuration of the destination bucket, the encryption setting in your request takes precedence. If you choose to provide your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. You can request that Amazon S3 save the uploaded parts encrypted with server-side encryption with an Amazon S3 managed key (SSE-S3), a Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C).
To perform a multipart upload with encryption by using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are required because Amazon S3 must decrypt and read data from the encrypted file parts before it completes the multipart upload. For more information, see Multipart upload API and permissions and Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account as the KMS key, then you must have these permissions on the key policy. If your IAM user or role belongs to a different account than the key, then you must have the permissions on both the key policy and your IAM user or role.
For more information, see Protecting Data Using Server-Side Encryption.
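A minimal aws-sdk-go (v1) sketch of initiating a multipart upload with SSE-KMS follows; the bucket, key, and KMS key ARN are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("example-bucket"), // placeholder
		Key:                  aws.String("large-object"),
		ServerSideEncryption: aws.String("aws:kms"),
		// Hypothetical key ARN; the caller needs kms:Decrypt and
		// kms:GenerateDataKey* on this key, as described above.
		SSEKMSKeyId: aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Pass out.UploadId to every UploadPart call and to the final
	// CompleteMultipartUpload (or AbortMultipartUpload) request.
	log.Printf("upload ID: %s", aws.StringValue(out.UploadId))
}
```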
When copying an object, you can optionally specify the accounts or groups that should be granted specific permissions on the new object. There are two ways to grant the permissions using the request headers:
Specify a canned ACL with the x-amz-acl request header. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
Amazon S3 encrypts data by using server-side encryption with an Amazon S3 managed key (SSE-S3) by default. Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You can request that Amazon S3 encrypt data at rest by using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption keys (SSE-C).
Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (aws/s3) and KMS customer managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data, specify the following headers in the request.
x-amz-server-side-encryption
x-amz-server-side-encryption-aws-kms-key-id
x-amz-server-side-encryption-context
If you specify x-amz-server-side-encryption:aws:kms, but don't provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services managed key (aws/s3 key) in KMS to protect the data.
All GET and PUT requests for an object protected by KMS fail if you don't make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), or Signature Version 4.
For more information about server-side encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption with KMS keys.
Use customer-provided encryption keys (SSE-C) – If you want to manage your own encryption keys, provide all the following headers in the request.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
For more information about server-side encryption with customer-provided encryption keys (SSE-C), see Protecting data using server-side encryption with customer-provided encryption keys (SSE-C).
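A minimal aws-sdk-go (v1) sketch of initiating a multipart upload with SSE-C follows; the bucket, key, and encryption key are placeholders, and the assumption is that the v1 client derives the encoded key and MD5 headers from the raw key value before signing:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Hypothetical 32-byte AES-256 key; manage real keys securely.
	key := "0123456789abcdef0123456789abcdef"

	out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:               aws.String("example-bucket"), // placeholder
		Key:                  aws.String("ssec-object"),
		SSECustomerAlgorithm: aws.String("AES256"),
		SSECustomerKey:       aws.String(key),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Each UploadPart request must supply the same customer key.
	log.Printf("upload ID: %s", aws.StringValue(out.UploadId))
}
```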
You also can use the following access control–related headers with this operation. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the access control list (ACL) on the object. For more information, see Using ACLs. With this operation, you can grant access permissions using one of the following two methods:
Specify a canned ACL (x-amz-acl) — Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. For more information, see Canned ACL.
Specify access permissions explicitly — To explicitly grant access permissions to specific Amazon Web Services accounts or groups, use the following headers. Each header maps to specific permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. In the header, you specify a list of grantees who get the specific permission. To grant permissions explicitly, use:
x-amz-grant-read
x-amz-grant-write
x-amz-grant-read-acp
x-amz-grant-write-acp
x-amz-grant-full-control
You specify each grantee as a type=value pair, where the type is one of the following:
id – if the value specified is the canonical user ID of an Amazon Web Services account
uri – if you are granting permissions to a predefined group
emailAddress – if the value specified is the email address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (SĂŁo Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
For example, the following x-amz-grant-read header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:
x-amz-grant-read: id="11112222333", id="444455556666"
The following operations are related to CreateMultipartUpload:
Deletes the S3 bucket. All objects (including all object versions and delete markers) in the bucket must be deleted before the bucket itself can be deleted.
The following operations are related to DeleteBucket:
Deletes an analytics configuration for the bucket (specified by the analytics configuration ID).
To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
For information about the Amazon S3 analytics feature, see Amazon S3 Analytics – Storage Class Analysis.
The following operations are related to DeleteBucketAnalyticsConfiguration:
Sets the permissions on an existing bucket using access control lists (ACLs). For more information, see Using ACLs. To set the ACL of a bucket, you must have the WRITE_ACP permission.
You can use one of the following two ways to set a bucket's permissions:
Specify the ACL in the request body
Specify permissions using request headers
You cannot specify access permission using both the body and the request headers.
Depending on your application needs, you may choose to set the ACL on a bucket using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, then you can continue to use that approach.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.
You can set access permissions by using one of the following methods:
Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use the x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
id – if the value specified is the canonical user ID of an Amazon Web Services account
uri – if you are granting permissions to a predefined group
emailAddress – if the value specified is the email address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (SĂŁo Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
For example, the following x-amz-grant-write header grants create, overwrite, and delete objects permission to the LogDelivery group predefined by Amazon S3 and to two Amazon Web Services accounts identified by their canonical user IDs:
x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", id="555566667777"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
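A minimal aws-sdk-go (v1) sketch of the explicit-grant form follows; the bucket name and canonical user IDs are placeholders, and the ACL field is left unset because canned ACLs and explicit grants cannot be combined:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Mirrors the x-amz-grant-write header above: the S3 LogDelivery group
	// plus two accounts identified by placeholder canonical user IDs.
	_, err := svc.PutBucketAcl(&s3.PutBucketAclInput{
		Bucket:     aws.String("example-bucket"), // placeholder
		GrantWrite: aws.String(`uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333", id="555566667777"`),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```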
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
DisplayName is optional and ignored in the request
By URI:
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
By Email address:
<Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (SĂŁo Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
The following operations are related to PutBucketAcl:
Sets an analytics configuration for the bucket (specified by the analytics configuration ID). You can have up to 1,000 analytics configurations per bucket.
You can choose to have storage class analysis export analysis reports sent to a comma-separated values (CSV) flat file. See the DataExport request element. Reports are updated daily and are based on the object filters that you configure. When selecting data export, you specify a destination bucket and an optional destination prefix where the file is written. You can export the data to a destination bucket in a different account. However, the destination bucket must be in the same Region as the bucket that you are making the PUT analytics configuration to. For more information, see Amazon S3 Analytics – Storage Class Analysis.
You must create a bucket policy on the destination bucket where the exported file is written to grant permissions to Amazon S3 to write objects to the bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketAnalyticsConfiguration has the following special errors:
HTTP Error: HTTP 400 Bad Request
Code: InvalidArgument
Cause: Invalid argument.
HTTP Error: HTTP 400 Bad Request
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
HTTP Error: HTTP 403 Forbidden
Code: AccessDenied
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration bucket permission to set the configuration on the bucket.
The following operations are related to PutBucketAnalyticsConfiguration:
Sets the cors configuration for your bucket. If the configuration exists, Amazon S3 replaces it.
To use this operation, you must be allowed to perform the s3:PutBucketCORS action. By default, the bucket owner has this permission and can grant it to others.
You set this configuration on a bucket so that the bucket can service cross-origin requests. For example, you might want to enable a request whose origin is http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com by using the browser's XMLHttpRequest capability.
To enable cross-origin resource sharing (CORS) on a bucket, you add the cors subresource to the bucket. The cors subresource is an XML document in which you configure rules that identify origins and the HTTP methods that can be executed on your bucket. The document is limited to 64 KB in size.
When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) against a bucket, it evaluates the cors configuration on the bucket and uses the first CORSRule rule that matches the incoming browser request to enable a cross-origin request. For a rule to match, the following conditions must be met:
The request's Origin header must match an AllowedOrigin element.
The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method header in the case of a pre-flight OPTIONS request must be one of the AllowedMethod elements.
Every header specified in the Access-Control-Request-Headers request header of a pre-flight request must match an AllowedHeader element.
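A minimal aws-sdk-go (v1) sketch of setting one rule matching those conditions follows; the bucket name and origin are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// One rule: GET and PUT requests from http://www.example.com,
	// any request header allowed.
	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedOrigins: []*string{aws.String("http://www.example.com")},
				AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
				AllowedHeaders: []*string{aws.String("*")},
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```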
For more information about CORS, go to Enabling Cross-Origin Resource Sharing in the Amazon S3 User Guide.
The following operations are related to PutBucketCors:
This action uses the encryption subresource to configure default encryption and Amazon S3 Bucket Keys for an existing bucket.
By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally configure default encryption for a bucket by using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate the KMS key ID provided in PutBucketEncryption requests.
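A minimal aws-sdk-go (v1) sketch of configuring SSE-KMS default encryption with an S3 Bucket Key follows; the bucket name and KMS key ARN are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm: aws.String("aws:kms"),
					// Hypothetical ARN. Verify it yourself: as noted above,
					// Amazon S3 does not validate the KMS key ID you provide.
					KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"),
				},
				BucketKeyEnabled: aws.Bool(true),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```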
This action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature Version 4).
To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
The following operations are related to PutBucketEncryption:
Puts an S3 Intelligent-Tiering configuration to the specified bucket. You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.
The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
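A minimal aws-sdk-go (v1) sketch of adding an archive tiering configuration follows; the bucket name and configuration ID are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketIntelligentTieringConfiguration(&s3.PutBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String("example-bucket"),        // placeholder
		Id:     aws.String("archive-after-90-days"), // hypothetical configuration ID
		IntelligentTieringConfiguration: &s3.IntelligentTieringConfiguration{
			Id:     aws.String("archive-after-90-days"),
			Status: aws.String("Enabled"),
			// Move objects not accessed for 90 days into the Archive Access tier.
			Tierings: []*s3.Tiering{{
				AccessTier: aws.String("ARCHIVE_ACCESS"),
				Days:       aws.Int64(90),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```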
Operations related to PutBucketIntelligentTieringConfiguration include:
You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically move objects stored in the S3 Intelligent-Tiering storage class to the Archive Access or Deep Archive Access tier.
PutBucketIntelligentTieringConfiguration has the following special errors:
Code: InvalidArgument
Cause: Invalid Argument
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
Code: AccessDenied
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration bucket permission to set the configuration on the bucket.
This implementation of the PUT action adds an inventory configuration (identified by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations per bucket.
Amazon S3 inventory generates inventories of the objects in the bucket on a daily or weekly basis, and the results are published to a flat file. The bucket that is inventoried is called the source bucket, and the bucket where the inventory flat file is stored is called the destination bucket. The destination bucket must be in the same Amazon Web Services Region as the source bucket.
When you configure an inventory for a source bucket, you specify the destination bucket where you want the inventory to be stored, and whether to generate the inventory daily or weekly. You can also configure what object metadata to include and whether to inventory all object versions or only current versions. For more information, see Amazon S3 Inventory in the Amazon S3 User Guide.
You must create a bucket policy on the destination bucket to grant permissions to Amazon S3 to write objects to the bucket in the defined location. For an example policy, see Granting Permissions for Amazon S3 Inventory and Storage Class Analysis.
To use this operation, you must have permission to perform the s3:PutInventoryConfiguration action. The bucket owner has this permission by default and can grant this permission to others.
The s3:PutInventoryConfiguration permission allows a user to create an S3 Inventory report that includes all object metadata fields available and to specify the destination bucket to store the inventory. A user with read access to objects in the destination bucket can also access all object metadata fields that are available in the inventory report.
To restrict access to an inventory report, see Restricting access to an Amazon S3 Inventory report in the Amazon S3 User Guide. For more information about the metadata fields available in S3 Inventory, see Amazon S3 Inventory lists in the Amazon S3 User Guide. For more information about permissions, see Permissions related to bucket subresource operations and Identity and access management in Amazon S3 in the Amazon S3 User Guide.
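A minimal aws-sdk-go (v1) sketch of configuring a daily CSV inventory follows; the bucket names and configuration ID are placeholders, and the destination bucket must sit in the same Region as the source:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Daily CSV inventory of current versions, delivered to a destination bucket.
	_, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
		Bucket: aws.String("source-bucket"),   // placeholder
		Id:     aws.String("daily-inventory"), // hypothetical configuration ID
		InventoryConfiguration: &s3.InventoryConfiguration{
			Id:                     aws.String("daily-inventory"),
			IsEnabled:              aws.Bool(true),
			IncludedObjectVersions: aws.String("Current"),
			Schedule:               &s3.InventorySchedule{Frequency: aws.String("Daily")},
			Destination: &s3.InventoryDestination{
				S3BucketDestination: &s3.InventoryS3BucketDestination{
					Bucket: aws.String("arn:aws:s3:::destination-bucket"),
					Format: aws.String("CSV"),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```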
PutBucketInventoryConfiguration has the following special errors:
Code: InvalidArgument
Cause: Invalid Argument
Code: TooManyConfigurations
Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.
Code: AccessDenied
Cause: You are not the owner of the specified bucket, or you do not have the s3:PutInventoryConfiguration bucket permission to set the configuration on the bucket.
The following operations are related to PutBucketInventoryConfiguration:
For an updated version of this API, see PutBucketLifecycleConfiguration. This version has been deprecated. Existing lifecycle configurations will work. For new lifecycle configurations, use the updated API.
Creates a new lifecycle configuration for the bucket or replaces an existing lifecycle configuration. For information about lifecycle configuration, see Object Lifecycle Management in the Amazon S3 User Guide.
By default, all Amazon S3 resources, including buckets, objects, and related subresources (for example, lifecycle configuration and website configuration) are private. Only the resource owner, the Amazon Web Services account that created the resource, can access it. The resource owner can optionally grant access permissions to others by writing an access policy. For this operation, users must get the s3:PutLifecycleConfiguration permission.
You can also explicitly deny permissions. Explicit denial also supersedes any other permissions. If you want to prevent users or accounts from removing or deleting objects from your bucket, you must deny them permissions for the following actions:
s3:DeleteObject
s3:DeleteObjectVersion
s3:PutLifecycleConfiguration
For more information about permissions, see Managing Access Permissions to your Amazon S3 Resources in the Amazon S3 User Guide.
For more examples of transitioning objects to storage classes such as STANDARD_IA or ONEZONE_IA, see Examples of Lifecycle Configuration.
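Since this API is deprecated, here is a minimal aws-sdk-go (v1) sketch that uses the updated PutBucketLifecycleConfiguration the text recommends; the bucket name, rule ID, and prefix are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// One rule that transitions objects under the hypothetical logs/ prefix
	// to STANDARD_IA after 30 days.
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{{
				ID:     aws.String("transition-logs"),
				Status: aws.String("Enabled"),
				Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
				Transitions: []*s3.Transition{{
					Days:         aws.Int64(30),
					StorageClass: aws.String("STANDARD_IA"),
				}},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```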
The following operations are related to PutBucketLifecycle:
GetBucketLifecycle (deprecated)
By default, a resource owner—in this case, a bucket owner, which is the Amazon Web Services account that created the bucket—can perform any of the operations. A resource owner can also grant others permission to perform the operation. For more information, see the following topics in the Amazon S3 User Guide:
Enables notifications of specified events for a bucket. For more information about event notifications, see Configuring Event Notifications.
Using this API, you can replace an existing notification configuration. The configuration is an XML file that defines the event types that you want Amazon S3 to publish and the destination where you want Amazon S3 to publish an event notification when it detects an event of the specified type.
By default, your bucket has no event notifications configured. That is, the notification configuration will be an empty NotificationConfiguration.
<NotificationConfiguration>
</NotificationConfiguration>
This action replaces the existing notification configuration with the configuration you include in the request body.
After Amazon S3 receives this request, it first verifies that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon SQS) destination exists, and that the bucket owner has permission to publish to it by sending a test notification. In the case of Lambda destinations, Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For more information, see Configuring Notifications for Amazon S3 Events.
You can disable notifications by adding the empty NotificationConfiguration element.
For more information about the number of event notification configurations that you can create per bucket, see Amazon S3 service quotas in Amazon Web Services General Reference.
By default, only the bucket owner can configure notifications on a bucket. However, bucket owners can use a bucket policy to grant permission to other users to set this configuration with the required s3:PutBucketNotification permission.
The PUT notification is an atomic operation. For example, suppose your notification configuration includes SNS topic, SQS queue, and Lambda function configurations. When you send a PUT request with this configuration, Amazon S3 sends test messages to your SNS topic. If the message fails, the entire PUT action will fail, and Amazon S3 will not add the configuration to your bucket.
If the configuration in the request body includes only one TopicConfiguration specifying only the s3:ReducedRedundancyLostObject event type, the response will also include the x-amz-sns-test-message-id header containing the message ID of the test notification sent to the topic.
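A minimal aws-sdk-go (v1) sketch of replacing the notification configuration follows; the bucket name and SQS queue ARN are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Publish object-created events to a hypothetical SQS queue. Amazon S3
	// validates the destination with a test message before accepting the PUT.
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		NotificationConfiguration: &s3.NotificationConfiguration{
			QueueConfigurations: []*s3.QueueConfiguration{{
				QueueArn: aws.String("arn:aws:sqs:us-east-1:111122223333:example-queue"),
				Events:   []*string{aws.String("s3:ObjectCreated:*")},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	// To disable notifications, send an empty &s3.NotificationConfiguration{}.
}
```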
The following action is related to PutBucketNotificationConfiguration:
Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying permissions in a policy.
For information about Amazon S3 Object Ownership, see Using object ownership.
The following operations are related to PutBucketOwnershipControls:
Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must have the PutBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this operation.
If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.
To ensure that bucket owners don't inadvertently lock themselves out of their own buckets, the root principal in a bucket owner's Amazon Web Services account can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy API actions, even if their bucket policy explicitly denies the root principal's access. Bucket owner root principals can only be blocked from performing these API actions by VPC endpoint policies and Amazon Web Services Organizations policies.
For more information, see Bucket policy examples.
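A minimal aws-sdk-go (v1) sketch of applying a policy follows; the bucket name is a placeholder and the sample statement (denying non-TLS access) is just one hypothetical policy:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// A sample policy denying non-TLS access; the bucket name is a placeholder.
	policy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Sid": "DenyInsecureTransport",
	    "Effect": "Deny",
	    "Principal": "*",
	    "Action": "s3:*",
	    "Resource": ["arn:aws:s3:::example-bucket", "arn:aws:s3:::example-bucket/*"],
	    "Condition": {"Bool": {"aws:SecureTransport": "false"}}
	  }]
	}`

	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
		Bucket: aws.String("example-bucket"),
		Policy: aws.String(policy),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```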
The following operations are related to PutBucketPolicy:
Creates a replication configuration or replaces an existing one. For more information, see Replication in the Amazon S3 User Guide.
Specify the replication configuration in the request body. In the replication configuration, you provide the name of the destination bucket or buckets where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your behalf, and other relevant information. You can invoke this request for a specific Amazon Web Services Region by using the aws:RequestedRegion condition key.
A replication configuration must include at least one rule, and can contain a maximum of 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in the source bucket. To choose additional subsets of objects to replicate, add a rule for each subset.
To specify a subset of the objects in the source bucket to apply a replication rule to, add the Filter element as a child of the Rule element. You can filter objects based on an object key prefix, one or more object tags, or both. When you add the Filter element in the configuration, you must also add the following elements: DeleteMarkerReplication, Status, and Priority.
If you are using an earlier version of the replication configuration, Amazon S3 handles replication of delete markers differently. For more information, see Backward Compatibility.
For information about enabling versioning on a bucket, see Using Versioning.
By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about replication configuration, see Replicating Objects Created with SSE Using KMS keys.
For information on PutBucketReplication errors, see List of replication-related error codes.
To create a PutBucketReplication request, you must have s3:PutReplicationConfiguration permissions for the bucket.
By default, a resource owner, in this case the Amazon Web Services account that created the bucket, can perform this operation. The resource owner can also grant others permissions to perform the operation. For more information about permissions, see Specifying Permissions in a Policy and Managing Access Permissions to Your Amazon S3 Resources.
To perform this operation, the user or role performing the action must have the iam:PassRole permission.
The following operations are related to PutBucketReplication
:
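As an illustrative aside (not part of the generated model files), a minimal aws-sdk-go (v1) sketch of the rule shape described above might look like the following; the bucket names, role ARN, and prefix are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Because the rule uses a Filter, the DeleteMarkerReplication, Status,
	// and Priority elements must also be set, as the documentation above
	// requires.
	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String("example-source-bucket"), // placeholder
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::111122223333:role/example-replication-role"), // placeholder
			Rules: []*s3.ReplicationRule{{
				Status:   aws.String(s3.ReplicationRuleStatusEnabled),
				Priority: aws.Int64(1),
				Filter:   &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
				DeleteMarkerReplication: &s3.DeleteMarkerReplication{
					Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
				},
				Destination: &s3.Destination{
					Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```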
Sets the request payment configuration for a bucket. By default, the bucket owner pays for downloads from the bucket. This configuration parameter enables the bucket owner (only) to specify that the person requesting the download will be charged for the download. For more information, see Requester Pays Buckets.
The following operations are related to PutBucketRequestPayment
:
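A hedged sketch of the corresponding SDK call (bucket name is a placeholder, not a value from this changelog):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Charge the requester, rather than the bucket owner, for downloads.
	_, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
		Bucket: aws.String("example-bucket"), // placeholder
		RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
			Payer: aws.String(s3.PayerRequester),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```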
Sets the tags for a bucket.
Use tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this, sign up to get your Amazon Web Services account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging and Using Cost Allocation in Amazon S3 Bucket Tags.
When this operation sets the tags for a bucket, it will overwrite any current tags the bucket already has. You cannot use this operation to add tags to an existing list of tags.
To use this operation, you must have permissions to perform the s3:PutBucketTagging
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketTagging
has the following special errors:
Error code: InvalidTagError
Description: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For information about tag restrictions, see User-Defined Tag Restrictions and Amazon Web Services-Generated Cost Allocation Tag Restrictions.
Error code: MalformedXMLError
Description: The XML provided does not match the schema.
Error code: OperationAbortedError
Description: A conflicting conditional action is currently in progress against this resource. Please try again.
Error code: InternalError
Description: The service was unable to apply the provided tag to the bucket.
The following operations are related to PutBucketTagging
:
Sets the tags for a bucket.
Use tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this, sign up to get your Amazon Web Services account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging and Using Cost Allocation in Amazon S3 Bucket Tags.
When this operation sets the tags for a bucket, it will overwrite any current tags the bucket already has. You cannot use this operation to add tags to an existing list of tags.
To use this operation, you must have permissions to perform the s3:PutBucketTagging
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.
PutBucketTagging
has the following special errors. For more Amazon S3 errors, see Error Responses.
InvalidTag
- The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Using Cost Allocation in Amazon S3 Bucket Tags.
MalformedXML
- The XML provided does not match the schema.
OperationAborted
- A conflicting conditional action is currently in progress against this resource. Please try again.
InternalError
- The service was unable to apply the provided tag to the bucket.
The following operations are related to PutBucketTagging
:
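For illustration only (names and tag values are placeholders), a minimal aws-sdk-go sketch of the call described above:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Note: this replaces the bucket's entire tag set; it does not append
	// to existing tags.
	_, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Tagging: &s3.Tagging{TagSet: []*s3.Tag{
			{Key: aws.String("CostCenter"), Value: aws.String("12345")},
			{Key: aws.String("Application"), Value: aws.String("example-app")},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```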
Sets the versioning state of an existing bucket.
You can set the versioning state with one of the following values:
Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket receive a unique version ID.
Suspended—Disables versioning for the objects in the bucket. All objects added to the bucket receive the version ID null.
If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value.
In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the bucket versioning configuration, you must include the x-amz-mfa request
header and the Status
and the MfaDelete
request elements in a request to set the versioning state of the bucket.
If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. (A version-enabled bucket maintains one current and zero or more noncurrent object versions.) For more information, see Lifecycle and Versioning.
The following operations are related to PutBucketVersioning
:
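A minimal sketch of enabling versioning with this SDK (bucket name is a placeholder):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
		Bucket: aws.String("example-bucket"), // placeholder
		// To enable MFA Delete as well, the bucket owner would also set the
		// MFA field (the x-amz-mfa header) and MfaDelete in the configuration.
		VersioningConfiguration: &s3.VersioningConfiguration{
			Status: aws.String(s3.BucketVersioningStatusEnabled),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```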
Sets the configuration of the website that is specified in the website
subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.
This PUT action requires the S3:PutBucketWebsite
permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite
permission.
To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide an index document name for the bucket.
WebsiteConfiguration
RedirectAllRequestsTo
HostName
Protocol
If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.
WebsiteConfiguration
IndexDocument
Suffix
ErrorDocument
Key
RoutingRules
RoutingRule
Condition
HttpErrorCodeReturnedEquals
KeyPrefixEquals
Redirect
Protocol
HostName
ReplaceKeyPrefixWith
ReplaceKeyWith
HttpRedirectCode
Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon S3 User Guide.
", + "PutBucketWebsite": "Sets the configuration of the website that is specified in the website
subresource. To configure a bucket as a website, you can add this subresource on the bucket with website configuration information such as the file name of the index document and any redirect rules. For more information, see Hosting Websites on Amazon S3.
This PUT action requires the S3:PutBucketWebsite
permission. By default, only the bucket owner can configure the website attached to a bucket; however, bucket owners can allow other users to set the website configuration by writing a bucket policy that grants them the S3:PutBucketWebsite
permission.
To redirect all website requests sent to the bucket's website endpoint, you add a website configuration with the following elements. Because all requests are sent to another website, you don't need to provide an index document name for the bucket.
WebsiteConfiguration
RedirectAllRequestsTo
HostName
Protocol
If you want granular control over redirects, you can use the following elements to add routing rules that describe conditions for redirecting requests and information about the redirect destination. In this case, the website configuration must provide an index document for the bucket, because some requests might not be redirected.
WebsiteConfiguration
IndexDocument
Suffix
ErrorDocument
Key
RoutingRules
RoutingRule
Condition
HttpErrorCodeReturnedEquals
KeyPrefixEquals
Redirect
Protocol
HostName
ReplaceKeyPrefixWith
ReplaceKeyWith
HttpRedirectCode
Amazon S3 has a limitation of 50 routing rules per website configuration. If you require more than 50 routing rules, you can use object redirect. For more information, see Configuring an Object Redirect in the Amazon S3 User Guide.
The maximum request length is limited to 128 KB.
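As a rough sketch of the granular (index plus error document) form described above, assuming placeholder names:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Granular form: an index document plus an error document. The
	// RedirectAllRequestsTo form would replace both of these fields.
	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
		Bucket: aws.String("example-bucket"), // placeholder
		WebsiteConfiguration: &s3.WebsiteConfiguration{
			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
			ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```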
", "PutObject": "Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it.
Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. You cannot use PutObject
to only update a single piece of metadata for an existing object. You must put the entire object with updated metadata if you want to update some values.
Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. To prevent objects from being deleted or overwritten, you can use Amazon S3 Object Lock.
To ensure that data is not corrupted traversing the network, use the Content-MD5
header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value.
To successfully complete the PutObject
request, you must have the s3:PutObject
permission in your IAM policy.
To successfully change the object's ACL with your PutObject
request, you must have the s3:PutObjectAcl
permission in your IAM policy.
To successfully set the tag-set with your PutObject
request, you must have the s3:PutObjectTagging
permission in your IAM policy.
The Content-MD5
header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide.
You have four mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with server-side encryption by using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest by using server-side encryption with other key options. For more information, see Using Server-Side Encryption.
When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. By default, all objects are private. Only the owner has full access control. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API.
If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control
canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a 400
error with the error code AccessControlListNotSupported
. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide.
If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner.
By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.
If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see Adding Objects to Versioning-Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning.
For more information about related Amazon S3 APIs, see the following:
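A minimal sketch of an upload with the optional Content-MD5 integrity check described above (bucket, key, and body are placeholders):

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	body := "Hello, S3!"
	sum := md5.Sum([]byte(body)) // optional end-to-end integrity check

	_, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:     aws.String("example-bucket"), // placeholder
		Key:        aws.String("greeting.txt"),   // placeholder
		Body:       strings.NewReader(body),
		ContentMD5: aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```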
", "PutObjectAcl": "Uses the acl
subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have WRITE_ACP
permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon S3 User Guide.
This action is not supported by Amazon S3 on Outposts.
Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide.
If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported
error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide.
You can set access permissions using one of the following methods:
Specify a canned ACL with the x-amz-acl
request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl
. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL.
Specify access permissions explicitly with the x-amz-grant-read
, x-amz-grant-read-acp
, x-amz-grant-write-acp
, and x-amz-grant-full-control
headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use x-amz-acl
header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview.
You specify each grantee as a type=value pair, where the type is one of the following:
id
– if the value specified is the canonical user ID of an Amazon Web Services account
uri
– if you are granting permissions to a predefined group
emailAddress
– if the value specified is the email address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (SĂŁo Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
For example, the following x-amz-grant-read
header grants list objects permission to the two Amazon Web Services accounts identified by their email addresses.
x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\"
You can use either a canned ACL or specify access permissions explicitly. You cannot do both.
You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways:
By the person's ID:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"CanonicalUser\"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName> </Grantee>
DisplayName is optional and ignored in the request.
By URI:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"Group\"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
By Email address:
<Grantee xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:type=\"AmazonCustomerByEmail\"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
The grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser.
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
US East (N. Virginia)
US West (N. California)
US West (Oregon)
Asia Pacific (Singapore)
Asia Pacific (Sydney)
Asia Pacific (Tokyo)
Europe (Ireland)
South America (SĂŁo Paulo)
For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId
subresource.
The following operations are related to PutObjectAcl
:
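As an illustration of the canned-ACL form described above (object names are placeholders; the explicit x-amz-grant-* headers are the mutually exclusive alternative):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Canned-ACL form; GrantRead, GrantFullControl, and so on would be the
	// explicit-grant alternative and cannot be combined with ACL.
	_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("greeting.txt"),   // placeholder
		ACL:    aws.String(s3.ObjectCannedACLPublicRead),
		// VersionId targets a specific object version; by default the
		// current version's ACL is set, as noted above.
	})
	if err != nil {
		log.Fatal(err)
	}
}
```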
Applies a legal hold configuration to the specified object. For more information, see Locking Objects.
This action is not supported by Amazon S3 on Outposts.
", "PutObjectLockConfiguration": "Places an Object Lock configuration on the specified bucket. The rule specified in the Object Lock configuration will be applied by default to every new object placed in the specified bucket. For more information, see Locking Objects.
The DefaultRetention
settings require both a mode and a period.
The DefaultRetention
period can be either Days
or Years
but you must select one. You cannot specify Days
and Years
at the same time.
You can only enable Object Lock for new buckets. If you want to turn on Object Lock for an existing bucket, contact Amazon Web Services Support.
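Assuming a bucket that already has Object Lock enabled (names and the retention period are placeholders), a minimal sketch of the DefaultRetention rule described above:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder; Object Lock must already be enabled
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30), // specify Days or Years, never both
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```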
Places an Object Retention configuration on an object. For more information, see Locking Objects. Users or accounts require the s3:PutObjectRetention
permission in order to place an Object Retention configuration on objects. Bypassing a Governance Retention configuration requires the s3:BypassGovernanceRetention
permission.
This action is not supported by Amazon S3 on Outposts.
", - "PutObjectTagging": "Sets the supplied tag-set to an object that already exists in a bucket.
A tag is a key-value pair. You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.
For tagging-related restrictions related to characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
To use this operation, you must have permission to perform the s3:PutObjectTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
To put tags of any other version, use the versionId
query parameter. You also need permission for the s3:PutObjectVersionTagging
action.
For information about the Amazon S3 object tagging feature, see Object Tagging.
PutObjectTagging
has the following special errors:
Code: InvalidTagError
Cause: The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Object Tagging.
Code: MalformedXMLError
Cause: The XML provided does not match the schema.
Code: OperationAbortedError
Cause: A conflicting conditional action is currently in progress against this resource. Please try again.
Code: InternalError
Cause: The service was unable to apply the provided tag to the object.
The following operations are related to PutObjectTagging
:
Creates or modifies the PublicAccessBlock
configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
When Amazon S3 evaluates the PublicAccessBlock
configuration for a bucket or an object, it checks the PublicAccessBlock
configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock
configurations are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.
For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".
The following operations are related to PutPublicAccessBlock
:
Sets the supplied tag-set to an object that already exists in a bucket. A tag is a key-value pair. For more information, see Object Tagging.
You can associate tags with an object by sending a PUT request against the tagging subresource that is associated with the object. You can retrieve tags by sending a GET request. For more information, see GetObjectTagging.
For restrictions on tag characters and encodings, see Tag Restrictions. Note that Amazon S3 limits the maximum number of tags to 10 tags per object.
To use this operation, you must have permission to perform the s3:PutObjectTagging
action. By default, the bucket owner has this permission and can grant this permission to others.
To put tags of any other version, use the versionId
query parameter. You also need permission for the s3:PutObjectVersionTagging
action.
PutObjectTagging
has the following special errors. For more Amazon S3 errors, see Error Responses.
InvalidTag
- The tag provided was not a valid tag. This error can occur if the tag did not pass input validation. For more information, see Object Tagging.
MalformedXML
- The XML provided does not match the schema.
OperationAborted
- A conflicting conditional action is currently in progress against this resource. Please try again.
InternalError
- The service was unable to apply the provided tag to the object.
The following operations are related to PutObjectTagging
:
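A minimal sketch of the tagging call described above (bucket, key, and tags are placeholders):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("report.csv"),     // placeholder
		// Setting VersionId would tag a non-current version instead, which
		// also requires the s3:PutObjectVersionTagging permission.
		Tagging: &s3.Tagging{TagSet: []*s3.Tag{
			{Key: aws.String("classification"), Value: aws.String("internal")},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```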
Creates or modifies the PublicAccessBlock
configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock
permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
When Amazon S3 evaluates the PublicAccessBlock
configuration for a bucket or an object, it checks the PublicAccessBlock
configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock
configurations are different between the bucket and the account, S3 uses the most restrictive combination of the bucket-level and account-level settings.
For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".
The following operations are related to PutPublicAccessBlock
:
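For illustration (bucket name is a placeholder), a sketch that blocks all four categories of public access at the bucket level:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// S3 combines these bucket-level settings with the account-level
	// configuration and applies the most restrictive combination.
	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
		Bucket: aws.String("example-bucket"), // placeholder
		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
			BlockPublicAcls:       aws.Bool(true),
			IgnorePublicAcls:      aws.Bool(true),
			BlockPublicPolicy:     aws.Bool(true),
			RestrictPublicBuckets: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```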
Restores an archived copy of an object back into Amazon S3.
This action is not supported by Amazon S3 on Outposts.
This action performs the following types of requests:
select
- Perform a select query on an archived object
restore an archive
- Restore an archived object
For more information about the S3
structure in the request body, see the following:
Managing Access with ACLs in the Amazon S3 User Guide
Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide
Define the SQL expression for the SELECT
type of restoration for your query in the request body's SelectParameters
structure. You can use expressions like the following examples.
The following expression returns all records from the specified object.
SELECT * FROM Object
Assuming that you are not using any headers for data stored in the object, you can specify columns with positional headers.
SELECT s._1, s._2 FROM Object s WHERE s._3 > 100
If you have headers and you set the fileHeaderInfo
in the CSV
structure in the request body to USE
, you can specify headers in the query. (If you set the fileHeaderInfo
field to IGNORE
, the first row is skipped for the query.) You cannot mix ordinal positions with header column names.
SELECT s.Id, s.FirstName, s.SSN FROM S3Object s
When making a select request, you can also do the following:
To expedite your queries, specify the Expedited
tier. For more information about tiers, see \"Restoring Archives,\" later in this topic.
Specify details about the data serialization format of both the input object that is being queried and the serialization of the CSV-encoded query results.
The following are additional important facts about the select feature:
The output results are new Amazon S3 objects. Unlike archive retrievals, they are stored until explicitly deleted, either manually or through a lifecycle configuration.
You can issue more than one select request on the same Amazon S3 object. Amazon S3 doesn't duplicate requests, so avoid issuing duplicate requests.
Amazon S3 accepts a select request even if the object has already been restored. A select request doesn't return a 409
error response.
To use this operation, you must have permissions to perform the s3:RestoreObject
action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.
Objects that you archive to the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore request, and then wait until a temporary copy of the object is available. If you want a permanent copy of the object, create a copy of it in the Amazon S3 Standard storage class in your S3 bucket. To access an archived object, you must restore the object for the duration (number of days) that you specify. For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering, you must first initiate a restore request, and then wait until the object is moved into the Frequent Access tier.
To restore a specific object version, you can provide a version ID. If you don't provide a version ID, Amazon S3 restores the current version.
When restoring an archived object, you can specify one of the following data access tier options in the Tier
element of the request body:
Expedited
- Expedited retrievals allow you to quickly access your data stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier when occasional urgent requests for restoring archives are required. For all but the largest archived objects (250 MB+), data accessed using Expedited retrievals is typically made available within 1–5 minutes. Provisioned capacity ensures that retrieval capacity for Expedited retrievals is available when you need it. Expedited retrievals and provisioned capacity are not available for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
Standard
- Standard retrievals allow you to access any of your archived objects within several hours. This is the default option for retrieval requests that do not specify the retrieval option. Standard retrievals typically finish within 3–5 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. They typically finish within 12 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects stored in S3 Intelligent-Tiering.
Bulk
- Bulk retrievals are free for objects stored in the S3 Glacier Flexible Retrieval and S3 Intelligent-Tiering storage classes, enabling you to retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals typically finish within 5–12 hours for objects stored in the S3 Glacier Flexible Retrieval storage class or S3 Intelligent-Tiering Archive tier. Bulk retrievals are also the lowest-cost retrieval option when restoring objects from S3 Glacier Deep Archive. They typically finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier.
For more information about archive retrieval options and provisioned capacity for Expedited
data access, see Restoring Archived Objects in the Amazon S3 User Guide.
You can use Amazon S3 restore speed upgrade to change the restore speed to a faster speed while it is in progress. For more information, see Upgrading the speed of an in-progress restore in the Amazon S3 User Guide.
To get the status of object restoration, you can send a HEAD
request. Operations return the x-amz-restore
header, which provides information about the restoration status, in the response. You can use Amazon S3 event notifications to notify you when a restore is initiated or completed. For more information, see Configuring Amazon S3 Event Notifications in the Amazon S3 User Guide.
After restoring an archived object, you can update the restoration period by reissuing the request with a new period. Amazon S3 updates the restoration period relative to the current time and charges only for the request; there are no data transfer charges. You cannot update the restoration period when Amazon S3 is actively processing your current restore request for the object.
If your bucket has a lifecycle configuration with a rule that includes an expiration action, the object expiration overrides the life span that you specify in a restore request. For example, if you restore an object copy for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the object in 3 days. For more information about lifecycle configuration, see PutBucketLifecycleConfiguration and Object Lifecycle Management in the Amazon S3 User Guide.
A successful action returns either the 200 OK
or 202 Accepted
status code.
If the object is not previously restored, then Amazon S3 returns 202 Accepted
in the response.
If the object is previously restored, Amazon S3 returns 200 OK
in the response.
Special errors:
Code: RestoreAlreadyInProgress
Cause: Object restore is already in progress. (This error does not apply to SELECT type requests.)
HTTP Status Code: 409 Conflict
SOAP Fault Code Prefix: Client
Code: GlacierExpeditedRetrievalNotAvailable
Cause: expedited retrievals are currently not available. Try again later. (Returned if there is insufficient capacity to process the Expedited request. This error applies only to Expedited retrievals and not to S3 Standard or Bulk retrievals.)
HTTP Status Code: 503
SOAP Fault Code Prefix: N/A
The following operations are related to RestoreObject
:
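A minimal sketch of a restore request using the Standard tier described above (names and the period are placeholders; Expedited or Bulk could be substituted subject to the limits above):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Restore a temporary copy for two days using the Standard tier.
	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("example-bucket"),     // placeholder
		Key:    aws.String("archive/report.csv"), // placeholder
		RestoreRequest: &s3.RestoreRequest{
			Days: aws.Int64(2),
			GlacierJobParameters: &s3.GlacierJobParameters{
				Tier: aws.String(s3.TierStandard),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```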
This action filters the contents of an Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You must also specify the data serialization format for the response.
This action is not supported by Amazon S3 on Outposts.
For more information about Amazon S3 Select, see Selecting Content from Objects and SELECT Command in the Amazon S3 User Guide.
You must have s3:GetObject
permission for this operation. Amazon S3 Select does not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy in the Amazon S3 User Guide.
You can use Amazon S3 Select to query objects that have the following format properties:
CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format.
UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports.
GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression for Parquet objects.
Server-side encryption - Amazon S3 Select supports querying objects that are protected with server-side encryption.
For objects that are encrypted with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys) in the Amazon S3 User Guide.
For objects that are encrypted with Amazon S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side encryption is handled transparently, so you don't need to specify anything. For more information about server-side encryption, including SSE-S3 and SSE-KMS, see Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide.
Because the response size is unknown, Amazon S3 Select streams the response as a series of messages and includes a Transfer-Encoding
header with chunked
as its value in the response. For more information, see Appendix: SelectObjectContent Response.
The SelectObjectContent
action does not support the following GetObject
functionality. For more information, see GetObject.
Range
: Although you can specify a scan range for an Amazon S3 Select request (see SelectObjectContentRequest - ScanRange in the request parameters), you cannot specify the range of bytes of an object to return.
The GLACIER
, DEEP_ARCHIVE
, and REDUCED_REDUNDANCY
storage classes, or the ARCHIVE_ACCESS
and DEEP_ARCHIVE_ACCESS
access tiers of the INTELLIGENT_TIERING
storage class: You cannot query objects in the GLACIER
, DEEP_ARCHIVE
, or REDUCED_REDUNDANCY
storage classes, nor objects in the ARCHIVE_ACCESS
or DEEP_ARCHIVE_ACCESS
access tiers of the INTELLIGENT_TIERING
storage class. For more information about storage classes, see Using Amazon S3 storage classes in the Amazon S3 User Guide.
For a list of special errors for this operation, see List of SELECT Object Content Error Codes
The following operations are related to SelectObjectContent
:
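For illustration, a sketch of a select request over a CSV object with headers, consuming the chunked event stream described above (bucket, key, and query are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"), // placeholder
		Key:            aws.String("people.csv"),     // placeholder
		Expression:     aws.String("SELECT s.Id, s.FirstName FROM S3Object s"),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			// USE lets the query reference header column names; IGNORE would
			// skip the first row and require ordinal positions instead.
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
		},
		OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.EventStream.Close()

	// Results arrive as a stream of Records messages.
	for ev := range out.EventStream.Events() {
		if rec, ok := ev.(*s3.RecordsEvent); ok {
			fmt.Print(string(rec.Payload))
		}
	}
	if err := out.EventStream.Err(); err != nil {
		log.Fatal(err)
	}
}
```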
Uploads a part in a multipart upload.
In this operation, you provide part data in your request. However, you have an option to specify your existing Amazon S3 object as a data source for the part you are uploading. To upload a part from an existing object, you use the UploadPartCopy operation.
You must initiate a multipart upload (see CreateMultipartUpload) before you can upload any part. In response to your initiate request, Amazon S3 returns an upload ID, a unique identifier, that you must include in your upload part request.
Part numbers can be any number from 1 to 10,000, inclusive. A part number uniquely identifies a part and also defines its position within the object being created. If you upload a new part using the same part number that was used with a previous part, the previously uploaded part is overwritten.
For information about maximum and minimum part sizes and other multipart upload specifications, see Multipart upload limits in the Amazon S3 User Guide.
To ensure that data is not corrupted when traversing the network, specify the Content-MD5
header in the upload part request. Amazon S3 checks the part data against the provided MD5 value. If they do not match, Amazon S3 returns an error.
If the upload request is signed with Signature Version 4, then Amazon S3 uses the x-amz-content-sha256
header as a checksum instead of Content-MD5
. For more information see Authenticating Requests: Using the Authorization Header (Amazon Web Services Signature Version 4).
Note: After you initiate a multipart upload and upload one or more parts, you must either complete or abort the multipart upload to stop being charged for storage of the uploaded parts. Only after you complete or abort the multipart upload does Amazon S3 free up the parts storage and stop charging you for it.
For more information on multipart uploads, go to Multipart Upload Overview in the Amazon S3 User Guide.
For information on the permissions required to use the multipart upload API, go to Multipart Upload and Permissions in the Amazon S3 User Guide.
Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts it when you access it. You have three mutually exclusive options to protect data using server-side encryption in Amazon S3, depending on how you choose to manage the encryption keys. Specifically, the encryption key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts data with server-side encryption using Amazon S3 managed keys (SSE-S3) by default. You can optionally tell Amazon S3 to encrypt data at rest using server-side encryption with other key options. The option you use depends on whether you want to use KMS keys (SSE-KMS) or provide your own encryption key (SSE-C). If you choose to provide your own encryption key, the request headers you provide in the request must match the headers you used in the request to initiate the upload by using CreateMultipartUpload. For more information, go to Using Server-Side Encryption in the Amazon S3 User Guide.
Server-side encryption is supported by the S3 Multipart Upload actions. Unless you are using a customer-provided encryption key (SSE-C), you don't need to specify the encryption parameters in each UploadPart request. Instead, you only need to specify the server-side encryption parameters in the initial Initiate Multipart Upload request. For more information, see CreateMultipartUpload.
If you requested server-side encryption using a customer-provided encryption key (SSE-C) in your initiate multipart upload request, you must provide identical encryption information in each part upload using the following headers.
x-amz-server-side-encryption-customer-algorithm
x-amz-server-side-encryption-customer-key
x-amz-server-side-encryption-customer-key-MD5
UploadPart
has the following special errors:
Code: NoSuchUpload
Cause: The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed.
HTTP Status Code: 404 Not Found
SOAP Fault Code Prefix: Client
The following operations are related to UploadPart
:
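A sketch of the initiate/upload/complete flow described above (names and payload are placeholders; the single small part is valid because it is also the last part):

```go
package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	bucket, key := aws.String("example-bucket"), aws.String("big-object") // placeholders

	// Initiating returns the upload ID that every part must carry.
	create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket: bucket, Key: key,
	})
	if err != nil {
		log.Fatal(err)
	}

	part, err := svc.UploadPart(&s3.UploadPartInput{
		Bucket:     bucket,
		Key:        key,
		UploadId:   create.UploadId,
		PartNumber: aws.Int64(1), // 1..10,000; reusing a number overwrites that part
		Body:       bytes.NewReader([]byte("part one payload")),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Completing (or aborting) stops the per-part storage charges.
	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:   bucket,
		Key:      key,
		UploadId: create.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{
			Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```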
Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker.
", - "DeletedObject$DeleteMarker": "Specifies whether the versioned object that was permanently deleted was (true) or was not (false) a delete marker. In a simple DELETE, this header indicates whether (true) or not (false) a delete marker was created.
", + "DeleteObjectOutput$DeleteMarker": "Indicates whether the specified object version that was permanently deleted was (true) or was not (false) a delete marker before deletion. In a simple DELETE, this header indicates whether (true) or not (false) the current version of the object is a delete marker.
", + "DeletedObject$DeleteMarker": "Indicates whether the specified object version that was permanently deleted was (true) or was not (false) a delete marker before deletion. In a simple DELETE, this header indicates whether (true) or not (false) the current version of the object is a delete marker.
", "GetObjectAttributesOutput$DeleteMarker": "Specifies whether the object retrieved was (true
) or was not (false
) a delete marker. If false
, this response header does not appear in the response.
Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.
", "HeadObjectOutput$DeleteMarker": "Specifies whether the object retrieved was (true) or was not (false) a Delete Marker. If false, this response header does not appear in the response.
", @@ -3609,7 +3609,7 @@ } }, "RequestPayer": { - "base": "Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. For information about downloading objects from Requester Pays buckets, see Downloading Objects in Requester Pays Buckets in the Amazon S3 User Guide.
", + "base": "Confirms that the requester knows that they will be charged for the request. Bucket owners need not specify this parameter in their requests. If either the source or destination Amazon S3 bucket has Requester Pays enabled, the requester will pay for corresponding charges to copy the object. For information about downloading objects from Requester Pays buckets, see Downloading Objects in Requester Pays Buckets in the Amazon S3 User Guide.
", "refs": { "AbortMultipartUploadRequest$RequestPayer": null, "CompleteMultipartUploadRequest$RequestPayer": null, @@ -3881,19 +3881,19 @@ "refs": { "CompleteMultipartUploadOutput$SSEKMSKeyId": "If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.
", "CopyObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.
", - "CopyObjectRequest$SSEKMSKeyId": "Specifies the KMS key ID to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.
", + "CopyObjectRequest$SSEKMSKeyId": "Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.
", "CreateMultipartUploadOutput$SSEKMSKeyId": "If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.
", - "CreateMultipartUploadRequest$SSEKMSKeyId": "Specifies the ID of the symmetric encryption customer managed key to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.
", + "CreateMultipartUploadRequest$SSEKMSKeyId": "Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption customer managed key to use for object encryption. All GET and PUT requests for an object protected by KMS will fail if they're not made via SSL or using SigV4. For information about configuring any of the officially supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication in the Amazon S3 User Guide.
", "Encryption$KMSKeyId": "If the encryption type is aws:kms
, this optional value specifies the ID of the symmetric encryption customer managed key to use for encryption of job results. Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in KMS in the Amazon Web Services Key Management Service Developer Guide.
If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.
", "HeadObjectOutput$SSEKMSKeyId": "If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.
", "PutObjectOutput$SSEKMSKeyId": "If x-amz-server-side-encryption
has a valid value of aws:kms
or aws:kms:dsse
, this header specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.
If x-amz-server-side-encryption
has a valid value of aws:kms
or aws:kms:dsse
, this header specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. If you specify x-amz-server-side-encryption:aws:kms
or x-amz-server-side-encryption:aws:kms:dsse
, but do not provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key (aws/s3
) to protect the data. If the KMS key does not exist in the same account that's issuing the command, you must use the full ARN and not just the ID.
If x-amz-server-side-encryption
has a valid value of aws:kms
or aws:kms:dsse
, this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object. If you specify x-amz-server-side-encryption:aws:kms
or x-amz-server-side-encryption:aws:kms:dsse
, but do not provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services managed key (aws/s3
) to protect the data. If the KMS key does not exist in the same account that's issuing the command, you must use the full ARN and not just the ID.
Specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key to use for encrypting inventory reports.
", - "ServerSideEncryptionByDefault$KMSMasterKeyID": "Amazon Web Services Key Management Service (KMS) customer Amazon Web Services KMS key ID to use for the default encryption. This parameter is allowed if and only if SSEAlgorithm
is set to aws:kms
.
You can specify the key ID or the Amazon Resource Name (ARN) of the KMS key. If you use a key ID, you can run into a LogDestination undeliverable error when creating a VPC flow log.
If you are using encryption with cross-account or Amazon Web Services service operations you must use a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations.
Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.
Amazon Web Services Key Management Service (KMS) customer Amazon Web Services KMS key ID to use for the default encryption. This parameter is allowed if and only if SSEAlgorithm
is set to aws:kms
.
You can specify the key ID, key alias, or the Amazon Resource Name (ARN) of the KMS key.
Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
Key Alias: alias/alias-name
If you use a key ID, you can run into a LogDestination undeliverable error when creating a VPC flow log.
If you are using encryption with cross-account or Amazon Web Services service operations you must use a fully qualified KMS key ARN. For more information, see Using encryption for cross-account operations.
Amazon S3 only supports symmetric encryption KMS keys. For more information, see Asymmetric keys in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer Guide.
If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key that was used for the object.
", "UploadPartOutput$SSEKMSKeyId": "If present, specifies the ID of the Key Management Service (KMS) symmetric encryption customer managed key was used for the object.
", - "WriteGetObjectResponseRequest$SSEKMSKeyId": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for stored in Amazon S3 object.
" + "WriteGetObjectResponseRequest$SSEKMSKeyId": "If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption customer managed key that was used for stored in Amazon S3 object.
" } }, "SSES3": { @@ -4044,7 +4044,7 @@ "StorageClass": { "base": null, "refs": { - "CopyObjectRequest$StorageClass": "By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.
", + "CopyObjectRequest$StorageClass": "If the x-amz-storage-class
header is not used, the copied object will be stored in the STANDARD Storage Class by default. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.
By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide.
", "Destination$StorageClass": "The storage class to use when replicating objects, such as S3 Standard or reduced redundancy. By default, Amazon S3 uses the storage class of the source object to create the object replica.
For valid values, see the StorageClass
element of the PUT Bucket replication action in the Amazon S3 API Reference.
Provides the storage class information of the object. Amazon S3 returns this header for all objects except for S3 Standard storage class objects.
For more information, see Storage Classes.
", diff --git a/models/apis/s3/2006-03-01/examples-1.json b/models/apis/s3/2006-03-01/examples-1.json index 66619afe495..75c697b58cf 100644 --- a/models/apis/s3/2006-03-01/examples-1.json +++ b/models/apis/s3/2006-03-01/examples-1.json @@ -84,10 +84,13 @@ "CreateBucket": [ { "input": { - "Bucket": "examplebucket" + "Bucket": "examplebucket", + "CreateBucketConfiguration": { + "LocationConstraint": "eu-west-1" + } }, "output": { - "Location": "/examplebucket" + "Location": "http://examplebucket.Deletes a namespace from the current account. If the namespace still contains one or more services, the request fails.
", "DeleteService": "Deletes a specified service. If the service still contains one or more registered instances, the request fails.
", "DeregisterInstance": "Deletes the Amazon Route 53 DNS records and health check, if any, that Cloud Map created for the specified instance.
", - "DiscoverInstances": "Discovers registered instances for a specified namespace and service. You can use DiscoverInstances
to discover instances for any type of namespace. For public and private DNS namespaces, you can also use DNS queries to discover instances.
Discovers registered instances for a specified namespace and service. You can use DiscoverInstances
to discover instances for any type of namespace. DiscoverInstances
returns a randomized list of instances allowing customers to distribute traffic evenly across instances. For public and private DNS namespaces, you can also use DNS queries to discover instances.
Discovers the increasing revision associated with an instance.
", "GetInstance": "Gets information about a specified instance.
", "GetInstancesHealthStatus": "Gets the current health status (Healthy
, Unhealthy
, or Unknown
) of one or more instances that are associated with a specified service.
There's a brief delay between when you register an instance and when the health status for the instance is available.
Gets information about a namespace.
", - "GetOperation": "Gets information about any operation that returns an operation ID in the response, such as a CreateService
request.
To get a list of operations that match specified criteria, see ListOperations.
Gets information about any operation that returns an operation ID in the response, such as a CreateHttpNamespace
request.
To get a list of operations that match specified criteria, see ListOperations.
Gets the settings for a specified service.
", "ListInstances": "Lists summary information about the instances that you registered by using a specified service.
", "ListNamespaces": "Lists summary information about the namespaces that were created by the current Amazon Web Services account.
", @@ -68,7 +69,7 @@ "HttpInstanceSummary$Attributes": "If you included any attributes when you registered the instance, the values of those attributes.
", "Instance$Attributes": "A string map that contains the following information for the service that you specify in ServiceId
:
The attributes that apply to the records that are defined in the service.
For each attribute, the applicable value.
Do not include sensitive information in the attributes if the namespace is discoverable by public DNS queries.
Supported attribute keys include the following:
If you want Cloud Map to create a Route 53 alias record that routes traffic to an Elastic Load Balancing load balancer, specify the DNS name that's associated with the load balancer. For information about how to get the DNS name, see AliasTarget->DNSName in the Route 53 API Reference.
Note the following:
The configuration for the service that's specified by ServiceId
must include settings for an A
record, an AAAA
record, or both.
In the service that's specified by ServiceId
, the value of RoutingPolicy
must be WEIGHTED
.
If the service that's specified by ServiceId
includes HealthCheckConfig
settings, Cloud Map creates the health check, but it won't associate the health check with the alias record.
Auto naming currently doesn't support creating alias records that route traffic to Amazon Web Services resources other than ELB load balancers.
If you specify a value for AWS_ALIAS_DNS_NAME
, don't specify values for any of the AWS_INSTANCE
attributes.
HTTP namespaces only. The Amazon EC2 instance ID for the instance. The AWS_INSTANCE_IPV4
attribute contains the primary private IPv4 address.
If the service configuration includes HealthCheckCustomConfig
, you can optionally use AWS_INIT_HEALTH_STATUS
to specify the initial status of the custom health check, HEALTHY
or UNHEALTHY
. If you don't specify a value for AWS_INIT_HEALTH_STATUS
, the initial status is HEALTHY
.
If the service configuration includes a CNAME
record, the domain name that you want Route 53 to return in response to DNS queries (for example, example.com
).
This value is required if the service specified by ServiceId
includes settings for an CNAME
record.
If the service configuration includes an A
record, the IPv4 address that you want Route 53 to return in response to DNS queries (for example, 192.0.2.44
).
This value is required if the service specified by ServiceId
includes settings for an A
record. If the service includes settings for an SRV
record, you must specify a value for AWS_INSTANCE_IPV4
, AWS_INSTANCE_IPV6
, or both.
If the service configuration includes an AAAA
record, the IPv6 address that you want Route 53 to return in response to DNS queries (for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345
).
This value is required if the service specified by ServiceId
includes settings for an AAAA
record. If the service includes settings for an SRV
record, you must specify a value for AWS_INSTANCE_IPV4
, AWS_INSTANCE_IPV6
, or both.
If the service includes an SRV
record, the value that you want Route 53 to return for the port.
If the service includes HealthCheckConfig
, the port on the endpoint that you want Route 53 to send requests to.
This value is required if you specified settings for an SRV
record or a Route 53 health check when you created the service.
", "InstanceSummary$Attributes": "A string map that contains the following information: the attributes that are associated with the instance and, for each attribute, the applicable value. Supported attribute keys include the following:
AWS_ALIAS_DNS_NAME: For an alias record that routes traffic to an Elastic Load Balancing load balancer, the DNS name that's associated with the load balancer.
AWS_EC2_INSTANCE_ID: The Amazon EC2 instance ID for the instance. When the AWS_EC2_INSTANCE_ID attribute is specified, then the AWS_INSTANCE_IPV4 attribute contains the primary private IPv4 address.
AWS_INIT_HEALTH_STATUS: If the service configuration includes HealthCheckCustomConfig, you can optionally use AWS_INIT_HEALTH_STATUS to specify the initial status of the custom health check, HEALTHY or UNHEALTHY. If you don't specify a value for AWS_INIT_HEALTH_STATUS, the initial status is HEALTHY.
AWS_INSTANCE_CNAME: For a CNAME record, the domain name that Route 53 returns in response to DNS queries (for example, example.com).
AWS_INSTANCE_IPV4: For an A record, the IPv4 address that Route 53 returns in response to DNS queries (for example, 192.0.2.44).
AWS_INSTANCE_IPV6: For an AAAA record, the IPv6 address that Route 53 returns in response to DNS queries (for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345).
AWS_INSTANCE_PORT: For an SRV record, the value that Route 53 returns for the port. In addition, if the service includes HealthCheckConfig, the port on the endpoint that Route 53 sends requests to.
", -    "RegisterInstanceRequest$Attributes": "A string map that contains the following information for the service that you specify in ServiceId: the attributes that apply to the records that are defined in the service and, for each attribute, the applicable value. Do not include sensitive information in the attributes if the namespace is discoverable by public DNS queries. Supported attribute keys include the following:
AWS_ALIAS_DNS_NAME: If you want Cloud Map to create an Amazon Route 53 alias record that routes traffic to an Elastic Load Balancing load balancer, specify the DNS name that's associated with the load balancer. For information about how to get the DNS name, see \"DNSName\" in the topic AliasTarget in the Route 53 API Reference. Note the following: The configuration for the service that's specified by ServiceId must include settings for an A record, an AAAA record, or both. In the service that's specified by ServiceId, the value of RoutingPolicy must be WEIGHTED. If the service that's specified by ServiceId includes HealthCheckConfig settings, Cloud Map will create the Route 53 health check, but it doesn't associate the health check with the alias record. Auto naming currently doesn't support creating alias records that route traffic to Amazon Web Services resources other than Elastic Load Balancing load balancers. If you specify a value for AWS_ALIAS_DNS_NAME, don't specify values for any of the AWS_INSTANCE attributes.
AWS_EC2_INSTANCE_ID: HTTP namespaces only. The Amazon EC2 instance ID for the instance. If the AWS_EC2_INSTANCE_ID attribute is specified, then the only other attribute that can be specified is AWS_INIT_HEALTH_STATUS. When the AWS_EC2_INSTANCE_ID attribute is specified, then the AWS_INSTANCE_IPV4 attribute will be filled out with the primary private IPv4 address.
AWS_INIT_HEALTH_STATUS: If the service configuration includes HealthCheckCustomConfig, you can optionally use AWS_INIT_HEALTH_STATUS to specify the initial status of the custom health check, HEALTHY or UNHEALTHY. If you don't specify a value for AWS_INIT_HEALTH_STATUS, the initial status is HEALTHY.
AWS_INSTANCE_CNAME: If the service configuration includes a CNAME record, the domain name that you want Route 53 to return in response to DNS queries (for example, example.com). This value is required if the service specified by ServiceId includes settings for a CNAME record.
AWS_INSTANCE_IPV4: If the service configuration includes an A record, the IPv4 address that you want Route 53 to return in response to DNS queries (for example, 192.0.2.44). This value is required if the service specified by ServiceId includes settings for an A record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.
AWS_INSTANCE_IPV6: If the service configuration includes an AAAA record, the IPv6 address that you want Route 53 to return in response to DNS queries (for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345). This value is required if the service specified by ServiceId includes settings for an AAAA record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.
AWS_INSTANCE_PORT: If the service includes an SRV record, the value that you want Route 53 to return for the port. If the service includes HealthCheckConfig, the port on the endpoint that you want Route 53 to send requests to. This value is required if you specified settings for an SRV record or a Route 53 health check when you created the service.
You can add up to 30 custom attributes. For each key-value pair, the maximum length of the attribute name is 255 characters, and the maximum length of the attribute value is 1,024 characters. The total size of all provided attributes (sum of all keys and values) must not exceed 5,000 characters.", +    "RegisterInstanceRequest$Attributes": "A string map that contains the following information for the service that you specify in ServiceId: the attributes that apply to the records that are defined in the service and, for each attribute, the applicable value. Do not include sensitive information in the attributes if the namespace is discoverable by public DNS queries. Supported attribute keys include the following:
AWS_ALIAS_DNS_NAME: If you want Cloud Map to create an Amazon Route 53 alias record that routes traffic to an Elastic Load Balancing load balancer, specify the DNS name that's associated with the load balancer. For information about how to get the DNS name, see \"DNSName\" in the topic AliasTarget in the Route 53 API Reference. Note the following: The configuration for the service that's specified by ServiceId must include settings for an A record, an AAAA record, or both. In the service that's specified by ServiceId, the value of RoutingPolicy must be WEIGHTED. If the service that's specified by ServiceId includes HealthCheckConfig settings, Cloud Map will create the Route 53 health check, but it doesn't associate the health check with the alias record. Cloud Map currently doesn't support creating alias records that route traffic to Amazon Web Services resources other than Elastic Load Balancing load balancers. If you specify a value for AWS_ALIAS_DNS_NAME, don't specify values for any of the AWS_INSTANCE attributes.
AWS_EC2_INSTANCE_ID: HTTP namespaces only. The Amazon EC2 instance ID for the instance. If the AWS_EC2_INSTANCE_ID attribute is specified, then the only other attribute that can be specified is AWS_INIT_HEALTH_STATUS. When the AWS_EC2_INSTANCE_ID attribute is specified, then the AWS_INSTANCE_IPV4 attribute will be filled out with the primary private IPv4 address.
AWS_INIT_HEALTH_STATUS: If the service configuration includes HealthCheckCustomConfig, you can optionally use AWS_INIT_HEALTH_STATUS to specify the initial status of the custom health check, HEALTHY or UNHEALTHY. If you don't specify a value for AWS_INIT_HEALTH_STATUS, the initial status is HEALTHY.
AWS_INSTANCE_CNAME: If the service configuration includes a CNAME record, the domain name that you want Route 53 to return in response to DNS queries (for example, example.com). This value is required if the service specified by ServiceId includes settings for a CNAME record.
AWS_INSTANCE_IPV4: If the service configuration includes an A record, the IPv4 address that you want Route 53 to return in response to DNS queries (for example, 192.0.2.44). This value is required if the service specified by ServiceId includes settings for an A record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.
AWS_INSTANCE_IPV6: If the service configuration includes an AAAA record, the IPv6 address that you want Route 53 to return in response to DNS queries (for example, 2001:0db8:85a3:0000:0000:abcd:0001:2345). This value is required if the service specified by ServiceId includes settings for an AAAA record. If the service includes settings for an SRV record, you must specify a value for AWS_INSTANCE_IPV4, AWS_INSTANCE_IPV6, or both.
AWS_INSTANCE_PORT: If the service includes an SRV record, the value that you want Route 53 to return for the port. If the service includes HealthCheckConfig, the port on the endpoint that you want Route 53 to send requests to. This value is required if you specified settings for an SRV record or a Route 53 health check when you created the service.
You can add up to 30 custom attributes. For each key-value pair, the maximum length of the attribute name is 255 characters, and the maximum length of the attribute value is 1,024 characters. The total size of all provided attributes (sum of all keys and values) must not exceed 5,000 characters.",
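The attribute keys documented in this hunk are passed verbatim in the Attributes map of RegisterInstance. A hedged sketch, reusing the client from the earlier listing example; the IDs and address are placeholders:

```go
// registerWebInstance registers one instance with the A-record and port
// attributes described above. "srv-example", "web-1", and the address
// are placeholders.
func registerWebInstance(svc *servicediscovery.ServiceDiscovery) error {
	out, err := svc.RegisterInstance(&servicediscovery.RegisterInstanceInput{
		ServiceId:  aws.String("srv-example"),
		InstanceId: aws.String("web-1"),
		Attributes: map[string]*string{
			"AWS_INSTANCE_IPV4": aws.String("192.0.2.44"), // required when the service defines an A record
			"AWS_INSTANCE_PORT": aws.String("8080"),       // required for an SRV record or a health check
		},
	})
	if err != nil {
		return err
	}
	// Registration is asynchronous; poll the returned operation with GetOperation.
	fmt.Println(aws.StringValue(out.OperationId))
	return nil
}
```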
The HttpName name of the namespace. It's found in the HttpProperties member of the Properties member of the namespace. The HttpName name of the namespace. It's found in the HttpProperties member of the Properties member of the namespace. The name of an HTTP namespace.
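These doc strings describe the NamespaceName parameter of the discover calls, which takes the namespace's HttpName. A sketch with placeholder namespace and service names, again assuming the client from the first example:

```go
// discoverHealthy queries the Cloud Map data plane for healthy instances.
// "example.com" and "exampleservice" are placeholder names.
func discoverHealthy(svc *servicediscovery.ServiceDiscovery) error {
	resp, err := svc.DiscoverInstances(&servicediscovery.DiscoverInstancesInput{
		NamespaceName: aws.String("example.com"), // the namespace's HttpName
		ServiceName:   aws.String("exampleservice"),
		HealthStatus:  aws.String(servicediscovery.HealthStatusFilterHealthy),
	})
	if err != nil {
		return err
	}
	for _, inst := range resp.Instances {
		fmt.Println(aws.StringValue(inst.InstanceId), aws.StringValueMap(inst.Attributes))
	}
	return nil
}
```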
", "Namespace$Name": "The name of the namespace, such as example.com
.
The name of the namespace. When you create a namespace, Cloud Map automatically creates a Route 53 hosted zone that has the same name as the namespace.
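Because creating a namespace also creates the matching Route 53 hosted zone, the call itself needs only a name and, for private DNS, a VPC. A sketch with placeholder values:

```go
// createNamespace creates a private DNS namespace; Cloud Map then creates a
// Route 53 hosted zone with the same name. The name and VPC ID are placeholders.
func createNamespace(svc *servicediscovery.ServiceDiscovery) error {
	out, err := svc.CreatePrivateDnsNamespace(&servicediscovery.CreatePrivateDnsNamespaceInput{
		Name: aws.String("example.com"),
		Vpc:  aws.String("vpc-0123456789abcdef0"),
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(out.OperationId)) // poll with GetOperation
	return nil
}
```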
" @@ -858,6 +870,13 @@ "HealthCheckConfig$ResourcePath": "The path that you want Route 53 to request when performing health checks. The path can be any value that your endpoint returns an HTTP status code of a 2xx or 3xx format for when the endpoint is healthy. An example file is /docs/route53-health-check.html
. Route 53 automatically adds the DNS name for the service. If you don't specify a value for ResourcePath
, the default value is /
.
If you specify TCP
for Type
, you must not specify a value for ResourcePath
.
The increasing revision associated with the response Instances list. If a new instance is registered or deregistered, the InstancesRevision updates. Health status updates don't update InstancesRevision. The increasing revision associated with the response Instances list. If a new instance is registered or deregistered, the InstancesRevision updates. Health status updates don't update InstancesRevision.
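This release also adds the DiscoverInstancesRevision operation that exposes this counter. A hedged sketch, assuming the generated v1 client surface mirrors the model shown in this diff; the names are the same placeholders as above:

```go
// instancesRevision fetches only the revision counter; it changes when an
// instance is registered or deregistered, never on health-status updates,
// so it is cheap to poll as a change signal.
func instancesRevision(svc *servicediscovery.ServiceDiscovery) (int64, error) {
	out, err := svc.DiscoverInstancesRevision(&servicediscovery.DiscoverInstancesRevisionInput{
		NamespaceName: aws.String("example.com"),
		ServiceName:   aws.String("exampleservice"),
	})
	if err != nil {
		return 0, err
	}
	return aws.Int64Value(out.InstancesRevision), nil
}
```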
The name that you want to assign to the service. Do not include sensitive information in the name if the namespace is discoverable by public DNS queries. If you want Cloud Map to create an SRV record when you register an instance and you're using a system that requires a specific SRV format, such as HAProxy, specify the following for Name: Start the name with an underscore (_), such as _exampleservice. End the name with ._protocol, such as ._tcp. When you register an instance, Cloud Map creates an SRV record and assigns a name to the record by concatenating the service name and the namespace name (for example, _exampleservice._tcp.example.com). For services that are accessible by DNS queries, you can't create multiple services with names that differ only by case (such as EXAMPLE and example); otherwise, these services have the same DNS name and can't be distinguished. However, if you use a namespace that's only accessible by API calls, you can create services with names that differ only by case. The name of the service that you specified when you registered the instance.
", + "DiscoverInstancesRevisionRequest$ServiceName": "The name of the service that you specified when you registered the instance.
", "HttpInstanceSummary$ServiceName": "The name of the service that you specified when you registered the instance.
", "Service$Name": "The name of the service.
", "ServiceSummary$Name": "The name of the service.
" diff --git a/models/apis/servicediscovery/2017-03-14/endpoint-rule-set-1.json b/models/apis/servicediscovery/2017-03-14/endpoint-rule-set-1.json index f26f043db06..637ad5695ec 100644 --- a/models/apis/servicediscovery/2017-03-14/endpoint-rule-set-1.json +++ b/models/apis/servicediscovery/2017-03-14/endpoint-rule-set-1.json @@ -58,52 +58,56 @@ "type": "error" }, { - "conditions": [], - "type": "tree", - "rules": [ + "conditions": [ { - "conditions": [ + "fn": "booleanEquals", + "argv": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" + "ref": "UseDualStack" }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + true + ] } - ] + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, { - "conditions": [], + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], "type": "tree", "rules": [ { "conditions": [ { - "fn": "isSet", + "fn": "aws.partition", "argv": [ { "ref": "Region" } - ] + ], + "assign": "PartitionResult" } ], "type": "tree", @@ -111,13 +115,22 @@ { "conditions": [ { - "fn": "aws.partition", + "fn": "booleanEquals", "argv": [ { - "ref": "Region" - } - ], - "assign": "PartitionResult" + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], "type": "tree", @@ -127,92 +140,135 @@ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } ] }, { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseDualStack" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } ], "type": "tree", "rules": [ { - "conditions": [ + "conditions": [], + "endpoint": { + "url": "https://servicediscovery-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, { - "fn": "booleanEquals", + "fn": "getAttr", "argv": [ - true, { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://servicediscovery-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } + "ref": "PartitionResult" + }, + "supportsFIPS" ] } ] - }, + } + ], + "type": "tree", + "rules": [ { "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": 
"error" + "endpoint": { + "url": "https://servicediscovery-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ { "conditions": [ { "fn": "booleanEquals", "argv": [ + true, { - "ref": "UseFIPS" - }, - true + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } ] } ], @@ -221,130 +277,113 @@ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] } ] } ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://servicediscovery-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] + "endpoint": { + "url": "https://servicediscovery.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, + "aws-cn", { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] } ] } ], - "type": "tree", - "rules": [ + "endpoint": { + "url": "https://servicediscovery.{Region}.amazonaws.com.cn", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ { - "conditions": [], - "type": "tree", - "rules": [ + "fn": "stringEquals", + "argv": [ + "aws-us-gov", { - "conditions": [], - "endpoint": { - "url": "https://servicediscovery.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] } ] } - ] + ], + "endpoint": { + "url": "https://servicediscovery.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "type": "tree", - "rules": [ { "conditions": [], "endpoint": { - "url": "https://servicediscovery.{Region}.{PartitionResult#dnsSuffix}", + "url": "https://servicediscovery.{Region}.{PartitionResult#dualStackDnsSuffix}", "properties": {}, "headers": {} }, "type": "endpoint" } ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://servicediscovery.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ] } \ No newline at end of file diff --git a/models/apis/servicediscovery/2017-03-14/endpoint-tests-1.json 
b/models/apis/servicediscovery/2017-03-14/endpoint-tests-1.json index ae95943c83d..957d84922d7 100644 --- a/models/apis/servicediscovery/2017-03-14/endpoint-tests-1.json +++ b/models/apis/servicediscovery/2017-03-14/endpoint-tests-1.json @@ -8,9 +8,9 @@ } }, "params": { + "Region": "af-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "af-south-1" + "UseDualStack": false } }, { @@ -21,9 +21,9 @@ } }, "params": { + "Region": "ap-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-east-1" + "UseDualStack": false } }, { @@ -34,9 +34,9 @@ } }, "params": { + "Region": "ap-northeast-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-1" + "UseDualStack": false } }, { @@ -47,9 +47,9 @@ } }, "params": { + "Region": "ap-northeast-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-2" + "UseDualStack": false } }, { @@ -60,9 +60,9 @@ } }, "params": { + "Region": "ap-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-south-1" + "UseDualStack": false } }, { @@ -73,9 +73,9 @@ } }, "params": { + "Region": "ap-southeast-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-1" + "UseDualStack": false } }, { @@ -86,9 +86,9 @@ } }, "params": { + "Region": "ap-southeast-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-2" + "UseDualStack": false } }, { @@ -99,9 +99,9 @@ } }, "params": { + "Region": "ca-central-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -112,9 +112,9 @@ } }, "params": { + "Region": "ca-central-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "ca-central-1" + "UseDualStack": false } }, { @@ -125,9 +125,9 @@ } }, "params": { + "Region": "eu-central-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-central-1" + "UseDualStack": false } }, { @@ -138,9 +138,9 @@ } }, "params": { + "Region": "eu-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-north-1" + "UseDualStack": false } }, { @@ -151,9 +151,9 @@ } }, "params": { + "Region": "eu-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-south-1" + "UseDualStack": false } }, { @@ -164,9 +164,9 @@ } }, "params": { + "Region": "eu-west-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" + "UseDualStack": false } }, { @@ -177,9 +177,9 @@ } }, "params": { + "Region": "eu-west-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-2" + "UseDualStack": false } }, { @@ -190,9 +190,9 @@ } }, "params": { + "Region": "eu-west-3", "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-3" + "UseDualStack": false } }, { @@ -203,9 +203,9 @@ } }, "params": { + "Region": "me-south-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "me-south-1" + "UseDualStack": false } }, { @@ -216,9 +216,9 @@ } }, "params": { + "Region": "sa-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "sa-east-1" + "UseDualStack": false } }, { @@ -229,9 +229,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -242,9 +242,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "UseDualStack": false } }, { @@ -255,9 +255,9 @@ } }, "params": { + "Region": "us-east-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -268,9 +268,9 @@ } }, "params": { + "Region": "us-east-2", 
"UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-2" + "UseDualStack": false } }, { @@ -281,9 +281,9 @@ } }, "params": { + "Region": "us-west-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -294,9 +294,9 @@ } }, "params": { + "Region": "us-west-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-west-1" + "UseDualStack": false } }, { @@ -307,9 +307,9 @@ } }, "params": { + "Region": "us-west-2", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -320,9 +320,9 @@ } }, "params": { + "Region": "us-west-2", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-west-2" + "UseDualStack": false } }, { @@ -333,22 +333,22 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://servicediscovery.us-east-1.api.aws" + "url": "https://servicediscovery.us-east-1.amazonaws.com" } }, "params": { + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "UseDualStack": true } }, { @@ -359,9 +359,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { @@ -372,9 +372,9 @@ } }, "params": { + "Region": "cn-northwest-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-northwest-1" + "UseDualStack": false } }, { @@ -385,9 +385,9 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -398,22 +398,22 @@ } }, "params": { + "Region": "cn-north-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "UseDualStack": false } }, { "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://servicediscovery.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://servicediscovery.cn-north-1.amazonaws.com.cn" } }, "params": { + "Region": "cn-north-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "UseDualStack": true } }, { @@ -424,9 +424,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -437,9 +437,9 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "UseDualStack": false } }, { @@ -450,9 +450,9 @@ } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -463,9 +463,9 @@ } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-west-1" + "UseDualStack": false } }, { @@ -476,22 +476,22 @@ } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://servicediscovery.us-gov-east-1.api.aws" + "url": "https://servicediscovery.us-gov-east-1.amazonaws.com" } }, "params": { + "Region": "us-gov-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "UseDualStack": true } }, { @@ -500,9 +500,9 @@ "error": "FIPS and 
DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -513,9 +513,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -524,9 +524,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "UseDualStack": true } }, { @@ -537,9 +537,9 @@ } }, "params": { + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "UseDualStack": false } }, { @@ -548,9 +548,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -561,9 +561,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -572,9 +572,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "UseDualStack": true } }, { @@ -585,9 +585,9 @@ } }, "params": { + "Region": "us-isob-east-1", "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "UseDualStack": false } }, { @@ -598,9 +598,9 @@ } }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -623,9 +623,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": true, "UseDualStack": false, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -635,9 +635,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": true, - "Region": "us-east-1", "Endpoint": "https://example.com" } }, diff --git a/models/apis/sso-oidc/2019-06-10/endpoint-rule-set-1.json b/models/apis/sso-oidc/2019-06-10/endpoint-rule-set-1.json new file mode 100644 index 00000000000..5e4f62c9a9f --- /dev/null +++ b/models/apis/sso-oidc/2019-06-10/endpoint-rule-set-1.json @@ -0,0 +1,339 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "String" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "Boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "Boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "String" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://oidc-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ + "aws-us-gov", + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + } + ] + } + ], + "endpoint": { + "url": "https://oidc.{Region}.amazonaws.com", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://oidc-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ] + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + 
} + ] + } + ], + "type": "tree", + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://oidc.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ] + }, + { + "conditions": [], + "endpoint": { + "url": "https://oidc.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ] + } + ] + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ] +} \ No newline at end of file diff --git a/models/apis/sso-oidc/2019-06-10/endpoint-tests-1.json b/models/apis/sso-oidc/2019-06-10/endpoint-tests-1.json new file mode 100644 index 00000000000..638ae14a528 --- /dev/null +++ b/models/apis/sso-oidc/2019-06-10/endpoint-tests-1.json @@ -0,0 +1,561 @@ +{ + "testCases": [ + { + "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.ap-east-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.ap-northeast-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.ap-northeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-northeast-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.ap-northeast-3.amazonaws.com" + } + }, + "params": { + "Region": "ap-northeast-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.ap-south-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.ap-southeast-1.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.ap-southeast-2.amazonaws.com" + } + }, + "params": { + "Region": "ap-southeast-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.ca-central-1.amazonaws.com" + } + }, + "params": { + "Region": "ca-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.eu-central-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-central-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-north-1 with FIPS disabled and 
DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.eu-north-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.eu-south-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.eu-west-1.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.eu-west-2.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.eu-west-3.amazonaws.com" + } + }, + "params": { + "Region": "eu-west-3", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region me-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.me-south-1.amazonaws.com" + } + }, + "params": { + "Region": "me-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.sa-east-1.amazonaws.com" + } + }, + "params": { + "Region": "sa-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.us-east-2.amazonaws.com" + } + }, + "params": { + "Region": "us-east-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.us-west-2.amazonaws.com" + } + }, + "params": { + "Region": "us-west-2", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://oidc-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://oidc.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": 
"https://oidc-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://oidc.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://oidc-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://oidc.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region 
us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "error": "FIPS and DualStack are enabled, but this partition does not support one or both" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "error": "DualStack is enabled but this partition does not support DualStack" + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://oidc.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 3361daa110f..5249f64a2ba 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -12182,13 +12182,16 @@ "ap-northeast-2" : { }, "ap-northeast-3" : { }, "ap-south-1" : { }, + "ap-south-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ap-southeast-3" : { }, "ca-central-1" : { }, "eu-central-1" : { }, + "eu-central-2" : { }, "eu-north-1" : { }, "eu-south-1" : { }, + "eu-south-2" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "eu-west-3" : { }, @@ -19559,6 +19562,12 @@ "cn-northwest-1" : { } } }, + "sso" : { + "endpoints" : { + "cn-north-1" : { }, + "cn-northwest-1" : { } + } + }, "states" : { "endpoints" : { "cn-north-1" : { }, diff --git a/service/appconfig/api.go b/service/appconfig/api.go index beecc95ad98..6221ea43db1 100644 --- a/service/appconfig/api.go +++ b/service/appconfig/api.go @@ -75,6 +75,23 @@ func (c *AppConfig) CreateApplicationRequest(input *CreateApplicationInput) (req // The input fails to satisfy 
the constraints specified by an Amazon Web Services // service. // +// - ServiceQuotaExceededException +// The number of one more AppConfig resources exceeds the maximum allowed. Verify +// that your environment doesn't exceed the following service quotas: +// +// Applications: 100 max +// +// Deployment strategies: 20 max +// +// Configuration profiles: 100 max per application +// +// Environments: 20 max per application +// +// To resolve this issue, you can delete one or more resources and try again. +// Or, you can request a quota increase. For more information about quotas and +// to request an increase, see Service quotas for AppConfig (https://docs.aws.amazon.com/general/latest/gr/appconfig.html#limits_appconfig) +// in the Amazon Web Services General Reference. +// // - InternalServerException // There was an internal failure in the AppConfig service. // @@ -196,6 +213,23 @@ func (c *AppConfig) CreateConfigurationProfileRequest(input *CreateConfiguration // - InternalServerException // There was an internal failure in the AppConfig service. // +// - ServiceQuotaExceededException +// The number of one more AppConfig resources exceeds the maximum allowed. Verify +// that your environment doesn't exceed the following service quotas: +// +// Applications: 100 max +// +// Deployment strategies: 20 max +// +// Configuration profiles: 100 max per application +// +// Environments: 20 max per application +// +// To resolve this issue, you can delete one or more resources and try again. +// Or, you can request a quota increase. For more information about quotas and +// to request an increase, see Service quotas for AppConfig (https://docs.aws.amazon.com/general/latest/gr/appconfig.html#limits_appconfig) +// in the Amazon Web Services General Reference. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/appconfig-2019-10-09/CreateConfigurationProfile func (c *AppConfig) CreateConfigurationProfile(input *CreateConfigurationProfileInput) (*CreateConfigurationProfileOutput, error) { req, out := c.CreateConfigurationProfileRequest(input) @@ -279,6 +313,23 @@ func (c *AppConfig) CreateDeploymentStrategyRequest(input *CreateDeploymentStrat // - InternalServerException // There was an internal failure in the AppConfig service. // +// - ServiceQuotaExceededException +// The number of one more AppConfig resources exceeds the maximum allowed. Verify +// that your environment doesn't exceed the following service quotas: +// +// Applications: 100 max +// +// Deployment strategies: 20 max +// +// Configuration profiles: 100 max per application +// +// Environments: 20 max per application +// +// To resolve this issue, you can delete one or more resources and try again. +// Or, you can request a quota increase. For more information about quotas and +// to request an increase, see Service quotas for AppConfig (https://docs.aws.amazon.com/general/latest/gr/appconfig.html#limits_appconfig) +// in the Amazon Web Services General Reference. +// // - BadRequestException // The input fails to satisfy the constraints specified by an Amazon Web Services // service. @@ -375,6 +426,23 @@ func (c *AppConfig) CreateEnvironmentRequest(input *CreateEnvironmentInput) (req // The input fails to satisfy the constraints specified by an Amazon Web Services // service. // +// - ServiceQuotaExceededException +// The number of one more AppConfig resources exceeds the maximum allowed. 
Verify +// that your environment doesn't exceed the following service quotas: +// +// Applications: 100 max +// +// Deployment strategies: 20 max +// +// Configuration profiles: 100 max per application +// +// Environments: 20 max per application +// +// To resolve this issue, you can delete one or more resources and try again. +// Or, you can request a quota increase. For more information about quotas and +// to request an increase, see Service quotas for AppConfig (https://docs.aws.amazon.com/general/latest/gr/appconfig.html#limits_appconfig) +// in the Amazon Web Services General Reference. +// // See also, https://docs.aws.amazon.com/goto/WebAPI/appconfig-2019-10-09/CreateEnvironment func (c *AppConfig) CreateEnvironment(input *CreateEnvironmentInput) (*CreateEnvironmentOutput, error) { req, out := c.CreateEnvironmentRequest(input) @@ -445,10 +513,23 @@ func (c *AppConfig) CreateExtensionRequest(input *CreateExtensionInput) (req *re // or deploying a configuration. // // You can create your own extensions or use the Amazon Web Services authored -// extensions provided by AppConfig. For most use cases, to create your own -// extension, you must create an Lambda function to perform any computation -// and processing defined in the extension. For more information about extensions, -// see Working with AppConfig extensions (https://docs.aws.amazon.com/appconfig/latest/userguide/working-with-appconfig-extensions.html) +// extensions provided by AppConfig. For an AppConfig extension that uses Lambda, +// you must create a Lambda function to perform any computation and processing +// defined in the extension. If you plan to create custom versions of the Amazon +// Web Services authored notification extensions, you only need to specify an +// Amazon Resource Name (ARN) in the Uri field for the new extension version. +// +// - For a custom EventBridge notification extension, enter the ARN of the +// EventBridge default events in the Uri field. +// +// - For a custom Amazon SNS notification extension, enter the ARN of an +// Amazon SNS topic in the Uri field. +// +// - For a custom Amazon SQS notification extension, enter the ARN of an +// Amazon SQS message queue in the Uri field. +// +// For more information about extensions, see Working with AppConfig extensions +// (https://docs.aws.amazon.com/appconfig/latest/userguide/working-with-appconfig-extensions.html) // in the AppConfig User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -469,8 +550,21 @@ func (c *AppConfig) CreateExtensionRequest(input *CreateExtensionInput) (req *re // of the resource. // // - ServiceQuotaExceededException -// The number of hosted configuration versions exceeds the limit for the AppConfig -// hosted configuration store. Delete one or more versions and try again. +// The number of one more AppConfig resources exceeds the maximum allowed. Verify +// that your environment doesn't exceed the following service quotas: +// +// Applications: 100 max +// +// Deployment strategies: 20 max +// +// Configuration profiles: 100 max per application +// +// Environments: 20 max per application +// +// To resolve this issue, you can delete one or more resources and try again. +// Or, you can request a quota increase. For more information about quotas and +// to request an increase, see Service quotas for AppConfig (https://docs.aws.amazon.com/general/latest/gr/appconfig.html#limits_appconfig) +// in the Amazon Web Services General Reference. 
// // - InternalServerException // There was an internal failure in the AppConfig service. @@ -573,8 +667,21 @@ func (c *AppConfig) CreateExtensionAssociationRequest(input *CreateExtensionAsso // There was an internal failure in the AppConfig service. // // - ServiceQuotaExceededException -// The number of hosted configuration versions exceeds the limit for the AppConfig -// hosted configuration store. Delete one or more versions and try again. +// The number of one more AppConfig resources exceeds the maximum allowed. Verify +// that your environment doesn't exceed the following service quotas: +// +// Applications: 100 max +// +// Deployment strategies: 20 max +// +// Configuration profiles: 100 max per application +// +// Environments: 20 max per application +// +// To resolve this issue, you can delete one or more resources and try again. +// Or, you can request a quota increase. For more information about quotas and +// to request an increase, see Service quotas for AppConfig (https://docs.aws.amazon.com/general/latest/gr/appconfig.html#limits_appconfig) +// in the Amazon Web Services General Reference. // // See also, https://docs.aws.amazon.com/goto/WebAPI/appconfig-2019-10-09/CreateExtensionAssociation func (c *AppConfig) CreateExtensionAssociation(input *CreateExtensionAssociationInput) (*CreateExtensionAssociationOutput, error) { @@ -657,8 +764,21 @@ func (c *AppConfig) CreateHostedConfigurationVersionRequest(input *CreateHostedC // service. // // - ServiceQuotaExceededException -// The number of hosted configuration versions exceeds the limit for the AppConfig -// hosted configuration store. Delete one or more versions and try again. +// The number of one more AppConfig resources exceeds the maximum allowed. Verify +// that your environment doesn't exceed the following service quotas: +// +// Applications: 100 max +// +// Deployment strategies: 20 max +// +// Configuration profiles: 100 max per application +// +// Environments: 20 max per application +// +// To resolve this issue, you can delete one or more resources and try again. +// Or, you can request a quota increase. For more information about quotas and +// to request an increase, see Service quotas for AppConfig (https://docs.aws.amazon.com/general/latest/gr/appconfig.html#limits_appconfig) +// in the Amazon Web Services General Reference. // // - ResourceNotFoundException // The requested resource could not be found. @@ -4987,8 +5107,11 @@ type CreateConfigurationProfileInput struct { // specify either the parameter name in the format ssm-parameter://