From c3e4bf62c35fa39db711d39af441577e5d7735ec Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Wed, 28 Feb 2024 19:04:24 +0000 Subject: [PATCH 1/2] Update to latest models --- .../next-release/api-change-batch-53438.json | 5 + .../api-change-bedrockagentruntime-83702.json | 5 + .../next-release/api-change-ce-75342.json | 5 + .../next-release/api-change-ec2-21286.json | 5 + .../next-release/api-change-iot-4567.json | 5 + .../next-release/api-change-wafv2-88300.json | 5 + botocore/data/batch/2016-08-10/service-2.json | 519 ++++++++++++++++-- .../2023-07-26/service-2.json | 30 +- botocore/data/ce/2017-10-25/service-2.json | 72 +++ botocore/data/ec2/2016-11-15/service-2.json | 7 +- botocore/data/iot/2015-05-28/service-2.json | 24 +- botocore/data/wafv2/2019-07-29/service-2.json | 5 + 12 files changed, 621 insertions(+), 66 deletions(-) create mode 100644 .changes/next-release/api-change-batch-53438.json create mode 100644 .changes/next-release/api-change-bedrockagentruntime-83702.json create mode 100644 .changes/next-release/api-change-ce-75342.json create mode 100644 .changes/next-release/api-change-ec2-21286.json create mode 100644 .changes/next-release/api-change-iot-4567.json create mode 100644 .changes/next-release/api-change-wafv2-88300.json diff --git a/.changes/next-release/api-change-batch-53438.json b/.changes/next-release/api-change-batch-53438.json new file mode 100644 index 0000000000..a2b3f3a7fe --- /dev/null +++ b/.changes/next-release/api-change-batch-53438.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``batch``", + "description": "This release adds Batch support for configuration of multicontainer jobs in ECS, Fargate, and EKS. This support is available for all types of jobs, including both array jobs and multi-node parallel jobs." 
+} diff --git a/.changes/next-release/api-change-bedrockagentruntime-83702.json b/.changes/next-release/api-change-bedrockagentruntime-83702.json new file mode 100644 index 0000000000..1f9873ad58 --- /dev/null +++ b/.changes/next-release/api-change-bedrockagentruntime-83702.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``bedrock-agent-runtime``", + "description": "This release adds support to override search strategy performed by the Retrieve and RetrieveAndGenerate APIs for Amazon Bedrock Agents" +} diff --git a/.changes/next-release/api-change-ce-75342.json b/.changes/next-release/api-change-ce-75342.json new file mode 100644 index 0000000000..03dd5495e3 --- /dev/null +++ b/.changes/next-release/api-change-ce-75342.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``ce``", + "description": "This release introduces the new API 'GetApproximateUsageRecords', which retrieves estimated usage records for hourly granularity or resource-level data at daily granularity." +} diff --git a/.changes/next-release/api-change-ec2-21286.json b/.changes/next-release/api-change-ec2-21286.json new file mode 100644 index 0000000000..5043a8d47e --- /dev/null +++ b/.changes/next-release/api-change-ec2-21286.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``ec2``", + "description": "This release increases the range of MaxResults for GetNetworkInsightsAccessScopeAnalysisFindings to 1,000." +} diff --git a/.changes/next-release/api-change-iot-4567.json b/.changes/next-release/api-change-iot-4567.json new file mode 100644 index 0000000000..21acca1964 --- /dev/null +++ b/.changes/next-release/api-change-iot-4567.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``iot``", + "description": "This release reduces the maximum results returned per query invocation from 500 to 100 for the SearchIndex API. This change has no implications as long as the API is invoked until the nextToken is NULL." 
+} diff --git a/.changes/next-release/api-change-wafv2-88300.json b/.changes/next-release/api-change-wafv2-88300.json new file mode 100644 index 0000000000..acf37e06c0 --- /dev/null +++ b/.changes/next-release/api-change-wafv2-88300.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``wafv2``", + "description": "AWS WAF now supports configurable time windows for request aggregation with rate-based rules. Customers can now select time windows of 1 minute, 2 minutes or 10 minutes, in addition to the previously supported 5 minutes." +} diff --git a/botocore/data/batch/2016-08-10/service-2.json b/botocore/data/batch/2016-08-10/service-2.json index 37005ba16b..62331f378f 100644 --- a/botocore/data/batch/2016-08-10/service-2.json +++ b/botocore/data/batch/2016-08-10/service-2.json @@ -641,7 +641,7 @@ "documentation":"

The Amazon Resource Name (ARN) of the compute environment.

" } }, - "documentation":"

The order that compute environments are tried in for job placement within a queue. Compute environments are tried in ascending order. For example, if two compute environments are associated with a job queue, the compute environment with a lower order integer value is tried for job placement first. Compute environments must be in the VALID state before you can associate them with a job queue. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

All compute environments that are associated with a job queue must share the same architecture. Batch doesn't support mixing compute environment architecture types in a single job queue.

" + "documentation":"

The order that compute environments are tried in for job placement within a queue. Compute environments are tried in ascending order. For example, if two compute environments are associated with a job queue, the compute environment with a lower order integer value is tried for job placement first. Compute environments must be in the VALID state before you can associate them with a job queue. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); Amazon EC2 and Fargate compute environments can't be mixed.

All compute environments that are associated with a job queue must share the same architecture. Batch doesn't support mixing compute environment architecture types in a single job queue.

" }, "ComputeEnvironmentOrders":{ "type":"list", @@ -703,7 +703,7 @@ }, "tags":{ "shape":"TagsMap", - "documentation":"

Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For Batch, these take the form of \"String1\": \"String2\", where String1 is the tag key and String2 is the tag value-for example, { \"Name\": \"Batch Instance - C4OnDemand\" }. This is helpful for recognizing your Batch instances in the Amazon EC2 console. Updating these tags requires an infrastructure update to the compute environment. For more information, see Updating compute environments in the Batch User Guide. These tags aren't seen when using the Batch ListTagsForResource API operation.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" + "documentation":"

Key-value pair tags to be applied to Amazon EC2 resources that are launched in the compute environment. For Batch, these take the form of \"String1\": \"String2\", where String1 is the tag key and String2 is the tag value-for example, { \"Name\": \"Batch Instance - C4OnDemand\" }. This is helpful for recognizing your Batch instances in the Amazon EC2 console. Updating these tags requires an infrastructure update to the compute environment. For more information, see Updating compute environments in the Batch User Guide. These tags aren't seen when using the Batch ListTagsForResource API operation.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" }, "placementGroup":{ "shape":"String", @@ -719,11 +719,11 @@ }, "launchTemplate":{ "shape":"LaunchTemplateSpecification", - "documentation":"

The launch template to use for your compute resources. Any other compute resource parameters that you specify in a CreateComputeEnvironment API operation override the same parameters in the launch template. You must specify either the launch template ID or launch template name in the request, but not both. For more information, see Launch template support in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" + "documentation":"

The launch template to use for your compute resources. Any other compute resource parameters that you specify in a CreateComputeEnvironment API operation override the same parameters in the launch template. You must specify either the launch template ID or launch template name in the request, but not both. For more information, see Launch template support in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" }, "ec2Configuration":{ "shape":"Ec2ConfigurationList", - "documentation":"

Provides information that's used to select Amazon Machine Images (AMIs) for EC2 instances in the compute environment. If Ec2Configuration isn't specified, the default is ECS_AL2.

One or two values can be provided.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" + "documentation":"

Provides information that's used to select Amazon Machine Images (AMIs) for Amazon EC2 instances in the compute environment. If Ec2Configuration isn't specified, the default is ECS_AL2.

One or two values can be provided.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" } }, "documentation":"

An object that represents an Batch compute resource. For more information, see Compute environments in the Batch User Guide.

" @@ -745,11 +745,11 @@ }, "subnets":{ "shape":"StringList", - "documentation":"

The VPC subnets where the compute resources are launched. Fargate compute resources can contain up to 16 subnets. For Fargate compute resources, providing an empty list will be handled as if this parameter wasn't specified and no change is made. For EC2 compute resources, providing an empty list removes the VPC subnets from the compute resource. For more information, see VPCs and subnets in the Amazon VPC User Guide.

When updating a compute environment, changing the VPC subnets requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

Batch on Amazon EC2 and Batch on Amazon EKS support Local Zones. For more information, see Local Zones in the Amazon EC2 User Guide for Linux Instances, Amazon EKS and Amazon Web Services Local Zones in the Amazon EKS User Guide and Amazon ECS clusters in Local Zones, Wavelength Zones, and Amazon Web Services Outposts in the Amazon ECS Developer Guide.

Batch on Fargate doesn't currently support Local Zones.

" + "documentation":"

The VPC subnets where the compute resources are launched. Fargate compute resources can contain up to 16 subnets. For Fargate compute resources, providing an empty list will be handled as if this parameter wasn't specified and no change is made. For Amazon EC2 compute resources, providing an empty list removes the VPC subnets from the compute resource. For more information, see VPCs and subnets in the Amazon VPC User Guide.

When updating a compute environment, changing the VPC subnets requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

Batch on Amazon EC2 and Batch on Amazon EKS support Local Zones. For more information, see Local Zones in the Amazon EC2 User Guide for Linux Instances, Amazon EKS and Amazon Web Services Local Zones in the Amazon EKS User Guide and Amazon ECS clusters in Local Zones, Wavelength Zones, and Amazon Web Services Outposts in the Amazon ECS Developer Guide.

Batch on Fargate doesn't currently support Local Zones.

" }, "securityGroupIds":{ "shape":"StringList", - "documentation":"

The Amazon EC2 security groups that are associated with instances launched in the compute environment. This parameter is required for Fargate compute resources, where it can contain up to 5 security groups. For Fargate compute resources, providing an empty list is handled as if this parameter wasn't specified and no change is made. For EC2 compute resources, providing an empty list removes the security groups from the compute resource.

When updating a compute environment, changing the EC2 security groups requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

" + "documentation":"

The Amazon EC2 security groups that are associated with instances launched in the compute environment. This parameter is required for Fargate compute resources, where it can contain up to 5 security groups. For Fargate compute resources, providing an empty list is handled as if this parameter wasn't specified and no change is made. For Amazon EC2 compute resources, providing an empty list removes the security groups from the compute resource.

When updating a compute environment, changing the Amazon EC2 security groups requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

" }, "allocationStrategy":{ "shape":"CRUpdateAllocationStrategy", @@ -761,7 +761,7 @@ }, "ec2KeyPair":{ "shape":"String", - "documentation":"

The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to log in to your instances with SSH. To remove the Amazon EC2 key pair, set this value to an empty string.

When updating a compute environment, changing the EC2 key pair requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" + "documentation":"

The Amazon EC2 key pair that's used for instances launched in the compute environment. You can use this key pair to log in to your instances with SSH. To remove the Amazon EC2 key pair, set this value to an empty string.

When updating a compute environment, changing the Amazon EC2 key pair requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" }, "instanceRole":{ "shape":"String", @@ -769,7 +769,7 @@ }, "tags":{ "shape":"TagsMap", - "documentation":"

Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For Batch, these take the form of \"String1\": \"String2\", where String1 is the tag key and String2 is the tag value-for example, { \"Name\": \"Batch Instance - C4OnDemand\" }. This is helpful for recognizing your Batch instances in the Amazon EC2 console. These tags aren't seen when using the Batch ListTagsForResource API operation.

When updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" + "documentation":"

Key-value pair tags to be applied to Amazon EC2 resources that are launched in the compute environment. For Batch, these take the form of \"String1\": \"String2\", where String1 is the tag key and String2 is the tag value-for example, { \"Name\": \"Batch Instance - C4OnDemand\" }. This is helpful for recognizing your Batch instances in the Amazon EC2 console. These tags aren't seen when using the Batch ListTagsForResource API operation.

When updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" }, "placementGroup":{ "shape":"String", @@ -785,7 +785,7 @@ }, "ec2Configuration":{ "shape":"Ec2ConfigurationList", - "documentation":"

Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the compute environment. If Ec2Configuration isn't specified, the default is ECS_AL2.

When updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. To remove the EC2 configuration and any custom AMI ID specified in imageIdOverride, set this value to an empty string.

One or two values can be provided.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" + "documentation":"

Provides information used to select Amazon Machine Images (AMIs) for Amazon EC2 instances in the compute environment. If Ec2Configuration isn't specified, the default is ECS_AL2.

When updating a compute environment, changing this setting requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. To remove the Amazon EC2 configuration and any custom AMI ID specified in imageIdOverride, set this value to an empty string.

One or two values can be provided.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

" }, "updateToLatestImageVersion":{ "shape":"Boolean", @@ -811,11 +811,11 @@ }, "vcpus":{ "shape":"Integer", - "documentation":"

The number of vCPUs reserved for the container. For jobs that run on EC2 resources, you can specify the vCPU requirement for the job using resourceRequirements, but you can't specify the vCPU requirements in both the vcpus and resourceRequirements object. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU. This is required but can be specified in several places. It must be specified for each node at least once.

This parameter isn't applicable to jobs that run on Fargate resources. For jobs that run on Fargate resources, you must specify the vCPU requirement for the job using resourceRequirements.

" + "documentation":"

The number of vCPUs reserved for the container. For jobs that run on Amazon EC2 resources, you can specify the vCPU requirement for the job using resourceRequirements, but you can't specify the vCPU requirements in both the vcpus and resourceRequirements object. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU. This is required but can be specified in several places. It must be specified for each node at least once.

This parameter isn't applicable to jobs that run on Fargate resources. For jobs that run on Fargate resources, you must specify the vCPU requirement for the job using resourceRequirements.

" }, "memory":{ "shape":"Integer", - "documentation":"

For jobs running on EC2 resources that didn't specify memory requirements using resourceRequirements, the number of MiB of memory reserved for the job. For other jobs, including all run on Fargate resources, see resourceRequirements.

" + "documentation":"

For jobs running on Amazon EC2 resources that didn't specify memory requirements using resourceRequirements, the number of MiB of memory reserved for the job. For other jobs, including all run on Fargate resources, see resourceRequirements.

" }, "command":{ "shape":"StringList", @@ -859,7 +859,7 @@ }, "exitCode":{ "shape":"Integer", - "documentation":"

The exit code to return upon completion.

" + "documentation":"

The exit code returned upon completion.

" }, "reason":{ "shape":"String", @@ -895,7 +895,7 @@ }, "logConfiguration":{ "shape":"LogConfiguration", - "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance. Or, alternatively, it must be configured on a different log server for remote logging options. For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers might be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However, the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance. Or, alternatively, it must be configured on a different log server for remote logging options. For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers might be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

" }, "secrets":{ "shape":"SecretList", @@ -903,11 +903,11 @@ }, "networkConfiguration":{ "shape":"NetworkConfiguration", - "documentation":"

The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.

" + "documentation":"

The network configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.

" }, "fargatePlatformConfiguration":{ "shape":"FargatePlatformConfiguration", - "documentation":"

The platform configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.

" + "documentation":"

The platform configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.

" }, "ephemeralStorage":{ "shape":"EphemeralStorage", @@ -929,13 +929,13 @@ "members":{ "vcpus":{ "shape":"Integer", - "documentation":"

This parameter is deprecated, use resourceRequirements to override the vcpus parameter that's set in the job definition. It's not supported for jobs running on Fargate resources. For jobs that run on EC2 resources, it overrides the vcpus parameter set in the job definition, but doesn't override any vCPU requirement specified in the resourceRequirements structure in the job definition. To override vCPU requirements that are specified in the resourceRequirements structure in the job definition, resourceRequirements must be specified in the SubmitJob request, with type set to VCPU and value set to the new value. For more information, see Can't override job definition resource requirements in the Batch User Guide.

", + "documentation":"

This parameter is deprecated, use resourceRequirements to override the vcpus parameter that's set in the job definition. It's not supported for jobs running on Fargate resources. For jobs that run on Amazon EC2 resources, it overrides the vcpus parameter set in the job definition, but doesn't override any vCPU requirement specified in the resourceRequirements structure in the job definition. To override vCPU requirements that are specified in the resourceRequirements structure in the job definition, resourceRequirements must be specified in the SubmitJob request, with type set to VCPU and value set to the new value. For more information, see Can't override job definition resource requirements in the Batch User Guide.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, "memory":{ "shape":"Integer", - "documentation":"

This parameter is deprecated, use resourceRequirements to override the memory requirements specified in the job definition. It's not supported for jobs running on Fargate resources. For jobs that run on EC2 resources, it overrides the memory parameter set in the job definition, but doesn't override any memory requirement that's specified in the resourceRequirements structure in the job definition. To override memory requirements that are specified in the resourceRequirements structure in the job definition, resourceRequirements must be specified in the SubmitJob request, with type set to MEMORY and value set to the new value. For more information, see Can't override job definition resource requirements in the Batch User Guide.

", + "documentation":"

This parameter is deprecated, use resourceRequirements to override the memory requirements specified in the job definition. It's not supported for jobs running on Fargate resources. For jobs that run on Amazon EC2 resources, it overrides the memory parameter set in the job definition, but doesn't override any memory requirement that's specified in the resourceRequirements structure in the job definition. To override memory requirements that are specified in the resourceRequirements structure in the job definition, resourceRequirements must be specified in the SubmitJob request, with type set to MEMORY and value set to the new value. For more information, see Can't override job definition resource requirements in the Batch User Guide.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, @@ -967,13 +967,13 @@ }, "vcpus":{ "shape":"Integer", - "documentation":"

This parameter is deprecated, use resourceRequirements to specify the vCPU requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs running on EC2 resources, it specifies the number of vCPUs reserved for the job.

Each vCPU is equivalent to 1,024 CPU shares. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. The number of vCPUs must be specified but can be specified in several places. You must specify it at least once for each node.

", + "documentation":"

This parameter is deprecated, use resourceRequirements to specify the vCPU requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs running on Amazon EC2 resources, it specifies the number of vCPUs reserved for the job.

Each vCPU is equivalent to 1,024 CPU shares. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. The number of vCPUs must be specified but can be specified in several places. You must specify it at least once for each node.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, "memory":{ "shape":"Integer", - "documentation":"

This parameter is deprecated, use resourceRequirements to specify the memory requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs that run on EC2 resources, it specifies the memory hard limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several places. It must be specified for each node at least once.

", + "documentation":"

This parameter is deprecated, use resourceRequirements to specify the memory requirements for the job definition. It's not supported for jobs running on Fargate resources. For jobs that run on Amazon EC2 resources, it specifies the memory hard limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several places. It must be specified for each node at least once.

", "deprecated":true, "deprecatedMessage":"This field is deprecated, use resourceRequirements instead." }, @@ -1031,7 +1031,7 @@ }, "logConfiguration":{ "shape":"LogConfiguration", - "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type).

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run. By default, containers use the same logging driver that the Docker daemon uses. However the container might use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information on the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Batch currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type).

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version | grep \"Server API version\"

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

" }, "secrets":{ "shape":"SecretList", @@ -1039,11 +1039,11 @@ }, "networkConfiguration":{ "shape":"NetworkConfiguration", - "documentation":"

The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.

" + "documentation":"

The network configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.

" }, "fargatePlatformConfiguration":{ "shape":"FargatePlatformConfiguration", - "documentation":"

The platform configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.

" + "documentation":"

The platform configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.

" }, "ephemeralStorage":{ "shape":"EphemeralStorage", @@ -1534,12 +1534,144 @@ "type":"list", "member":{"shape":"Ec2Configuration"} }, + "EcsProperties":{ + "type":"structure", + "required":["taskProperties"], + "members":{ + "taskProperties":{ + "shape":"ListEcsTaskProperties", + "documentation":"

An object that contains the properties for the Amazon ECS task definition of a job.

This object is currently limited to one element.

" + } + }, + "documentation":"

An object that contains the properties for the Amazon ECS resources of a job.

" + }, + "EcsPropertiesDetail":{ + "type":"structure", + "members":{ + "taskProperties":{ + "shape":"ListEcsTaskDetails", + "documentation":"

The properties for the Amazon ECS task definition of a job.

" + } + }, + "documentation":"

An object that contains the details for the Amazon ECS resources of a job.

" + }, + "EcsPropertiesOverride":{ + "type":"structure", + "members":{ + "taskProperties":{ + "shape":"ListTaskPropertiesOverride", + "documentation":"

The overrides for the Amazon ECS task definition of a job.

This object is currently limited to one element.

" + } + }, + "documentation":"

An object that contains overrides for the Amazon ECS task definition of a job.

" + }, + "EcsTaskDetails":{ + "type":"structure", + "members":{ + "containers":{ + "shape":"ListTaskContainerDetails", + "documentation":"

A list of containers that are included in the taskProperties list.

" + }, + "containerInstanceArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the container instance that hosts the task.

" + }, + "taskArn":{ + "shape":"String", + "documentation":"

The ARN of the Amazon ECS task.

" + }, + "ephemeralStorage":{ + "shape":"EphemeralStorage", + "documentation":"

The amount of ephemeral storage allocated for the task.

" + }, + "executionRoleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the execution role that Batch can assume. For more information, see Batch execution IAM role in the Batch User Guide.

" + }, + "platformVersion":{ + "shape":"String", + "documentation":"

The Fargate platform version where the jobs are running.

" + }, + "ipcMode":{ + "shape":"String", + "documentation":"

The IPC resource namespace to use for the containers in the task.

" + }, + "taskRoleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that the container can assume for Amazon Web Services permissions. For more information, see IAM roles for tasks in the Amazon Elastic Container Service Developer Guide.

This object is comparable to ContainerProperties:jobRoleArn.

" + }, + "pidMode":{ + "shape":"String", + "documentation":"

The process namespace to use for the containers in the task.

" + }, + "networkConfiguration":{ + "shape":"NetworkConfiguration", + "documentation":"

The network configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.

" + }, + "runtimePlatform":{ + "shape":"RuntimePlatform", + "documentation":"

An object that represents the compute environment architecture for Batch jobs on Fargate.

" + }, + "volumes":{ + "shape":"Volumes", + "documentation":"

A list of data volumes used in a job.

" + } + }, + "documentation":"

The details of a task definition that describes the container and volume definitions of an Amazon ECS task.

" + }, + "EcsTaskProperties":{ + "type":"structure", + "required":["containers"], + "members":{ + "containers":{ + "shape":"ListTaskContainerProperties", + "documentation":"

This object is a list of containers.

" + }, + "ephemeralStorage":{ + "shape":"EphemeralStorage", + "documentation":"

The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate.

" + }, + "executionRoleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the execution role that Batch can assume. For jobs that run on Fargate resources, you must provide an execution role. For more information, see Batch execution IAM role in the Batch User Guide.

" + }, + "platformVersion":{ + "shape":"String", + "documentation":"

The Fargate platform version where the jobs are running. A platform version is specified only for jobs that are running on Fargate resources. If one isn't specified, the LATEST platform version is used by default. This uses a recent, approved version of the Fargate platform for compute resources. For more information, see Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

" + }, + "ipcMode":{ + "shape":"String", + "documentation":"

The IPC resource namespace to use for the containers in the task. The valid values are host, task, or none.

If host is specified, all containers within the tasks that specified the host IPC mode on the same container instance share the same IPC resources with the host Amazon EC2 instance.

If task is specified, all containers within the specified task share the same IPC resources.

If none is specified, the IPC resources within the containers of a task are private, and are not shared with other containers in a task or on the container instance.

If no value is specified, then the IPC resource namespace sharing depends on the Docker daemon setting on the container instance. For more information, see IPC settings in the Docker run reference.

" + }, + "taskRoleArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) that's associated with the Amazon ECS task.

This object is comparable to ContainerProperties:jobRoleArn.

" + }, + "pidMode":{ + "shape":"String", + "documentation":"

The process namespace to use for the containers in the task. The valid values are host or task. For example, monitoring sidecars might need pidMode to access information about other containers running in the same task.

If host is specified, all containers within the tasks that specified the host PID mode on the same container instance share the process namespace with the host Amazon EC2 instance.

If task is specified, all containers within the specified task share the same process namespace.

If no value is specified, the default is a private namespace for each container. For more information, see PID settings in the Docker run reference.

" + }, + "networkConfiguration":{ + "shape":"NetworkConfiguration", + "documentation":"

The network configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.

" + }, + "runtimePlatform":{ + "shape":"RuntimePlatform", + "documentation":"

An object that represents the compute environment architecture for Batch jobs on Fargate.

" + }, + "volumes":{ + "shape":"Volumes", + "documentation":"

A list of volumes that are associated with the job.

" + } + }, + "documentation":"

The properties for a task definition that describes the container and volume definitions of an Amazon ECS task. You can specify which Docker images to use, the required resources, and other configurations related to launching the task definition through an Amazon ECS service or task.

" + }, "EksAttemptContainerDetail":{ "type":"structure", "members":{ "exitCode":{ "shape":"Integer", - "documentation":"

The exit code for the job attempt. A non-zero exit code is considered failed.

" + "documentation":"

The exit code returned for the job attempt. A non-zero exit code is considered failed.

" }, "reason":{ "shape":"String", @@ -1559,6 +1691,10 @@ "shape":"EksAttemptContainerDetails", "documentation":"

The details for the final status of the containers for this job attempt.

" }, + "initContainers":{ + "shape":"EksAttemptContainerDetails", + "documentation":"

The details for the init containers.

" + }, "podName":{ "shape":"String", "documentation":"

The name of the pod for this job attempt.

" @@ -1626,7 +1762,7 @@ }, "args":{ "shape":"StringList", - "documentation":"

An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment.

If the referenced environment variable doesn't exist, the reference in the command isn't changed. For example, if the reference is to \"$(NAME1)\" and the NAME1 environment variable doesn't exist, the command string will remain \"$(NAME1).\" $$ is replaced with $, and the resulting string isn't expanded. For example, $$(VAR_NAME) is passed as $(VAR_NAME) whether or not the VAR_NAME environment variable exists. For more information, see CMD in the Dockerfile reference and Define a command and arguments for a pod in the Kubernetes documentation.

" + "documentation":"

An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment.

If the referenced environment variable doesn't exist, the reference in the command isn't changed. For example, if the reference is to \"$(NAME1)\" and the NAME1 environment variable doesn't exist, the command string will remain \"$(NAME1).\" $$ is replaced with $, and the resulting string isn't expanded. For example, $$(VAR_NAME) is passed as $(VAR_NAME) whether or not the VAR_NAME environment variable exists. For more information, see Dockerfile reference: CMD and Define a command and arguments for a pod in the Kubernetes documentation.

" }, "env":{ "shape":"EksContainerEnvironmentVariables", @@ -1668,7 +1804,7 @@ }, "args":{ "shape":"StringList", - "documentation":"

An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment.

If the referenced environment variable doesn't exist, the reference in the command isn't changed. For example, if the reference is to \"$(NAME1)\" and the NAME1 environment variable doesn't exist, the command string will remain \"$(NAME1)\". $$ is replaced with $ and the resulting string isn't expanded. For example, $$(VAR_NAME) is passed as $(VAR_NAME) whether or not the VAR_NAME environment variable exists. For more information, see CMD in the Dockerfile reference and Define a command and arguments for a pod in the Kubernetes documentation.

" + "documentation":"

An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment.

If the referenced environment variable doesn't exist, the reference in the command isn't changed. For example, if the reference is to \"$(NAME1)\" and the NAME1 environment variable doesn't exist, the command string will remain \"$(NAME1)\". $$ is replaced with $ and the resulting string isn't expanded. For example, $$(VAR_NAME) is passed as $(VAR_NAME) whether or not the VAR_NAME environment variable exists. For more information, see Dockerfile reference: CMD and Define a command and arguments for a pod in the Kubernetes documentation.

" }, "env":{ "shape":"EksContainerEnvironmentVariables", @@ -1680,7 +1816,7 @@ }, "exitCode":{ "shape":"Integer", - "documentation":"

The exit code for the job attempt. A non-zero exit code is considered failed.

" + "documentation":"

The exit code returned for the job attempt. A non-zero exit code is considered failed.

" }, "reason":{ "shape":"String", @@ -1723,6 +1859,10 @@ "EksContainerOverride":{ "type":"structure", "members":{ + "name":{ + "shape":"String", + "documentation":"

A pointer to the container that you want to override. The name must match a unique container name that you wish to override.

" + }, "image":{ "shape":"String", "documentation":"

The override of the Docker image that's used to start the container.

" @@ -1733,7 +1873,7 @@ }, "args":{ "shape":"StringList", - "documentation":"

The arguments to the entrypoint to send to the container that overrides the default arguments from the Docker image or the job definition. For more information, see CMD in the Dockerfile reference and Define a command an arguments for a pod in the Kubernetes documentation.

" + "documentation":"

The arguments to the entrypoint to send to the container that overrides the default arguments from the Docker image or the job definition. For more information, see Dockerfile reference: CMD and Define a command and arguments for a pod in the Kubernetes documentation.

" }, "env":{ "shape":"EksContainerEnvironmentVariables", @@ -1744,7 +1884,7 @@ "documentation":"

The type and amount of resources to assign to a container. These override the settings in the job definition. The supported resources include memory, cpu, and nvidia.com/gpu. For more information, see Resource management for pods and containers in the Kubernetes documentation.

" } }, - "documentation":"

Object representing any Kubernetes overrides to a job definition that's used in a SubmitJob API operation.

" + "documentation":"

Object representing any Kubernetes overrides to a job definition that's used in a SubmitJob API operation.

" }, "EksContainerOverrideList":{ "type":"list", @@ -1879,6 +2019,10 @@ "shape":"EksContainers", "documentation":"

The properties of the container that's used on the Amazon EKS pod.

" }, + "initContainers":{ + "shape":"EksContainers", + "documentation":"

These containers run before application containers, always run to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persist the registration information in the Kubernetes backend data store. For more information, see Init Containers in the Kubernetes documentation.

This object is limited to 10 elements.

" + }, "volumes":{ "shape":"EksVolumes", "documentation":"

Specifies the volumes for a job definition that uses Amazon EKS resources.

" @@ -1886,6 +2030,10 @@ "metadata":{ "shape":"EksMetadata", "documentation":"

Metadata about the Kubernetes pod. For more information, see Understanding Kubernetes Objects in the Kubernetes documentation.

" + }, + "shareProcessNamespace":{ + "shape":"Boolean", + "documentation":"

Indicates if the processes in a container are shared, or visible, to other containers in the same pod. For more information, see Share Process Namespace between Containers in a Pod.

" } }, "documentation":"

The properties for the pod.

" @@ -1909,6 +2057,10 @@ "shape":"EksContainerDetails", "documentation":"

The properties of the container that's used on the Amazon EKS pod.

" }, + "initContainers":{ + "shape":"EksContainerDetails", + "documentation":"

The container registered with the Amazon EKS Connector agent and persists the registration information in the Kubernetes backend data store.

" + }, "volumes":{ "shape":"EksVolumes", "documentation":"

Specifies the volumes for a job definition using Amazon EKS resources.

" @@ -1924,6 +2076,10 @@ "metadata":{ "shape":"EksMetadata", "documentation":"

Describes and uniquely identifies Kubernetes resources. For example, the compute environment that a pod runs in or the jobID for a job running in the pod. For more information, see Understanding Kubernetes Objects in the Kubernetes documentation.

" + }, + "shareProcessNamespace":{ + "shape":"Boolean", + "documentation":"

Indicates if the processes in a container are shared, or visible, to other containers in the same pod. For more information, see Share Process Namespace between Containers in a Pod.

" } }, "documentation":"

The details for the pod.

" @@ -1935,6 +2091,10 @@ "shape":"EksContainerOverrideList", "documentation":"

The overrides for the container that's used on the Amazon EKS pod.

" }, + "initContainers":{ + "shape":"EksContainerOverrideList", + "documentation":"

The overrides for the containers defined in the Amazon EKS pod. These containers run before application containers, always run to completion, and must complete successfully before the next container starts. These containers are registered with the Amazon EKS Connector agent and persist the registration information in the Kubernetes backend data store. For more information, see Init Containers in the Kubernetes documentation.

This object is limited to 10 elements.

" + }, "metadata":{ "shape":"EksMetadata", "documentation":"

Metadata about the overrides for the container that's used on the Amazon EKS pod.

" @@ -2087,7 +2247,7 @@ "documentation":"

The Fargate platform version where the jobs are running. A platform version is specified only for jobs that are running on Fargate resources. If one isn't specified, the LATEST platform version is used by default. This uses a recent, approved version of the Fargate platform for compute resources. For more information, see Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

" } }, - "documentation":"

The platform configuration for jobs that are running on Fargate resources. Jobs that run on EC2 resources must not specify this parameter.

" + "documentation":"

The platform configuration for jobs that are running on Fargate resources. Jobs that run on Amazon EC2 resources must not specify this parameter.

" }, "Float":{"type":"float"}, "Host":{ @@ -2172,7 +2332,7 @@ }, "containerProperties":{ "shape":"ContainerProperties", - "documentation":"

An object with various properties specific to Amazon ECS based jobs. Valid values are containerProperties, eksProperties, and nodeProperties. Only one can be specified.

" + "documentation":"

An object with properties specific to Amazon ECS-based jobs. When containerProperties is used in the job definition, it can't be used in addition to eksProperties, ecsProperties, or nodeProperties.

" }, "timeout":{ "shape":"JobTimeout", @@ -2180,7 +2340,7 @@ }, "nodeProperties":{ "shape":"NodeProperties", - "documentation":"

An object with various properties that are specific to multi-node parallel jobs. Valid values are containerProperties, eksProperties, and nodeProperties. Only one can be specified.

If the job runs on Fargate resources, don't specify nodeProperties. Use containerProperties instead.

" + "documentation":"

An object with properties that are specific to multi-node parallel jobs. When nodeProperties is used in the job definition, it can't be used in addition to containerProperties, ecsProperties, or eksProperties.

If the job runs on Fargate resources, don't specify nodeProperties. Use containerProperties instead.

" }, "tags":{ "shape":"TagrisTagsMap", @@ -2194,9 +2354,13 @@ "shape":"PlatformCapabilityList", "documentation":"

The platform capabilities required by the job definition. If no value is specified, it defaults to EC2. Jobs run on Fargate resources specify FARGATE.

" }, + "ecsProperties":{ + "shape":"EcsProperties", + "documentation":"

An object that contains the properties for the Amazon ECS resources of a job. When ecsProperties is used in the job definition, it can't be used in addition to containerProperties, eksProperties, or nodeProperties.

" + }, "eksProperties":{ "shape":"EksProperties", - "documentation":"

An object with various properties that are specific to Amazon EKS based jobs. Valid values are containerProperties, eksProperties, and nodeProperties. Only one can be specified.

" + "documentation":"

An object with properties that are specific to Amazon EKS-based jobs. When eksProperties is used in the job definition, it can't be used in addition to containerProperties, ecsProperties, or nodeProperties.

" }, "containerOrchestrationType":{ "shape":"OrchestrationType", @@ -2283,7 +2447,7 @@ }, "createdAt":{ "shape":"Long", - "documentation":"

The Unix timestamp (in milliseconds) for when the job was created. For non-array jobs and parent array jobs, this is when the job entered the SUBMITTED state. This is specifically at the time SubmitJob was called. For array child jobs, this is when the child job was spawned by its parent and entered the PENDING state.

" + "documentation":"

The Unix timestamp (in milliseconds) for when the job was created. For non-array jobs and parent array jobs, this is when the job entered the SUBMITTED state. This is specifically at the time SubmitJob was called. For array child jobs, this is when the child job was spawned by its parent and entered the PENDING state.

" }, "retryStrategy":{ "shape":"RetryStrategy", @@ -2311,7 +2475,7 @@ }, "container":{ "shape":"ContainerDetail", - "documentation":"

An object that represents the details for the container that's associated with the job.

" + "documentation":"

An object that represents the details for the container that's associated with the job. If the details are for a multiple-container job, this object will be empty.

" }, "nodeDetails":{ "shape":"NodeDetails", @@ -2343,12 +2507,16 @@ }, "eksProperties":{ "shape":"EksPropertiesDetail", - "documentation":"

An object with various properties that are specific to Amazon EKS based jobs. Only one of container, eksProperties, or nodeDetails is specified.

" + "documentation":"

An object with various properties that are specific to Amazon EKS based jobs.

" }, "eksAttempts":{ "shape":"EksAttemptDetails", "documentation":"

A list of job attempts that are associated with this job.

" }, + "ecsProperties":{ + "shape":"EcsPropertiesDetail", + "documentation":"

An object with properties that are specific to Amazon ECS-based jobs.

" + }, "isCancelled":{ "shape":"Boolean", "documentation":"

Indicates whether the job is canceled.

" @@ -2405,7 +2573,7 @@ }, "priority":{ "shape":"Integer", - "documentation":"

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). EC2 and Fargate compute environments can't be mixed.

" + "documentation":"

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either Amazon EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). Amazon EC2 and Fargate compute environments can't be mixed.

" }, "computeEnvironmentOrder":{ "shape":"ComputeEnvironmentOrders", @@ -2455,7 +2623,7 @@ }, "createdAt":{ "shape":"Long", - "documentation":"

The Unix timestamp (in milliseconds) for when the job was created. For non-array jobs and parent array jobs, this is when the job entered the SUBMITTED state (at the time SubmitJob was called). For array child jobs, this is when the child job was spawned by its parent and entered the PENDING state.

" + "documentation":"

The Unix timestamp (in milliseconds) for when the job was created. For non-array jobs and parent array jobs, this is when the job entered the SUBMITTED state (at the time SubmitJob was called). For array child jobs, this is when the child job was spawned by its parent and entered the PENDING state.

" }, "status":{ "shape":"JobStatus", @@ -2587,6 +2755,14 @@ }, "documentation":"

Linux-specific modifications that are applied to the container, such as details for device mappings.

" }, + "ListEcsTaskDetails":{ + "type":"list", + "member":{"shape":"EcsTaskDetails"} + }, + "ListEcsTaskProperties":{ + "type":"list", + "member":{"shape":"EcsTaskProperties"} + }, "ListJobsFilterList":{ "type":"list", "member":{"shape":"KeyValuesPair"} @@ -2688,6 +2864,22 @@ } } }, + "ListTaskContainerDetails":{ + "type":"list", + "member":{"shape":"TaskContainerDetails"} + }, + "ListTaskContainerOverrides":{ + "type":"list", + "member":{"shape":"TaskContainerOverrides"} + }, + "ListTaskContainerProperties":{ + "type":"list", + "member":{"shape":"TaskContainerProperties"} + }, + "ListTaskPropertiesOverride":{ + "type":"list", + "member":{"shape":"TaskPropertiesOverride"} + }, "LogConfiguration":{ "type":"structure", "required":["logDriver"], @@ -2755,7 +2947,7 @@ "documentation":"

Indicates whether the job has a public IP address. For a job that's running on Fargate resources in a private subnet to send outbound traffic to the internet (for example, to pull container images), the private subnet requires a NAT gateway be attached to route requests to the internet. For more information, see Amazon ECS task networking in the Amazon Elastic Container Service Developer Guide. The default value is \"DISABLED\".

" } }, - "documentation":"

The network configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.

" + "documentation":"

The network configuration for jobs that are running on Fargate resources. Jobs that are running on Amazon EC2 resources must not specify this parameter.

" }, "NetworkInterface":{ "type":"structure", @@ -2805,7 +2997,7 @@ "documentation":"

The node property overrides for the job.

" } }, - "documentation":"

An object that represents any node overrides to a job definition that's used in a SubmitJob API operation.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't provide it for these jobs. Rather, use containerOverrides instead.

" + "documentation":"

An object that represents any node overrides to a job definition that's used in a SubmitJob API operation.

This parameter isn't applicable to jobs that are running on Fargate resources. Don't provide it for these jobs. Rather, use containerOverrides instead.

" }, "NodeProperties":{ "type":"structure", @@ -2859,9 +3051,17 @@ "containerOverrides":{ "shape":"ContainerOverrides", "documentation":"

The overrides that are sent to a node range.

" + }, + "ecsPropertiesOverride":{ + "shape":"EcsPropertiesOverride", + "documentation":"

An object that contains the properties that you want to replace for the existing Amazon ECS resources of a job.

" + }, + "instanceTypes":{ + "shape":"StringList", + "documentation":"

An object that contains the instance types that you want to replace for the existing resources of a job.

" } }, - "documentation":"

The object that represents any node overrides to a job definition that's used in a SubmitJob API operation.

" + "documentation":"

The object that represents any node overrides to a job definition that's used in a SubmitJob API operation.

" }, "NodePropertyOverrides":{ "type":"list", @@ -2882,9 +3082,17 @@ "container":{ "shape":"ContainerProperties", "documentation":"

The container details for the node range.

" + }, + "instanceTypes":{ + "shape":"StringList", + "documentation":"

The instance types of the underlying host infrastructure of a multi-node parallel job.

This parameter isn't applicable to jobs that are running on Fargate resources.

In addition, this list object is currently limited to one element.

" + }, + "ecsProperties":{ + "shape":"EcsProperties", + "documentation":"

This is an object that represents the properties of the node range for a multi-node parallel job.

" } }, - "documentation":"

An object that represents the properties of the node range for a multi-node parallel job.

" + "documentation":"

This is an object that represents the properties of the node range for a multi-node parallel job.

" }, "OrchestrationType":{ "type":"string", @@ -2927,7 +3135,7 @@ }, "type":{ "shape":"JobDefinitionType", - "documentation":"

The type of job definition. For more information about multi-node parallel jobs, see Creating a multi-node parallel job definition in the Batch User Guide.

If the job is run on Fargate resources, then multinode isn't supported.

" + "documentation":"

The type of job definition. For more information about multi-node parallel jobs, see Creating a multi-node parallel job definition in the Batch User Guide.

If the job is run on Fargate resources, then multinode isn't supported.

" }, "parameters":{ "shape":"ParametersMap", @@ -2939,11 +3147,11 @@ }, "containerProperties":{ "shape":"ContainerProperties", - "documentation":"

An object with various properties specific to Amazon ECS based single-node container-based jobs. If the job definition's type parameter is container, then you must specify either containerProperties or nodeProperties. This must not be specified for Amazon EKS based job definitions.

If the job runs on Fargate resources, then you must not specify nodeProperties; use only containerProperties.

" + "documentation":"

An object with properties specific to Amazon ECS-based single-node container-based jobs. If the job definition's type parameter is container, then you must specify either containerProperties or nodeProperties. This must not be specified for Amazon EKS-based job definitions.

If the job runs on Fargate resources, then you must not specify nodeProperties; use only containerProperties.

" }, "nodeProperties":{ "shape":"NodeProperties", - "documentation":"

An object with various properties specific to multi-node parallel jobs. If you specify node properties for a job, it becomes a multi-node parallel job. For more information, see Multi-node Parallel Jobs in the Batch User Guide. If the job definition's type parameter is container, then you must specify either containerProperties or nodeProperties.

If the job runs on Fargate resources, then you must not specify nodeProperties; use containerProperties instead.

If the job runs on Amazon EKS resources, then you must not specify nodeProperties.

" + "documentation":"

An object with properties specific to multi-node parallel jobs. If you specify node properties for a job, it becomes a multi-node parallel job. For more information, see Multi-node Parallel Jobs in the Batch User Guide.

If the job runs on Fargate resources, then you must not specify nodeProperties; use containerProperties instead.

If the job runs on Amazon EKS resources, then you must not specify nodeProperties.

" }, "retryStrategy":{ "shape":"RetryStrategy", @@ -2967,7 +3175,11 @@ }, "eksProperties":{ "shape":"EksProperties", - "documentation":"

An object with various properties that are specific to Amazon EKS based jobs. This must not be specified for Amazon ECS based job definitions.

" + "documentation":"

An object with properties that are specific to Amazon EKS-based jobs. This must not be specified for Amazon ECS based job definitions.

" + }, + "ecsProperties":{ + "shape":"EcsProperties", + "documentation":"

An object with properties that are specific to Amazon ECS-based jobs. This must not be specified for Amazon EKS-based job definitions.

" } }, "documentation":"

Contains the parameters for RegisterJobDefinition.

" @@ -3014,7 +3226,7 @@ "members":{ "value":{ "shape":"String", - "documentation":"

The quantity of the specified resource to reserve for the container. The values vary based on the type specified.

type=\"GPU\"

The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched on.

GPUs aren't available for jobs that are running on Fargate resources.

type=\"MEMORY\"

The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory management in the Batch User Guide.

For jobs that are running on Fargate resources, then value is the hard limit (in MiB), and must match one of the supported values and the VCPU values must be one of the values supported for that memory value.

value = 512

VCPU = 0.25

value = 1024

VCPU = 0.25 or 0.5

value = 2048

VCPU = 0.25, 0.5, or 1

value = 3072

VCPU = 0.5, or 1

value = 4096

VCPU = 0.5, 1, or 2

value = 5120, 6144, or 7168

VCPU = 1 or 2

value = 8192

VCPU = 1, 2, or 4

value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360

VCPU = 2 or 4

value = 16384

VCPU = 2, 4, or 8

value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648, 29696, or 30720

VCPU = 4

value = 20480, 24576, or 28672

VCPU = 4 or 8

value = 36864, 45056, 53248, or 61440

VCPU = 8

value = 32768, 40960, 49152, or 57344

VCPU = 8 or 16

value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880

VCPU = 16

type=\"VCPU\"

The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.

The default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about Fargate quotas, see Fargate quotas in the Amazon Web Services General Reference.

For jobs that are running on Fargate resources, then value must match one of the supported values and the MEMORY values must be one of the values supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16

value = 0.25

MEMORY = 512, 1024, or 2048

value = 0.5

MEMORY = 1024, 2048, 3072, or 4096

value = 1

MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192

value = 2

MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

value = 4

MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

value = 8

MEMORY = 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440

value = 16

MEMORY = 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880

" + "documentation":"

The quantity of the specified resource to reserve for the container. The values vary based on the type specified.

type=\"GPU\"

The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched on.

GPUs aren't available for jobs that are running on Fargate resources.

type=\"MEMORY\"

The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on Amazon EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory management in the Batch User Guide.

For jobs that are running on Fargate resources, then value is the hard limit (in MiB), and must match one of the supported values and the VCPU values must be one of the values supported for that memory value.

value = 512

VCPU = 0.25

value = 1024

VCPU = 0.25 or 0.5

value = 2048

VCPU = 0.25, 0.5, or 1

value = 3072

VCPU = 0.5, or 1

value = 4096

VCPU = 0.5, 1, or 2

value = 5120, 6144, or 7168

VCPU = 1 or 2

value = 8192

VCPU = 1, 2, or 4

value = 9216, 10240, 11264, 12288, 13312, 14336, or 15360

VCPU = 2 or 4

value = 16384

VCPU = 2, 4, or 8

value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648, 29696, or 30720

VCPU = 4

value = 20480, 24576, or 28672

VCPU = 4 or 8

value = 36864, 45056, 53248, or 61440

VCPU = 8

value = 32768, 40960, 49152, or 57344

VCPU = 8 or 16

value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880

VCPU = 16

type=\"VCPU\"

The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. For Amazon EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.

The default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about Fargate quotas, see Fargate quotas in the Amazon Web Services General Reference.

For jobs that are running on Fargate resources, then value must match one of the supported values and the MEMORY values must be one of the values supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16

value = 0.25

MEMORY = 512, 1024, or 2048

value = 0.5

MEMORY = 1024, 2048, 3072, or 4096

value = 1

MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192

value = 2

MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

value = 4

MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

value = 8

MEMORY = 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440

value = 16

MEMORY = 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880

" }, "type":{ "shape":"ResourceType", @@ -3213,7 +3425,7 @@ }, "containerOverrides":{ "shape":"ContainerOverrides", - "documentation":"

An object with various properties that override the defaults for the job definition that specify the name of a container in the specified job definition and the overrides it should receive. You can override the default command for a container, which is specified in the job definition or the Docker image, with a command override. You can also override existing environment variables on a container or add new environment variables to it with an environment override.

" + "documentation":"

An object with properties that override the defaults for the job definition that specify the name of a container in the specified job definition and the overrides it should receive. You can override the default command for a container, which is specified in the job definition or the Docker image, with a command override. You can also override existing environment variables on a container or add new environment variables to it with an environment override.

" }, "nodeOverrides":{ "shape":"NodeOverrides", @@ -3237,7 +3449,11 @@ }, "eksPropertiesOverride":{ "shape":"EksPropertiesOverride", - "documentation":"

An object that can only be specified for jobs that are run on Amazon EKS resources with various properties that override defaults for the job definition.

" + "documentation":"

An object with properties that override defaults for the job definition. It can only be specified for jobs that are run on Amazon EKS resources.

" + }, + "ecsPropertiesOverride":{ + "shape":"EcsPropertiesOverride", + "documentation":"

An object with properties that override defaults for the job definition. It can only be specified for jobs that are run on Amazon ECS resources.

" } }, "documentation":"

Contains the parameters for SubmitJob.

" @@ -3315,6 +3531,213 @@ "key":{"shape":"String"}, "value":{"shape":"String"} }, + "TaskContainerDependency":{ + "type":"structure", + "members":{ + "containerName":{ + "shape":"String", + "documentation":"

A unique identifier for the container.

" + }, + "condition":{ + "shape":"String", + "documentation":"

The dependency condition of the container. The following are the available conditions and their behavior:

" + } + }, + "documentation":"

A list of containers that this task depends on.

" + }, + "TaskContainerDependencyList":{ + "type":"list", + "member":{"shape":"TaskContainerDependency"} + }, + "TaskContainerDetails":{ + "type":"structure", + "members":{ + "command":{ + "shape":"StringList", + "documentation":"

The command that's passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#cmd.

" + }, + "dependsOn":{ + "shape":"TaskContainerDependencyList", + "documentation":"

A list of containers that this container depends on.

" + }, + "environment":{ + "shape":"EnvironmentVariables", + "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env option to docker run.

We don't recommend using plaintext environment variables for sensitive information, such as credential data.

" + }, + "essential":{ + "shape":"Boolean", + "documentation":"

If the essential parameter of a container is marked as true, and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the essential parameter of a container is marked as false, its failure doesn't affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.

All tasks must have at least one essential container. If you have an application that's composed of multiple containers, group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. For more information, see Application Architecture in the Amazon Elastic Container Service Developer Guide.

" + }, + "image":{ + "shape":"String", + "documentation":"

The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

" + }, + "linuxParameters":{ + "shape":"LinuxParameters", + "documentation":"

Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. For more information, see KernelCapabilities.

This parameter is not supported for Windows containers.

" + }, + "logConfiguration":{ + "shape":"LogConfiguration", + "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run.

By default, containers use the same logging driver that the Docker daemon uses. However the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information about the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

" + }, + "mountPoints":{ + "shape":"MountPoints", + "documentation":"

The mount points for data volumes in your container.

This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers can't mount directories on a different drive, and mount point can't be across drives.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of a container.

" + }, + "privileged":{ + "shape":"Boolean", + "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks run on Fargate.

" + }, + "readonlyRootFilesystem":{ + "shape":"Boolean", + "documentation":"

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

This parameter is not supported for Windows containers.

" + }, + "repositoryCredentials":{ + "shape":"RepositoryCredentials", + "documentation":"

The private repository authentication credentials to use.

" + }, + "resourceRequirements":{ + "shape":"ResourceRequirements", + "documentation":"

The type and amount of a resource to assign to a container. The only supported resource is a GPU.

" + }, + "secrets":{ + "shape":"SecretList", + "documentation":"

The secrets to pass to the container. For more information, see Specifying Sensitive Data in the Amazon Elastic Container Service Developer Guide.

" + }, + "ulimits":{ + "shape":"Ulimits", + "documentation":"

A list of ulimits to set in the container. If a ulimit value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run.

Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The nofile resource limit sets a restriction on the number of open files that a container can use. The default nofile soft limit is 1024 and the default hard limit is 65535.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

This parameter is not supported for Windows containers.

" + }, + "user":{ + "shape":"String", + "documentation":"

The user to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

When running tasks using the host network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security.

You can specify the user using the following formats. If specifying a UID or GID, you must specify it as a positive integer.

This parameter is not supported for Windows containers.

" + }, + "exitCode":{ + "shape":"Integer", + "documentation":"

The exit code returned upon completion.

" + }, + "reason":{ + "shape":"String", + "documentation":"

A short (255 max characters) human-readable string to provide additional details for a running or stopped container.

" + }, + "logStreamName":{ + "shape":"String", + "documentation":"

The name of the CloudWatch Logs log stream that's associated with the container. The log group for Batch jobs is /aws/batch/job. Each container attempt receives a log stream name when they reach the RUNNING status.

" + }, + "networkInterfaces":{ + "shape":"NetworkInterfaceList", + "documentation":"

The network interfaces that are associated with the job.

" + } + }, + "documentation":"

The details for the container in this task attempt.

" + }, + "TaskContainerOverrides":{ + "type":"structure", + "members":{ + "command":{ + "shape":"StringList", + "documentation":"

The command to send to the container that overrides the default command from the Docker image or the job definition.

This parameter can't contain an empty string.

" + }, + "environment":{ + "shape":"EnvironmentVariables", + "documentation":"

The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing environment variables from the Docker image or the job definition.

Environment variables cannot start with AWS_BATCH. This naming convention is reserved for variables that Batch sets.

" + }, + "name":{ + "shape":"String", + "documentation":"

A pointer to the container that you want to override. The container's name provides a unique identifier for the container being used.

" + }, + "resourceRequirements":{ + "shape":"ResourceRequirements", + "documentation":"

The type and amount of resources to assign to a container. This overrides the settings in the job definition. The supported resources include GPU, MEMORY, and VCPU.

" + } + }, + "documentation":"

The overrides that should be sent to a container.

For information about using Batch overrides when you connect event sources to targets, see BatchContainerOverrides.

" + }, + "TaskContainerProperties":{ + "type":"structure", + "required":["image"], + "members":{ + "command":{ + "shape":"StringList", + "documentation":"

The command that's passed to the container. This parameter maps to Cmd in the Create a container section of the Docker Remote API and the COMMAND parameter to docker run. For more information, see Dockerfile reference: CMD.

" + }, + "dependsOn":{ + "shape":"TaskContainerDependencyList", + "documentation":"

A list of containers that this container depends on.

" + }, + "environment":{ + "shape":"EnvironmentVariables", + "documentation":"

The environment variables to pass to a container. This parameter maps to Env in the Create a container section of the Docker Remote API and the --env parameter to docker run.

We don't recommend using plaintext environment variables for sensitive information, such as credential data.

Environment variables cannot start with AWS_BATCH. This naming convention is reserved for variables that Batch sets.

" + }, + "essential":{ + "shape":"Boolean", + "documentation":"

If the essential parameter of a container is marked as true, and that container fails or stops for any reason, all other containers that are part of the task are stopped. If the essential parameter of a container is marked as false, its failure doesn't affect the rest of the containers in a task. If this parameter is omitted, a container is assumed to be essential.

All tasks must have at least one essential container. If you have an application that's composed of multiple containers, group containers that are used for a common purpose into components, and separate the different components into multiple task definitions. For more information, see Application Architecture in the Amazon Elastic Container Service Developer Guide.

" + }, + "image":{ + "shape":"String", + "documentation":"

The image used to start a container. This string is passed directly to the Docker daemon. By default, images in the Docker Hub registry are available. Other repositories are specified with either repository-url/image:tag or repository-url/image@digest. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image in the Create a container section of the Docker Remote API and the IMAGE parameter of docker run.

" + }, + "linuxParameters":{ + "shape":"LinuxParameters", + "documentation":"

Linux-specific modifications that are applied to the container, such as Linux kernel capabilities. For more information, see KernelCapabilities.

" + }, + "logConfiguration":{ + "shape":"LogConfiguration", + "documentation":"

The log configuration specification for the container.

This parameter maps to LogConfig in the Create a container section of the Docker Remote API and the --log-driver option to docker run.

By default, containers use the same logging driver that the Docker daemon uses. However the container can use a different logging driver than the Docker daemon by specifying a log driver with this parameter in the container definition. To use a different logging driver for a container, the log system must be configured properly on the container instance (or on a different log server for remote logging options). For more information about the options for different supported log drivers, see Configure logging drivers in the Docker documentation.

Amazon ECS currently supports a subset of the logging drivers available to the Docker daemon (shown in the LogConfiguration data type). Additional log drivers may be available in future releases of the Amazon ECS container agent.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

The Amazon ECS container agent running on a container instance must register the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS environment variable before containers placed on that instance can use these log configuration options. For more information, see Amazon ECS container agent configuration in the Amazon Elastic Container Service Developer Guide.

" + }, + "mountPoints":{ + "shape":"MountPoints", + "documentation":"

The mount points for data volumes in your container.

This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

Windows containers can mount whole directories on the same drive as $env:ProgramData. Windows containers can't mount directories on a different drive, and mount point can't be across drives.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of a container. The name can be used as a unique identifier to target your dependsOn and Overrides objects.

" + }, + "privileged":{ + "shape":"Boolean", + "documentation":"

When this parameter is true, the container is given elevated privileges on the host container instance (similar to the root user). This parameter maps to Privileged in the Create a container section of the Docker Remote API and the --privileged option to docker run.

This parameter is not supported for Windows containers or tasks run on Fargate.

" + }, + "readonlyRootFilesystem":{ + "shape":"Boolean", + "documentation":"

When this parameter is true, the container is given read-only access to its root file system. This parameter maps to ReadonlyRootfs in the Create a container section of the Docker Remote API and the --read-only option to docker run.

This parameter is not supported for Windows containers.

" + }, + "repositoryCredentials":{ + "shape":"RepositoryCredentials", + "documentation":"

The private repository authentication credentials to use.

" + }, + "resourceRequirements":{ + "shape":"ResourceRequirements", + "documentation":"

The type and amount of a resource to assign to a container. The only supported resource is a GPU.

" + }, + "secrets":{ + "shape":"SecretList", + "documentation":"

The secrets to pass to the container. For more information, see Specifying Sensitive Data in the Amazon Elastic Container Service Developer Guide.

" + }, + "ulimits":{ + "shape":"Ulimits", + "documentation":"

A list of ulimits to set in the container. If a ulimit value is specified in a task definition, it overrides the default values set by Docker. This parameter maps to Ulimits in the Create a container section of the Docker Remote API and the --ulimit option to docker run.

Amazon ECS tasks hosted on Fargate use the default resource limit values set by the operating system with the exception of the nofile resource limit parameter which Fargate overrides. The nofile resource limit sets a restriction on the number of open files that a container can use. The default nofile soft limit is 1024 and the default hard limit is 65535.

This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'

This parameter is not supported for Windows containers.

" + }, + "user":{ + "shape":"String", + "documentation":"

The user to use inside the container. This parameter maps to User in the Create a container section of the Docker Remote API and the --user option to docker run.

When running tasks using the host network mode, don't run containers using the root user (UID 0). We recommend using a non-root user for better security.

You can specify the user using the following formats. If specifying a UID or GID, you must specify it as a positive integer.

This parameter is not supported for Windows containers.

" + } + }, + "documentation":"

Container properties are used for Amazon ECS-based job definitions. These properties describe the container that's launched as part of a job.

" + }, + "TaskPropertiesOverride":{ + "type":"structure", + "members":{ + "containers":{ + "shape":"ListTaskContainerOverrides", + "documentation":"

The overrides for the container definition of a job.

" + } + }, + "documentation":"

An object that contains overrides for the task definition of a job.

" + }, "TerminateJobRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json b/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json index 9677842f01..8e6520f01b 100644 --- a/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json +++ b/botocore/data/bedrock-agent-runtime/2023-07-26/service-2.json @@ -465,17 +465,22 @@ ], "members":{ "knowledgeBaseId":{"shape":"KnowledgeBaseId"}, - "modelArn":{"shape":"BedrockModelArn"} + "modelArn":{"shape":"BedrockModelArn"}, + "retrievalConfiguration":{"shape":"KnowledgeBaseRetrievalConfiguration"} }, "documentation":"

Configurations for retrieval and generation for knowledge base.

" }, "KnowledgeBaseVectorSearchConfiguration":{ "type":"structure", - "required":["numberOfResults"], "members":{ "numberOfResults":{ "shape":"KnowledgeBaseVectorSearchConfigurationNumberOfResultsInteger", - "documentation":"

Top-K results to retrieve from knowledge base.

" + "documentation":"

Top-K results to retrieve from knowledge base.

", + "box":true + }, + "overrideSearchType":{ + "shape":"SearchType", + "documentation":"

Override the type of query to be performed on the data store.

" } }, "documentation":"

Knowledge base vector search configuration

" @@ -483,7 +488,7 @@ "KnowledgeBaseVectorSearchConfigurationNumberOfResultsInteger":{ "type":"integer", "box":true, - "max":10, + "max":25, "min":1 }, "LambdaArn":{ @@ -748,7 +753,8 @@ "documentation":"

Content of a retrieval result in text

" } }, - "documentation":"

Content of a retrieval result.

" + "documentation":"

Content of a retrieval result.

", + "sensitive":true }, "RetrievalResultLocation":{ "type":"structure", @@ -757,7 +763,8 @@ "type":{"shape":"RetrievalResultLocationType"}, "s3Location":{"shape":"RetrievalResultS3Location"} }, - "documentation":"

The source location of a retrieval result.

" + "documentation":"

The source location of a retrieval result.

", + "sensitive":true }, "RetrievalResultLocationType":{ "type":"string", @@ -888,6 +895,14 @@ "member":{"shape":"RetrievedReference"}, "documentation":"

A list of retrieved references.

" }, + "SearchType":{ + "type":"string", + "documentation":"

The type of query to be performed on the data store.

", + "enum":[ + "HYBRID", + "SEMANTIC" + ] + }, "ServiceQuotaExceededException":{ "type":"structure", "members":{ @@ -985,7 +1000,8 @@ }, "span":{"shape":"Span"} }, - "documentation":"

Text response part

" + "documentation":"

Text response part

", + "sensitive":true }, "ThrottlingException":{ "type":"structure", diff --git a/botocore/data/ce/2017-10-25/service-2.json b/botocore/data/ce/2017-10-25/service-2.json index d87c8946a2..fc06568320 100644 --- a/botocore/data/ce/2017-10-25/service-2.json +++ b/botocore/data/ce/2017-10-25/service-2.json @@ -155,6 +155,20 @@ ], "documentation":"

Retrieves the cost anomaly subscription objects for your account. You can filter using a list of cost anomaly monitor Amazon Resource Names (ARNs).

" }, + "GetApproximateUsageRecords":{ + "name":"GetApproximateUsageRecords", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetApproximateUsageRecordsRequest"}, + "output":{"shape":"GetApproximateUsageRecordsResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"DataUnavailableException"} + ], + "documentation":"

Retrieves estimated usage records for hourly granularity or resource-level data at daily granularity.

" + }, "GetCostAndUsage":{ "name":"GetCostAndUsage", "http":{ @@ -780,6 +794,18 @@ "type":"list", "member":{"shape":"AnomalySubscription"} }, + "ApproximateUsageRecordsPerService":{ + "type":"map", + "key":{"shape":"GenericString"}, + "value":{"shape":"NonNegativeLong"} + }, + "ApproximationDimension":{ + "type":"string", + "enum":[ + "SERVICE", + "RESOURCE" + ] + }, "Arn":{ "type":"string", "max":2048, @@ -2063,6 +2089,44 @@ } } }, + "GetApproximateUsageRecordsRequest":{ + "type":"structure", + "required":[ + "Granularity", + "ApproximationDimension" + ], + "members":{ + "Granularity":{ + "shape":"Granularity", + "documentation":"

How granular you want the data to be. You can enable data at hourly or daily granularity.

" + }, + "Services":{ + "shape":"UsageServices", + "documentation":"

The service metadata for the service or services you want to query. If not specified, all elements are returned.

" + }, + "ApproximationDimension":{ + "shape":"ApproximationDimension", + "documentation":"

The service to evaluate for the usage records. You can choose resource-level data at daily granularity, or hourly granularity with or without resource-level data.

" + } + } + }, + "GetApproximateUsageRecordsResponse":{ + "type":"structure", + "members":{ + "Services":{ + "shape":"ApproximateUsageRecordsPerService", + "documentation":"

The service metadata for the service or services in the response.

" + }, + "TotalRecords":{ + "shape":"NonNegativeLong", + "documentation":"

The total number of usage records for all services in the services list.

" + }, + "LookbackPeriod":{ + "shape":"DateInterval", + "documentation":"

The lookback period that's used for the estimation.

" + } + } + }, "GetCostAndUsageRequest":{ "type":"structure", "required":[ @@ -3297,6 +3361,10 @@ "type":"integer", "min":0 }, + "NonNegativeLong":{ + "type":"long", + "min":0 + }, "NullableNonNegativeDouble":{ "type":"double", "min":0.0 @@ -4995,6 +5063,10 @@ } } }, + "UsageServices":{ + "type":"list", + "member":{"shape":"GenericString"} + }, "UtilizationByTime":{ "type":"structure", "members":{ diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index df6be36f33..7d418574c6 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -31197,6 +31197,11 @@ } } }, + "GetNetworkInsightsAccessScopeAnalysisFindingsMaxResults":{ + "type":"integer", + "max":1000, + "min":1 + }, "GetNetworkInsightsAccessScopeAnalysisFindingsRequest":{ "type":"structure", "required":["NetworkInsightsAccessScopeAnalysisId"], @@ -31206,7 +31211,7 @@ "documentation":"

The ID of the Network Access Scope analysis.

" }, "MaxResults":{ - "shape":"NetworkInsightsMaxResults", + "shape":"GetNetworkInsightsAccessScopeAnalysisFindingsMaxResults", "documentation":"

The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.

" }, "NextToken":{ diff --git a/botocore/data/iot/2015-05-28/service-2.json b/botocore/data/iot/2015-05-28/service-2.json index 9a6d1ee3b8..dfd8d6af25 100644 --- a/botocore/data/iot/2015-05-28/service-2.json +++ b/botocore/data/iot/2015-05-28/service-2.json @@ -4881,7 +4881,7 @@ }, "namespaceId":{ "shape":"NamespaceId", - "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

", + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is only supported by IoT Greengrass at this time. For more information, see Setting up IoT Greengrass core devices.

", "location":"querystring", "locationName":"namespaceId" } @@ -7217,7 +7217,7 @@ }, "namespaceId":{ "shape":"NamespaceId", - "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

" + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is only supported by IoT Greengrass at this time. For more information, see Setting up IoT Greengrass core devices.

" }, "jobTemplateArn":{ "shape":"JobTemplateArn", @@ -8541,7 +8541,7 @@ }, "namespaceId":{ "shape":"NamespaceId", - "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

", + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is only supported by IoT Greengrass at this time. For more information, see Setting up IoT Greengrass core devices.

", "location":"querystring", "locationName":"namespaceId" } @@ -8565,7 +8565,7 @@ }, "namespaceId":{ "shape":"NamespaceId", - "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

", + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is only supported by IoT Greengrass at this time. For more information, see Setting up IoT Greengrass core devices.

", "location":"querystring", "locationName":"namespaceId" } @@ -12312,7 +12312,7 @@ }, "namespaceId":{ "shape":"NamespaceId", - "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

" + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is only supported by IoT Greengrass at this time. For more information, see Setting up IoT Greengrass core devices.

" }, "jobTemplateArn":{ "shape":"JobTemplateArn", @@ -13753,7 +13753,7 @@ }, "namespaceId":{ "shape":"NamespaceId", - "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

", + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is only supported by IoT Greengrass at this time. For more information, see Setting up IoT Greengrass core devices.

", "location":"querystring", "locationName":"namespaceId" }, @@ -13861,7 +13861,7 @@ }, "namespaceId":{ "shape":"NamespaceId", - "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

", + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is only supported by IoT Greengrass at this time. For more information, see Setting up IoT Greengrass core devices.

", "location":"querystring", "locationName":"namespaceId" } @@ -17515,8 +17515,8 @@ "documentation":"

The token used to get the next set of results, or null if there are no additional results.

" }, "maxResults":{ - "shape":"QueryMaxResults", - "documentation":"

The maximum number of results to return per page at one time. The response might contain fewer results but will never contain more.

" + "shape":"SearchQueryMaxResults", + "documentation":"

The maximum number of results to return per page at one time. This maximum number cannot exceed 100. The response might contain fewer results but will never contain more. You can use nextToken to retrieve the next set of results until nextToken returns NULL.

" }, "queryVersion":{ "shape":"QueryVersion", @@ -17541,6 +17541,10 @@ } } }, + "SearchQueryMaxResults":{ + "type":"integer", + "min":1 + }, "SearchableAttributes":{ "type":"list", "member":{"shape":"AttributeName"} @@ -20017,7 +20021,7 @@ }, "namespaceId":{ "shape":"NamespaceId", - "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is in public preview.

", + "documentation":"

The namespace used to indicate that a job is a customer-managed job.

When you specify a value for this parameter, Amazon Web Services IoT Core sends jobs notifications to MQTT topics that contain the value in the following format.

$aws/things/THING_NAME/jobs/JOB_ID/notify-namespace-NAMESPACE_ID/

The namespaceId feature is only supported by IoT Greengrass at this time. For more information, see Setting up IoT Greengrass core devices.

", "location":"querystring", "locationName":"namespaceId" }, diff --git a/botocore/data/wafv2/2019-07-29/service-2.json b/botocore/data/wafv2/2019-07-29/service-2.json index a75899c9d4..401c71b432 100644 --- a/botocore/data/wafv2/2019-07-29/service-2.json +++ b/botocore/data/wafv2/2019-07-29/service-2.json @@ -2379,6 +2379,7 @@ }, "ErrorMessage":{"type":"string"}, "ErrorReason":{"type":"string"}, + "EvaluationWindowSec":{"type":"long"}, "ExcludedRule":{ "type":"structure", "required":["Name"], @@ -4546,6 +4547,10 @@ "shape":"RateLimit", "documentation":"

The limit on requests per 5-minute period for a single aggregation instance for the rate-based rule. If the rate-based statement includes a ScopeDownStatement, this limit is applied only to the requests that match the statement.

Examples:

" }, + "EvaluationWindowSec":{ + "shape":"EvaluationWindowSec", + "documentation":"

The amount of time, in seconds, that WAF should include in its request counts, looking back from the current time. For example, for a setting of 120, when WAF checks the rate, it counts the requests for the 2 minutes immediately preceding the current time. Valid settings are 60, 120, 300, and 600.

This setting doesn't determine how often WAF checks the rate, but how far back it looks each time it checks. WAF checks the rate about every 10 seconds.

Default: 300 (5 minutes)

" + }, "AggregateKeyType":{ "shape":"RateBasedStatementAggregateKeyType", "documentation":"

Setting that indicates how to aggregate the request counts.

Web requests that are missing any of the components specified in the aggregation keys are omitted from the rate-based rule evaluation and handling.

" From 9aa9f073e4b1d6fd3d83767080bcdcf9c256c5ce Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Wed, 28 Feb 2024 19:05:30 +0000 Subject: [PATCH 2/2] Bumping version to 1.34.52 --- .changes/1.34.52.json | 32 +++++++++++++++++++ .../next-release/api-change-batch-53438.json | 5 --- .../api-change-bedrockagentruntime-83702.json | 5 --- .../next-release/api-change-ce-75342.json | 5 --- .../next-release/api-change-ec2-21286.json | 5 --- .../next-release/api-change-iot-4567.json | 5 --- .../next-release/api-change-wafv2-88300.json | 5 --- CHANGELOG.rst | 11 +++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 10 files changed, 45 insertions(+), 32 deletions(-) create mode 100644 .changes/1.34.52.json delete mode 100644 .changes/next-release/api-change-batch-53438.json delete mode 100644 .changes/next-release/api-change-bedrockagentruntime-83702.json delete mode 100644 .changes/next-release/api-change-ce-75342.json delete mode 100644 .changes/next-release/api-change-ec2-21286.json delete mode 100644 .changes/next-release/api-change-iot-4567.json delete mode 100644 .changes/next-release/api-change-wafv2-88300.json diff --git a/.changes/1.34.52.json b/.changes/1.34.52.json new file mode 100644 index 0000000000..ec83519201 --- /dev/null +++ b/.changes/1.34.52.json @@ -0,0 +1,32 @@ +[ + { + "category": "``batch``", + "description": "This release adds Batch support for configuration of multicontainer jobs in ECS, Fargate, and EKS. 
This support is available for all types of jobs, including both array jobs and multi-node parallel jobs.", + "type": "api-change" + }, + { + "category": "``bedrock-agent-runtime``", + "description": "This release adds support to override search strategy performed by the Retrieve and RetrieveAndGenerate APIs for Amazon Bedrock Agents", + "type": "api-change" + }, + { + "category": "``ce``", + "description": "This release introduces the new API 'GetApproximateUsageRecords', which retrieves estimated usage records for hourly granularity or resource-level data at daily granularity.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "This release increases the range of MaxResults for GetNetworkInsightsAccessScopeAnalysisFindings to 1,000.", + "type": "api-change" + }, + { + "category": "``iot``", + "description": "This release reduces the maximum results returned per query invocation from 500 to 100 for the SearchIndex API. This change has no implications as long as the API is invoked until the nextToken is NULL.", + "type": "api-change" + }, + { + "category": "``wafv2``", + "description": "AWS WAF now supports configurable time windows for request aggregation with rate-based rules. Customers can now select time windows of 1 minute, 2 minutes or 10 minutes, in addition to the previously supported 5 minutes.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-batch-53438.json b/.changes/next-release/api-change-batch-53438.json deleted file mode 100644 index a2b3f3a7fe..0000000000 --- a/.changes/next-release/api-change-batch-53438.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``batch``", - "description": "This release adds Batch support for configuration of multicontainer jobs in ECS, Fargate, and EKS. This support is available for all types of jobs, including both array jobs and multi-node parallel jobs." 
-} diff --git a/.changes/next-release/api-change-bedrockagentruntime-83702.json b/.changes/next-release/api-change-bedrockagentruntime-83702.json deleted file mode 100644 index 1f9873ad58..0000000000 --- a/.changes/next-release/api-change-bedrockagentruntime-83702.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``bedrock-agent-runtime``", - "description": "This release adds support to override search strategy performed by the Retrieve and RetrieveAndGenerate APIs for Amazon Bedrock Agents" -} diff --git a/.changes/next-release/api-change-ce-75342.json b/.changes/next-release/api-change-ce-75342.json deleted file mode 100644 index 03dd5495e3..0000000000 --- a/.changes/next-release/api-change-ce-75342.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``ce``", - "description": "This release introduces the new API 'GetApproximateUsageRecords', which retrieves estimated usage records for hourly granularity or resource-level data at daily granularity." -} diff --git a/.changes/next-release/api-change-ec2-21286.json b/.changes/next-release/api-change-ec2-21286.json deleted file mode 100644 index 5043a8d47e..0000000000 --- a/.changes/next-release/api-change-ec2-21286.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``ec2``", - "description": "This release increases the range of MaxResults for GetNetworkInsightsAccessScopeAnalysisFindings to 1,000." -} diff --git a/.changes/next-release/api-change-iot-4567.json b/.changes/next-release/api-change-iot-4567.json deleted file mode 100644 index 21acca1964..0000000000 --- a/.changes/next-release/api-change-iot-4567.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``iot``", - "description": "This release reduces the maximum results returned per query invocation from 500 to 100 for the SearchIndex API. This change has no implications as long as the API is invoked until the nextToken is NULL." 
-} diff --git a/.changes/next-release/api-change-wafv2-88300.json b/.changes/next-release/api-change-wafv2-88300.json deleted file mode 100644 index acf37e06c0..0000000000 --- a/.changes/next-release/api-change-wafv2-88300.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``wafv2``", - "description": "AWS WAF now supports configurable time windows for request aggregation with rate-based rules. Customers can now select time windows of 1 minute, 2 minutes or 10 minutes, in addition to the previously supported 5 minutes." -} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 69735ac374..b321762a07 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,17 @@ CHANGELOG ========= +1.34.52 +======= + +* api-change:``batch``: This release adds Batch support for configuration of multicontainer jobs in ECS, Fargate, and EKS. This support is available for all types of jobs, including both array jobs and multi-node parallel jobs. +* api-change:``bedrock-agent-runtime``: This release adds support to override search strategy performed by the Retrieve and RetrieveAndGenerate APIs for Amazon Bedrock Agents +* api-change:``ce``: This release introduces the new API 'GetApproximateUsageRecords', which retrieves estimated usage records for hourly granularity or resource-level data at daily granularity. +* api-change:``ec2``: This release increases the range of MaxResults for GetNetworkInsightsAccessScopeAnalysisFindings to 1,000. +* api-change:``iot``: This release reduces the maximum results returned per query invocation from 500 to 100 for the SearchIndex API. This change has no implications as long as the API is invoked until the nextToken is NULL. +* api-change:``wafv2``: AWS WAF now supports configurable time windows for request aggregation with rate-based rules. Customers can now select time windows of 1 minute, 2 minutes or 10 minutes, in addition to the previously supported 5 minutes. 
+ + 1.34.51 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index e5ba536b29..8dd0eac89a 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.34.51' +__version__ = '1.34.52' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index 5f44550853..5a35143587 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -59,7 +59,7 @@ # The short X.Y version. version = '1.34.' # The full version, including alpha/beta/rc tags. -release = '1.34.51' +release = '1.34.52' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.