From 28bd5fcb588bf2a5ef06a04c95acb83b7915939e Mon Sep 17 00:00:00 2001 From: awstools Date: Fri, 9 Feb 2024 19:14:04 +0000 Subject: [PATCH] feat(client-batch): This feature allows Batch to support configuration of repository credentials for jobs running on ECS --- .../src/commands/CancelJobCommand.ts | 2 +- .../CreateComputeEnvironmentCommand.ts | 4 +- .../commands/DescribeJobDefinitionsCommand.ts | 6 + .../src/commands/DescribeJobsCommand.ts | 6 + .../commands/RegisterJobDefinitionCommand.ts | 6 + clients/client-batch/src/models/models_0.ts | 202 ++++++++++++------ .../src/protocols/Aws_restJson1.ts | 5 + codegen/sdk-codegen/aws-models/batch.json | 197 ++++++++++++++--- 8 files changed, 326 insertions(+), 102 deletions(-) diff --git a/clients/client-batch/src/commands/CancelJobCommand.ts b/clients/client-batch/src/commands/CancelJobCommand.ts index b0a9ac23a0172..debdda3aac563 100644 --- a/clients/client-batch/src/commands/CancelJobCommand.ts +++ b/clients/client-batch/src/commands/CancelJobCommand.ts @@ -41,7 +41,7 @@ export interface CancelJobCommandOutput extends CancelJobResponse, __MetadataBea *

A PENDING job is canceled after all dependency jobs are completed. * Therefore, it may take longer than expected to cancel a job in PENDING * status.

- *

When you try to cancel an array parent job in PENDING, Batch attempts to + *

When you try to cancel an array parent job in PENDING, Batch attempts to * cancel all child jobs. The array parent job is canceled when all child jobs are * completed.

* diff --git a/clients/client-batch/src/commands/CreateComputeEnvironmentCommand.ts b/clients/client-batch/src/commands/CreateComputeEnvironmentCommand.ts index b29894f9527bf..abedd2e275a7e 100644 --- a/clients/client-batch/src/commands/CreateComputeEnvironmentCommand.ts +++ b/clients/client-batch/src/commands/CreateComputeEnvironmentCommand.ts @@ -95,8 +95,8 @@ export interface CreateComputeEnvironmentCommandOutput extends CreateComputeEnvi *

Don't specify an AMI ID in imageId, imageIdOverride (in * ec2Configuration * ), or in the launch - * template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's - * supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID + * template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's + * supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID * in the imageId or imageIdOverride parameters, or the launch template identified by the * LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the * AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the diff --git a/clients/client-batch/src/commands/DescribeJobDefinitionsCommand.ts b/clients/client-batch/src/commands/DescribeJobDefinitionsCommand.ts index ae2a4ce3318d4..14869d7c8ea1c 100644 --- a/clients/client-batch/src/commands/DescribeJobDefinitionsCommand.ts +++ b/clients/client-batch/src/commands/DescribeJobDefinitionsCommand.ts @@ -182,6 +182,9 @@ export interface DescribeJobDefinitionsCommandOutput extends DescribeJobDefiniti * // operatingSystemFamily: "STRING_VALUE", * // cpuArchitecture: "STRING_VALUE", * // }, + * // repositoryCredentials: { // RepositoryCredentials + * // credentialsParameter: "STRING_VALUE", // required + * // }, * // }, * // timeout: { // JobTimeout * // attemptDurationSeconds: Number("int"), @@ -292,6 +295,9 @@ export interface DescribeJobDefinitionsCommandOutput extends DescribeJobDefiniti * // operatingSystemFamily: "STRING_VALUE", * // cpuArchitecture: "STRING_VALUE", * // }, + * // repositoryCredentials: { + * // credentialsParameter: "STRING_VALUE", // required + * // }, * // }, * // }, * // ], diff --git a/clients/client-batch/src/commands/DescribeJobsCommand.ts 
b/clients/client-batch/src/commands/DescribeJobsCommand.ts index 32c677f9b20cc..f5357f7a52104 100644 --- a/clients/client-batch/src/commands/DescribeJobsCommand.ts +++ b/clients/client-batch/src/commands/DescribeJobsCommand.ts @@ -222,6 +222,9 @@ export interface DescribeJobsCommandOutput extends DescribeJobsResponse, __Metad * // operatingSystemFamily: "STRING_VALUE", * // cpuArchitecture: "STRING_VALUE", * // }, + * // repositoryCredentials: { // RepositoryCredentials + * // credentialsParameter: "STRING_VALUE", // required + * // }, * // }, * // nodeDetails: { // NodeDetails * // nodeIndex: Number("int"), @@ -333,6 +336,9 @@ export interface DescribeJobsCommandOutput extends DescribeJobsResponse, __Metad * // operatingSystemFamily: "STRING_VALUE", * // cpuArchitecture: "STRING_VALUE", * // }, + * // repositoryCredentials: { + * // credentialsParameter: "STRING_VALUE", // required + * // }, * // }, * // }, * // ], diff --git a/clients/client-batch/src/commands/RegisterJobDefinitionCommand.ts b/clients/client-batch/src/commands/RegisterJobDefinitionCommand.ts index a7a7f14c7767d..dc098e6444859 100644 --- a/clients/client-batch/src/commands/RegisterJobDefinitionCommand.ts +++ b/clients/client-batch/src/commands/RegisterJobDefinitionCommand.ts @@ -154,6 +154,9 @@ export interface RegisterJobDefinitionCommandOutput extends RegisterJobDefinitio * operatingSystemFamily: "STRING_VALUE", * cpuArchitecture: "STRING_VALUE", * }, + * repositoryCredentials: { // RepositoryCredentials + * credentialsParameter: "STRING_VALUE", // required + * }, * }, * nodeProperties: { // NodeProperties * numNodes: Number("int"), // required @@ -261,6 +264,9 @@ export interface RegisterJobDefinitionCommandOutput extends RegisterJobDefinitio * operatingSystemFamily: "STRING_VALUE", * cpuArchitecture: "STRING_VALUE", * }, + * repositoryCredentials: { + * credentialsParameter: "STRING_VALUE", // required + * }, * }, * }, * ], diff --git a/clients/client-batch/src/models/models_0.ts 
b/clients/client-batch/src/models/models_0.ts index e7881e2180798..c654f27a3e7af 100644 --- a/clients/client-batch/src/models/models_0.ts +++ b/clients/client-batch/src/models/models_0.ts @@ -311,6 +311,15 @@ export interface Ec2Configuration { * (GPU): Default for all GPU instance families (for example P4 and * G4) and can be used for all non Amazon Web Services Graviton-based instance types.

* + *
ECS_AL2023
+ *
+ *

+ * Amazon Linux 2023: Batch + * supports Amazon Linux 2023.

+ * + *

Amazon Linux 2023 does not support A1 instances.

+ *
+ *
*
ECS_AL1
*
*

@@ -407,7 +416,7 @@ export interface LaunchTemplateSpecification { * isn't changed when the compute environment is updated. It's only changed if the * updateToLatestImageVersion parameter for the compute environment is set to * true. During an infrastructure update, if either $Latest or - * $Default is specified, Batch re-evaluates the launch template version, and it + * $Default is specified, Batch re-evaluates the launch template version, and it * might use a different version of the launch template. This is the case even if the launch * template isn't specified in the update. When updating a compute environment, changing the launch * template requires an infrastructure update of the compute environment. For more information, see @@ -497,10 +506,12 @@ export interface ComputeResource { * resources.

*
* - *

With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED - * strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot - * Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. - * In this event, Batch never exceeds maxvCpus by more than a single instance.

+ *

With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and + * SPOT_PRICE_CAPACITY_OPTIMIZED + * (recommended) strategies using On-Demand or Spot Instances, and the + * BEST_FIT strategy using Spot Instances, Batch might need to exceed + * maxvCpus to meet your capacity requirements. In this event, Batch never exceeds + * maxvCpus by more than a single instance.

*/ allocationStrategy?: CRAllocationStrategy; @@ -524,12 +535,12 @@ export interface ComputeResource { * compute environment can * support.

* - *

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED - * allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy - * using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity - * requirements. In this event, Batch never exceeds maxvCpus by more than a single - * instance. For example, no more than a single instance from among those specified in your compute - * environment is allocated.

+ *

With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and + * SPOT_PRICE_CAPACITY_OPTIMIZED + * (recommended) strategies using On-Demand or Spot Instances, and the + * BEST_FIT strategy using Spot Instances, Batch might need to exceed + * maxvCpus to meet your capacity requirements. In this event, Batch never exceeds + * maxvCpus by more than a single instance.

*
*/ maxvCpus: number | undefined; @@ -631,8 +642,8 @@ export interface ComputeResource { /** * @public - *

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can - * specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, + *

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. This parameter is required + * for Amazon EC2 instances types. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, * * ecsInstanceRole * or @@ -653,7 +664,7 @@ export interface ComputeResource { * \{ "Name": "Batch Instance - C4OnDemand" \}. This is helpful for recognizing your * Batch instances in the Amazon EC2 console. Updating these tags requires an infrastructure update to * the compute environment. For more information, see Updating compute environments in the - * Batch User Guide. These tags aren't seen when using the Batch + * Batch User Guide. These tags aren't seen when using the Batch * ListTagsForResource API operation.

* *

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

@@ -855,12 +866,12 @@ export interface CreateComputeEnvironmentRequest { /** * @public *

The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf. For - * more information, see Batch service IAM + * more information, see Batch service IAM * role in the Batch User Guide.

* *

If your account already created the Batch service-linked role, that role is used by default for your compute - * environment unless you specify a different role here. If the Batch service-linked role doesn't exist in your - * account, and no role is specified here, the service attempts to create the Batch service-linked role in your + * environment unless you specify a different role here. If the Batch service-linked role doesn't exist in your + * account, and no role is specified here, the service attempts to create the Batch service-linked role in your * account.

*
*

If your specified role has a path other than /, then you must specify either the full role ARN @@ -869,7 +880,7 @@ export interface CreateComputeEnvironmentRequest { * and paths in the IAM User Guide.

* *

Depending on how you created your Batch service role, its ARN might contain the service-role - * path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the + * path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the * service-role path prefix. Because of this, we recommend that you specify the full ARN of your service * role when you create compute environments.

*
@@ -1095,7 +1106,7 @@ export interface FairsharePolicy { * ActiveFairShares * is the number of active fair share * identifiers.

- *

For example, a computeReservation value of 50 indicates that Batchreserves + *

For example, a computeReservation value of 50 indicates that Batch reserves * 50% of the maximum available vCPU if there's only one fair share identifier. It reserves 25% if * there are two fair share identifiers. It reserves 12.5% if there are three fair share * identifiers. A computeReservation value of 25 indicates that Batch should reserve @@ -1474,9 +1485,10 @@ export interface DescribeComputeEnvironmentsResponse { export interface DescribeJobDefinitionsRequest { /** * @public - *

A list of up to 100 job definitions. Each entry in the list can either be an ARN in the format - * arn:aws:batch:$\{Region\}:$\{Account\}:job-definition/$\{JobDefinitionName\}:$\{Revision\} or a short version - * using the form $\{JobDefinitionName\}:$\{Revision\}.

+ *

A list of up to 100 job definitions. Each entry in the list can either be an ARN in the + * format + * arn:aws:batch:$\{Region\}:$\{Account\}:job-definition/$\{JobDefinitionName\}:$\{Revision\} + * or a short version using the form $\{JobDefinitionName\}:$\{Revision\}. This parameter can't be used with other parameters.

*/ jobDefinitions?: string[]; @@ -1919,7 +1931,7 @@ export interface LogConfiguration { /** * @public *

Details for a Docker volume mount point that's used in a job's container properties. This - * parameter maps to Volumes in the Create a container section of the Docker Remote API and the + * parameter maps to Volumes in the Create a container section of the Docker Remote API and the * --volume option to docker run.

*/ export interface MountPoint { @@ -1960,6 +1972,18 @@ export interface NetworkConfiguration { assignPublicIp?: AssignPublicIp; } +/** + * @public + *

The repository credentials for private registry authentication.

+ */ +export interface RepositoryCredentials { + /** + * @public + *

The Amazon Resource Name (ARN) of the secret containing the private repository credentials.

+ */ + credentialsParameter: string | undefined; +} + /** * @public * @enum @@ -2156,55 +2180,72 @@ export interface ResourceRequirement { /** * @public - *

An object that represents the compute environment architecture for Batch jobs on Fargate.

+ *

+ * An object that represents the compute environment architecture for Batch jobs on Fargate. + *

*/ export interface RuntimePlatform { /** * @public *

The operating system for the compute environment. * Valid values are: - * LINUX (default), WINDOWS_SERVER_2019_CORE, - * WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and - * WINDOWS_SERVER_2022_FULL.

+ * LINUX (default), WINDOWS_SERVER_2019_CORE, + * WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and + * WINDOWS_SERVER_2022_FULL.

* *

The following parameters can’t be set for Windows containers: linuxParameters, - * privileged, user, ulimits, - * readonlyRootFilesystem, - * and efsVolumeConfiguration.

+ * privileged, user, ulimits, + * readonlyRootFilesystem, + * and efsVolumeConfiguration.

*
* - *

The Batch Scheduler checks before registering a task definition with Fargate. If the job - * requires a Windows container and the first compute environment is LINUX, the - * compute environment is skipped and the next is checked until a Windows-based compute environment - * is found.

+ *

The Batch Scheduler checks + * the compute environments + * that are attached to the job queue before registering a task definition with + * Fargate. In this + * scenario, the job queue is where the job is submitted. If the job requires a + * Windows container and the first compute environment is LINUX, the compute + * environment is skipped and the next compute environment is checked until a Windows-based compute + * environment is found.

*
* - *

Fargate Spot is not supported for Windows-based containers on Fargate. A job - * queue will be blocked if a Fargate Windows job is submitted to a job queue with only Fargate - * Spot compute environments. - * However, you can attach both FARGATE and FARGATE_SPOT - * compute environments to the same job - * queue.

+ *

Fargate Spot is not supported for + * ARM64 and + * Windows-based containers on Fargate. A job queue will be blocked if a + * Fargate + * ARM64 or + * Windows job is submitted to a job queue with only Fargate Spot compute environments. + * However, you can attach both FARGATE and + * FARGATE_SPOT compute environments to the same job queue.

*
*/ operatingSystemFamily?: string; /** * @public - *

The vCPU architecture. The default value is X86_64. Valid values are - * X86_64 and ARM64.

+ *

+ * The vCPU architecture. The default value is X86_64. Valid values are + * X86_64 and ARM64.

* *

This parameter must be set to * X86_64 * for Windows containers.

*
+ * + *

Fargate Spot is not supported for ARM64 and Windows-based containers on + * Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is + * submitted to a job queue with only Fargate Spot compute environments. However, you can attach + * both FARGATE and FARGATE_SPOT compute environments to the same job + * queue.

+ *
*/ cpuArchitecture?: string; } /** * @public - *

The ulimit settings to pass to the container.

+ *

The ulimit settings to pass to the container. For more information, see + * Ulimit.

* *

This object isn't applicable to jobs that are running on Fargate resources.

*
@@ -2212,13 +2253,16 @@ export interface RuntimePlatform { export interface Ulimit { /** * @public - *

The hard limit for the ulimit type.

+ *

The hard limit for the ulimit type.

*/ hardLimit: number | undefined; /** * @public - *

The type of the ulimit.

+ *

The type of the ulimit. Valid values are: core | cpu | + * data | fsize | locks | memlock | msgqueue | + * nice | nofile | nproc | rss | rtprio | + * rttime | sigpending | stack.

*/ name: string | undefined; @@ -2407,8 +2451,10 @@ export interface Volume { export interface ContainerProperties { /** * @public - *

The image used to start a container. This string is passed directly to the Docker daemon. - * Images in the Docker Hub registry are available by default. Other repositories are specified with + *

Required. + * The image used to start a container. This string is passed directly to the + * Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are + * specified with * * repository-url/image:tag * . @@ -2663,6 +2709,12 @@ export interface ContainerProperties { *

An object that represents the compute environment architecture for Batch jobs on Fargate.

*/ runtimePlatform?: RuntimePlatform; + + /** + * @public + *

The private repository authentication credentials to use.

+ */ + repositoryCredentials?: RepositoryCredentials; } /** @@ -3318,7 +3370,7 @@ export interface JobTimeout { /** * @public *

The job timeout time (in seconds) that's measured from the job attempt's - * startedAt timestamp. After this time passes, Batch terminates your jobs if they + * startedAt timestamp. After this time passes, Batch terminates your jobs if they * aren't finished. The minimum value for the timeout is 60 seconds.

*

For array jobs, the timeout applies to the child jobs, not to the parent array job.

*

For multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the @@ -3890,6 +3942,12 @@ export interface ContainerDetail { *

An object that represents the compute environment architecture for Batch jobs on Fargate.

*/ runtimePlatform?: RuntimePlatform; + + /** + * @public + *

The private repository authentication credentials to use.

+ */ + repositoryCredentials?: RepositoryCredentials; } /** @@ -4149,7 +4207,7 @@ export interface EksPodPropertiesDetail { *

Describes and uniquely identifies Kubernetes resources. For example, the compute environment * that a pod runs in or the jobID for a job running in the pod. For more information, * see Understanding Kubernetes Objects in the Kubernetes - * documentation.

+ * documentation.

*/ metadata?: EksMetadata; } @@ -4290,7 +4348,7 @@ export interface JobDetail { * @public *

The Unix timestamp (in milliseconds) for when the job was started. More specifically, it's * when the job transitioned from the STARTING state to the RUNNING state. - * This parameter isn't provided for child jobs of array jobs or multi-node parallel jobs.

+ *

*/ startedAt: number | undefined; @@ -5248,9 +5306,12 @@ export interface SubmitJobRequest { /** * @public - *

The scheduling priority for the job. This only affects jobs in job queues with a fair share policy. Jobs with a - * higher scheduling priority are scheduled before jobs with a lower scheduling priority. This overrides any scheduling - * priority in the job definition.

+ *

The scheduling priority for the job. This only affects jobs in job queues with a fair share + * policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower + * scheduling priority. + * This + * overrides any scheduling priority in the job definition and works only within a single share + * identifier.

*

The minimum supported value is 0 and the maximum supported value is 9999.

*/ schedulingPriorityOverride?: number; @@ -5335,7 +5396,7 @@ export interface SubmitJobRequest { /** * @public *

The timeout configuration for this SubmitJob operation. You can specify a timeout duration - * after which Batch terminates your jobs if they haven't finished. If a job is terminated due to a timeout, it isn't + * after which Batch terminates your jobs if they haven't finished. If a job is terminated due to a timeout, it isn't * retried. The minimum value for the timeout is 60 seconds. This configuration overrides any timeout configuration * specified in the job definition. For array jobs, child jobs have the same timeout configuration as the parent job. * For more information, see Job @@ -5494,12 +5555,12 @@ export interface ComputeResourceUpdate { * @public *

The maximum number of Amazon EC2 vCPUs that an environment can reach.

* - *

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED - * allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy - * using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity - * requirements. In this event, Batch never exceeds maxvCpus by more than a single - * instance. That is, no more than a single instance from among those specified in your compute - * environment.

+ *

With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and + * SPOT_PRICE_CAPACITY_OPTIMIZED + * (recommended) strategies using On-Demand or Spot Instances, and the + * BEST_FIT strategy using Spot Instances, Batch might need to exceed + * maxvCpus to meet your capacity requirements. In this event, Batch never exceeds + * maxvCpus by more than a single instance.

*
*/ maxvCpus?: number; @@ -5598,10 +5659,12 @@ export interface ComputeResourceUpdate { * resources.

* * - *

With both BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED - * strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot - * Instances, Batch might need to exceed maxvCpus to meet your capacity requirements. - * In this event, Batch never exceeds maxvCpus by more than a single instance.

+ *

With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and + * SPOT_PRICE_CAPACITY_OPTIMIZED + * (recommended) strategies using On-Demand or Spot Instances, and the + * BEST_FIT strategy using Spot Instances, Batch might need to exceed + * maxvCpus to meet your capacity requirements. In this event, Batch never exceeds + * maxvCpus by more than a single instance.

*/ allocationStrategy?: CRUpdateAllocationStrategy; @@ -5647,9 +5710,10 @@ export interface ComputeResourceUpdate { /** * @public - *

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can - * specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example, - * + *

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. + * Required for Amazon EC2 + * instances. You can specify the short name or full Amazon Resource Name (ARN) of an instance + * profile. For example, * ecsInstanceRole * or * arn:aws:iam:::instance-profile/ecsInstanceRole @@ -5865,7 +5929,7 @@ export interface UpdateComputeEnvironmentRequest { /** * @public *

The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf. - * For more information, see Batch service IAM + * For more information, see Batch service IAM * role in the Batch User Guide.

* *

If the compute environment has a service-linked role, it can't be changed to use a regular IAM role. @@ -5878,7 +5942,7 @@ export interface UpdateComputeEnvironmentRequest { * (recommended) or prefix the role name with the path.

* *

Depending on how you created your Batch service role, its ARN might contain the service-role - * path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the + * path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the * service-role path prefix. Because of this, we recommend that you specify the full ARN of your service * role when you create compute environments.

*
diff --git a/clients/client-batch/src/protocols/Aws_restJson1.ts b/clients/client-batch/src/protocols/Aws_restJson1.ts index 5c012e569c1d8..0b9dec6dad52b 100644 --- a/clients/client-batch/src/protocols/Aws_restJson1.ts +++ b/clients/client-batch/src/protocols/Aws_restJson1.ts @@ -135,6 +135,7 @@ import { NodePropertyOverride, NodeRangeProperty, PlatformCapability, + RepositoryCredentials, ResourceRequirement, RetryStrategy, RuntimePlatform, @@ -1421,6 +1422,8 @@ const se_FairsharePolicy = (input: FairsharePolicy, context: __SerdeContext): an // se_PlatformCapabilityList omitted. +// se_RepositoryCredentials omitted. + // se_ResourceRequirement omitted. // se_ResourceRequirements omitted. @@ -1651,6 +1654,8 @@ const de_FairsharePolicy = (output: any, context: __SerdeContext): FairsharePoli // de_PlatformCapabilityList omitted. +// de_RepositoryCredentials omitted. + // de_ResourceRequirement omitted. // de_ResourceRequirements omitted. diff --git a/codegen/sdk-codegen/aws-models/batch.json b/codegen/sdk-codegen/aws-models/batch.json index 864ed812fcfe1..a8374a8db79b4 100644 --- a/codegen/sdk-codegen/aws-models/batch.json +++ b/codegen/sdk-codegen/aws-models/batch.json @@ -1515,7 +1515,7 @@ } ], "traits": { - "smithy.api#documentation": "

Cancels a job in an Batch job queue. Jobs that are in the\n SUBMITTED\n or\n PENDING\n are\n canceled. A job\n inRUNNABLE remains in RUNNABLE until it reaches the head of the\n job queue. Then the job status is updated to\n FAILED.

\n \n

A PENDING job is canceled after all dependency jobs are completed.\n Therefore, it may take longer than expected to cancel a job in PENDING\n status.

\n

When you try to cancel an array parent job in PENDING, Batch attempts to\n cancel all child jobs. The array parent job is canceled when all child jobs are\n completed.

\n
\n

Jobs that progressed to the STARTING or\n RUNNING state aren't canceled. However, the API operation still succeeds, even\n if no job is canceled. These jobs must be terminated with the TerminateJob\n operation.

", + "smithy.api#documentation": "

Cancels a job in an Batch job queue. Jobs that are in the\n SUBMITTED\n or\n PENDING\n are\n canceled. A job\n inRUNNABLE remains in RUNNABLE until it reaches the head of the\n job queue. Then the job status is updated to\n FAILED.

\n \n

A PENDING job is canceled after all dependency jobs are completed.\n Therefore, it may take longer than expected to cancel a job in PENDING\n status.

\n

When you try to cancel an array parent job in PENDING, Batch attempts to\n cancel all child jobs. The array parent job is canceled when all child jobs are\n completed.

\n
\n

Jobs that progressed to the STARTING or\n RUNNING state aren't canceled. However, the API operation still succeeds, even\n if no job is canceled. These jobs must be terminated with the TerminateJob\n operation.

", "smithy.api#examples": [ { "title": "To cancel a job", @@ -1731,7 +1731,7 @@ "allocationStrategy": { "target": "com.amazonaws.batch#CRAllocationStrategy", "traits": { - "smithy.api#documentation": "

The allocation strategy to use for the compute resource if not enough instances of the best\n fitting instance type can be allocated. This might be because of availability of the instance\n type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
\n
\n
BEST_FIT (default)
\n
\n

Batch selects an instance type that best fits the needs of the jobs with a preference\n for the lowest-cost instance type. If additional instances of the selected instance type\n aren't available, Batch waits for the additional instances to be available. If there aren't\n enough instances available or the user is reaching Amazon EC2 service limits,\n additional jobs aren't run until the currently running jobs are completed. This allocation\n strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with\n BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use\n a BEST_FIT allocation strategy don't support infrastructure updates and can't\n update some parameters. For more information, see Updating compute environments in\n the Batch User Guide.

\n
\n
BEST_FIT_PROGRESSIVE
\n
\n

Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types.

\n
\n
SPOT_CAPACITY_OPTIMIZED
\n
\n

Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
SPOT_PRICE_CAPACITY_OPTIMIZED
\n
\n

The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
\n

With BEST_FIT_PROGRESSIVE,SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED\n strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot\n Instances, Batch might need to exceed maxvCpus to meet your capacity requirements.\n In this event, Batch never exceeds maxvCpus by more than a single instance.

" + "smithy.api#documentation": "

The allocation strategy to use for the compute resource if not enough instances of the best\n fitting instance type can be allocated. This might be because of availability of the instance\n type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
\n
\n
BEST_FIT (default)
\n
\n

Batch selects an instance type that best fits the needs of the jobs with a preference\n for the lowest-cost instance type. If additional instances of the selected instance type\n aren't available, Batch waits for the additional instances to be available. If there aren't\n enough instances available or the user is reaching Amazon EC2 service limits,\n additional jobs aren't run until the currently running jobs are completed. This allocation\n strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with\n BEST_FIT, the Spot Fleet IAM Role must be specified. Compute resources that use\n a BEST_FIT allocation strategy don't support infrastructure updates and can't\n update some parameters. For more information, see Updating compute environments in\n the Batch User Guide.

\n
\n
BEST_FIT_PROGRESSIVE
\n
\n

Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types.

\n
\n
SPOT_CAPACITY_OPTIMIZED
\n
\n

Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
SPOT_PRICE_CAPACITY_OPTIMIZED
\n
\n

The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
\n

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and\n SPOT_PRICE_CAPACITY_OPTIMIZED\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

" } }, "minvCpus": { @@ -1744,7 +1744,7 @@ "target": "com.amazonaws.batch#Integer", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The maximum number of\n vCPUs that a\n compute environment can\n support.

\n \n

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED\n allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy\n using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity\n requirements. In this event, Batch never exceeds maxvCpus by more than a single\n instance. For example, no more than a single instance from among those specified in your compute\n environment is allocated.

\n
", + "smithy.api#documentation": "

The maximum number of\n vCPUs that a\n compute environment can\n support.

\n \n

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and\n SPOT_PRICE_CAPACITY_OPTIMIZED\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

\n
", "smithy.api#required": {} } }, @@ -1792,13 +1792,13 @@ "instanceRole": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can\n specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example,\n \n ecsInstanceRole\n or\n arn:aws:iam:::instance-profile/ecsInstanceRole\n .\n For more information, see Amazon ECS instance role in the Batch User Guide.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" + "smithy.api#documentation": "

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. This parameter is required \n for Amazon EC2 instance types. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example,\n \n ecsInstanceRole\n or\n arn:aws:iam:::instance-profile/ecsInstanceRole\n .\n For more information, see Amazon ECS instance role in the Batch User Guide.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" } }, "tags": { "target": "com.amazonaws.batch#TagsMap", "traits": { - "smithy.api#documentation": "

Key-value pair tags to be applied to EC2 resources that are launched in the compute\n environment. For Batch, these take the form of \"String1\": \"String2\", where\n String1 is the tag key and String2 is the tag value-for example,\n { \"Name\": \"Batch Instance - C4OnDemand\" }. This is helpful for recognizing your\n Batch instances in the Amazon EC2 console. Updating these tags requires an infrastructure update to\n the compute environment. For more information, see Updating compute environments in the\n Batch User Guide. These tags aren't seen when using the Batch\n ListTagsForResource API operation.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" + "smithy.api#documentation": "

Key-value pair tags to be applied to EC2 resources that are launched in the compute\n environment. For Batch, these take the form of \"String1\": \"String2\", where\n String1 is the tag key and String2 is the tag value-for example,\n { \"Name\": \"Batch Instance - C4OnDemand\" }. This is helpful for recognizing your\n Batch instances in the Amazon EC2 console. Updating these tags requires an infrastructure update to\n the compute environment. For more information, see Updating compute environments in the\n Batch User Guide. These tags aren't seen when using the Batch\n ListTagsForResource API operation.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" } }, "placementGroup": { @@ -1848,7 +1848,7 @@ "maxvCpus": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

The maximum number of Amazon EC2 vCPUs that an environment can reach.

\n \n

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED\n allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy\n using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity\n requirements. In this event, Batch never exceeds maxvCpus by more than a single\n instance. That is, no more than a single instance from among those specified in your compute\n environment.

\n
" + "smithy.api#documentation": "

The maximum number of Amazon EC2 vCPUs that an environment can reach.

\n \n

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and\n SPOT_PRICE_CAPACITY_OPTIMIZED\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

\n
" } }, "desiredvCpus": { @@ -1872,7 +1872,7 @@ "allocationStrategy": { "target": "com.amazonaws.batch#CRUpdateAllocationStrategy", "traits": { - "smithy.api#documentation": "

The allocation strategy to use for the compute resource if there's not enough instances of\n the best fitting instance type that can be allocated. This might be because of availability of\n the instance type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide.

\n

When updating a compute environment, changing the allocation strategy requires an\n infrastructure update of the compute environment. For more information, see Updating compute\n environments in the Batch User Guide. BEST_FIT isn't\n supported when updating a compute environment.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
\n
\n
BEST_FIT_PROGRESSIVE
\n
\n

Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types.

\n
\n
SPOT_CAPACITY_OPTIMIZED
\n
\n

Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
SPOT_PRICE_CAPACITY_OPTIMIZED
\n
\n

The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
\n

With both BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and SPOT_PRICE_CAPACITY_OPTIMIZED\n strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot\n Instances, Batch might need to exceed maxvCpus to meet your capacity requirements.\n In this event, Batch never exceeds maxvCpus by more than a single instance.

" + "smithy.api#documentation": "

The allocation strategy to use for the compute resource if there's not enough instances of\n the best fitting instance type that can be allocated. This might be because of availability of\n the instance type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide.

\n

When updating a compute environment, changing the allocation strategy requires an\n infrastructure update of the compute environment. For more information, see Updating compute\n environments in the Batch User Guide. BEST_FIT isn't\n supported when updating a compute environment.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
\n
\n
BEST_FIT_PROGRESSIVE
\n
\n

Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types.

\n
\n
SPOT_CAPACITY_OPTIMIZED
\n
\n

Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
SPOT_PRICE_CAPACITY_OPTIMIZED
\n
\n

The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources.

\n
\n
\n

With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, and\n SPOT_PRICE_CAPACITY_OPTIMIZED\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT strategy using Spot Instances, Batch might need to exceed\n maxvCpus to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus by more than a single instance.

" } }, "instanceTypes": { @@ -1890,7 +1890,7 @@ "instanceRole": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can\n specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example,\n \n ecsInstanceRole\n or\n arn:aws:iam:::instance-profile/ecsInstanceRole\n .\n For more information, see Amazon ECS instance role in the Batch User Guide.

\n

When updating a compute environment, changing this setting requires an infrastructure update\n of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" + "smithy.api#documentation": "

The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment.\n Required for Amazon EC2\n instances. You can specify the short name or full Amazon Resource Name (ARN) of an instance\n profile. For example, \n ecsInstanceRole\n or\n arn:aws:iam:::instance-profile/ecsInstanceRole\n .\n For more information, see Amazon ECS instance role in the Batch User Guide.

\n

When updating a compute environment, changing this setting requires an infrastructure update\n of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide.

\n \n

This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.

\n
" } }, "tags": { @@ -2112,7 +2112,16 @@ } }, "runtimePlatform": { - "target": "com.amazonaws.batch#RuntimePlatform" + "target": "com.amazonaws.batch#RuntimePlatform", + "traits": { + "smithy.api#documentation": "

An object that represents the compute environment architecture for Batch jobs on Fargate.

" + } + }, + "repositoryCredentials": { + "target": "com.amazonaws.batch#RepositoryCredentials", + "traits": { + "smithy.api#documentation": "

The private repository authentication credentials to use.

" + } } }, "traits": { @@ -2175,7 +2184,7 @@ "image": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

The image used to start a container. This string is passed directly to the Docker daemon.\n Images in the Docker Hub registry are available by default. Other repositories are specified with\n \n repository-url/image:tag\n .\n It can be 255 characters long. It can contain uppercase and lowercase letters, numbers,\n hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and number signs (#). This parameter maps to Image in the\n Create a container section of the Docker Remote API and the IMAGE\n parameter of docker run.

\n \n

Docker image architecture must match the processor architecture of the compute resources\n that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based\n compute resources.

\n
\n
    \n
  • \n

    Images in Amazon ECR Public repositories use the full registry/repository[:tag] or\n registry/repository[@digest] naming conventions. For example,\n public.ecr.aws/registry_alias/my-web-app:latest\n .

    \n
  • \n
  • \n

    Images in Amazon ECR repositories use the full registry and repository URI (for example,\n 123456789012.dkr.ecr..amazonaws.com/).

    \n
  • \n
  • \n

    Images in official repositories on Docker Hub use a single name (for example,\n ubuntu or mongo).

    \n
  • \n
  • \n

    Images in other repositories on Docker Hub are qualified with an organization name (for\n example, amazon/amazon-ecs-agent).

    \n
  • \n
  • \n

    Images in other online repositories are qualified further by a domain name (for example,\n quay.io/assemblyline/ubuntu).

    \n
  • \n
" + "smithy.api#documentation": "

Required.\n The image used to start a container. This string is passed directly to the\n Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are\n specified with\n \n repository-url/image:tag\n .\n It can be 255 characters long. It can contain uppercase and lowercase letters, numbers,\n hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and number signs (#). This parameter maps to Image in the\n Create a container section of the Docker Remote API and the IMAGE\n parameter of docker run.

\n \n

Docker image architecture must match the processor architecture of the compute resources\n that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based\n compute resources.

\n
\n
    \n
  • \n

    Images in Amazon ECR Public repositories use the full registry/repository[:tag] or\n registry/repository[@digest] naming conventions. For example,\n public.ecr.aws/registry_alias/my-web-app:latest\n .

    \n
  • \n
  • \n

    Images in Amazon ECR repositories use the full registry and repository URI (for example,\n 123456789012.dkr.ecr..amazonaws.com/).

    \n
  • \n
  • \n

    Images in official repositories on Docker Hub use a single name (for example,\n ubuntu or mongo).

    \n
  • \n
  • \n

    Images in other repositories on Docker Hub are qualified with an organization name (for\n example, amazon/amazon-ecs-agent).

    \n
  • \n
  • \n

    Images in other online repositories are qualified further by a domain name (for example,\n quay.io/assemblyline/ubuntu).

    \n
  • \n
" } }, "vcpus": { @@ -2305,7 +2314,16 @@ } }, "runtimePlatform": { - "target": "com.amazonaws.batch#RuntimePlatform" + "target": "com.amazonaws.batch#RuntimePlatform", + "traits": { + "smithy.api#documentation": "

An object that represents the compute environment architecture for Batch jobs on Fargate.

" + } + }, + "repositoryCredentials": { + "target": "com.amazonaws.batch#RepositoryCredentials", + "traits": { + "smithy.api#documentation": "

The private repository authentication credentials to use.

" + } } }, "traits": { @@ -2349,7 +2367,7 @@ } ], "traits": { - "smithy.api#documentation": "

Creates an Batch compute environment. You can create MANAGED or UNMANAGED compute\n environments. MANAGED compute environments can use Amazon EC2 or Fargate resources.\n UNMANAGED compute environments can only use EC2 resources.

\n

In a managed compute environment, Batch manages the capacity and instance types of the compute resources\n within the environment. This is based on the compute resource specification that you define or the launch template that you\n specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot\n Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can\n optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a\n specified percentage of the On-Demand price.

\n \n

Multi-node parallel jobs aren't supported on Spot Instances.

\n
\n

In an unmanaged compute environment, you can manage your own EC2 compute resources and have flexibility with how\n you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of\n your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the\n Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch\n your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the\n Amazon Elastic Container Service Developer Guide.

\n \n

To create a compute environment that uses EKS resources, the caller must have permissions to call\n eks:DescribeCluster.

\n
\n \n

Batch doesn't automatically upgrade the AMIs in a compute environment after it's created. For example, it\n also doesn't update the AMIs in your compute environment when a newer version of the Amazon ECS optimized AMI is\n available. You're responsible for the management of the guest operating system. This includes any updates and\n security patches. You're also responsible for any additional application software or utilities that you install on\n the compute resources. There are two ways to use a new AMI for your Batch jobs. The original method is to complete\n these steps:

\n
    \n
  1. \n

    Create a new compute environment with the new AMI.

    \n
  2. \n
  3. \n

    Add the compute environment to an existing job queue.

    \n
  4. \n
  5. \n

    Remove the earlier compute environment from your job queue.

    \n
  6. \n
  7. \n

    Delete the earlier compute environment.

    \n
  8. \n
\n

In April 2022, Batch added enhanced support for updating compute environments. For more information, see\n Updating compute\n environments. To use the enhanced updating of compute environments to update AMIs, follow these\n rules:

\n
    \n
  • \n

    Either don't set the service role (serviceRole) parameter or set it to the AWSBatchServiceRole service-linked role.

    \n
  • \n
  • \n

    Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE, \n SPOT_CAPACITY_OPTIMIZED, or SPOT_PRICE_CAPACITY_OPTIMIZED.

    \n
  • \n
  • \n

    Set the update to latest image version (updateToLatestImageVersion)\n parameter to\n true.\n The updateToLatestImageVersion parameter is used when you update a compute\n environment. This parameter is ignored when you create a compute\n environment.

    \n
  • \n
  • \n

    Don't specify an AMI ID in imageId, imageIdOverride (in \n ec2Configuration\n ), or in the launch\n template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's\n supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID\n in the imageId or imageIdOverride parameters, or the launch template identified by the\n LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the\n AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the\n imageId or imageIdOverride parameters. It can only be replaced by specifying a different\n launch template, or if the launch template version is set to $Default or $Latest, by\n setting either a new default version for the launch template (if $Default) or by adding a new version\n to the launch template (if $Latest).

    \n
  • \n
\n

If these rules are followed, any update that starts an infrastructure update causes the AMI ID to be\n re-selected. If the version setting in the launch template (launchTemplate) is set to\n $Latest or $Default, the latest or default version of the launch template is evaluated up\n at the time of the infrastructure update, even if the launchTemplate wasn't updated.

\n
", + "smithy.api#documentation": "

Creates an Batch compute environment. You can create MANAGED or UNMANAGED compute\n environments. MANAGED compute environments can use Amazon EC2 or Fargate resources.\n UNMANAGED compute environments can only use EC2 resources.

\n

In a managed compute environment, Batch manages the capacity and instance types of the compute resources\n within the environment. This is based on the compute resource specification that you define or the launch template that you\n specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot\n Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can\n optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a\n specified percentage of the On-Demand price.

\n \n

Multi-node parallel jobs aren't supported on Spot Instances.

\n
\n

In an unmanaged compute environment, you can manage your own EC2 compute resources and have flexibility with how\n you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of\n your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the\n Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch\n your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the\n Amazon Elastic Container Service Developer Guide.

\n \n

To create a compute environment that uses EKS resources, the caller must have permissions to call\n eks:DescribeCluster.

\n
\n \n

Batch doesn't automatically upgrade the AMIs in a compute environment after it's created. For example, it\n also doesn't update the AMIs in your compute environment when a newer version of the Amazon ECS optimized AMI is\n available. You're responsible for the management of the guest operating system. This includes any updates and\n security patches. You're also responsible for any additional application software or utilities that you install on\n the compute resources. There are two ways to use a new AMI for your Batch jobs. The original method is to complete\n these steps:

\n
    \n
  1. \n

    Create a new compute environment with the new AMI.

    \n
  2. \n
  3. \n

    Add the compute environment to an existing job queue.

    \n
  4. \n
  5. \n

    Remove the earlier compute environment from your job queue.

    \n
  6. \n
  7. \n

    Delete the earlier compute environment.

    \n
  8. \n
\n

In April 2022, Batch added enhanced support for updating compute environments. For more information, see\n Updating compute\n environments. To use the enhanced updating of compute environments to update AMIs, follow these\n rules:

\n
    \n
  • \n

    Either don't set the service role (serviceRole) parameter or set it to the AWSBatchServiceRole service-linked role.

    \n
  • \n
  • \n

    Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE, \n SPOT_CAPACITY_OPTIMIZED, or SPOT_PRICE_CAPACITY_OPTIMIZED.

    \n
  • \n
  • \n

    Set the update to latest image version (updateToLatestImageVersion)\n parameter to\n true.\n The updateToLatestImageVersion parameter is used when you update a compute\n environment. This parameter is ignored when you create a compute\n environment.

    \n
  • \n
  • \n

    Don't specify an AMI ID in imageId, imageIdOverride (in \n ec2Configuration\n ), or in the launch\n template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's\n supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID\n in the imageId or imageIdOverride parameters, or the launch template identified by the\n LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the\n AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the\n imageId or imageIdOverride parameters. It can only be replaced by specifying a different\n launch template, or if the launch template version is set to $Default or $Latest, by\n setting either a new default version for the launch template (if $Default) or by adding a new version\n to the launch template (if $Latest).

    \n
  • \n
\n

If these rules are followed, any update that starts an infrastructure update causes the AMI ID to be\n re-selected. If the version setting in the launch template (launchTemplate) is set to\n $Latest or $Default, the latest or default version of the launch template is evaluated up\n at the time of the infrastructure update, even if the launchTemplate wasn't updated.

\n
", "smithy.api#examples": [ { "title": "To create a managed EC2 compute environment", @@ -2378,6 +2396,36 @@ "computeEnvironmentName": "C4OnDemand", "computeEnvironmentArn": "arn:aws:batch:us-east-1:012345678910:compute-environment/C4OnDemand" } + }, + { + "title": "To create a managed EC2 Spot compute environment", + "documentation": "This example creates a managed compute environment with the M4 instance type that is launched when the Spot bid price is at or below 20% of the On-Demand price for the instance type. The compute environment is called M4Spot.", + "input": { + "computeEnvironmentName": "M4Spot", + "state": "ENABLED", + "type": "MANAGED", + "computeResources": { + "subnets": ["subnet-220c0e0a", "subnet-1a95556d", "subnet-978f6dce"], + "type": "SPOT", + "spotIamFleetRole": "arn:aws:iam::012345678910:role/aws-ec2-spot-fleet-role", + "tags": { + "Name": "Batch Instance - M4Spot" + }, + "desiredvCpus": 4, + "minvCpus": 0, + "instanceTypes": ["m4"], + "securityGroupIds": ["sg-cf5093b2"], + "instanceRole": "ecsInstanceRole", + "maxvCpus": 128, + "bidPercentage": 20, + "ec2KeyPair": "id_rsa" + }, + "serviceRole": "arn:aws:iam::012345678910:role/AWSBatchServiceRole" + }, + "output": { + "computeEnvironmentName": "M4Spot", + "computeEnvironmentArn": "arn:aws:batch:us-east-1:012345678910:compute-environment/M4Spot" + } } ], "smithy.api#http": { @@ -2427,7 +2475,7 @@ "serviceRole": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf. For\n more information, see Batch service IAM\n role in the Batch User Guide.

\n \n

If your account already created the Batch service-linked role, that role is used by default for your compute\n environment unless you specify a different role here. If the Batch service-linked role doesn't exist in your\n account, and no role is specified here, the service attempts to create the Batch service-linked role in your\n account.

\n
\n

If your specified role has a path other than /, then you must specify either the full role ARN\n (recommended) or prefix the role name with the path. For example, if a role with the name bar has a path\n of /foo/, specify /foo/bar as the role name. For more information, see Friendly names\n and paths in the IAM User Guide.

\n \n

Depending on how you created your Batch service role, its ARN might contain the service-role\n path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the\n service-role path prefix. Because of this, we recommend that you specify the full ARN of your service\n role when you create compute environments.

\n
" + "smithy.api#documentation": "

The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf. For\n more information, see Batch service IAM\n role in the Batch User Guide.

\n \n

If your account already created the Batch service-linked role, that role is used by default for your compute\n environment unless you specify a different role here. If the Batch service-linked role doesn't exist in your\n account, and no role is specified here, the service attempts to create the Batch service-linked role in your\n account.

\n
\n

If your specified role has a path other than /, then you must specify either the full role ARN\n (recommended) or prefix the role name with the path. For example, if a role with the name bar has a path\n of /foo/, specify /foo/bar as the role name. For more information, see Friendly names\n and paths in the IAM User Guide.

\n \n

Depending on how you created your Batch service role, its ARN might contain the service-role\n path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the\n service-role path prefix. Because of this, we recommend that you specify the full ARN of your service\n role when you create compute environments.

\n
" } }, "tags": { @@ -2505,6 +2553,29 @@ "jobQueueName": "LowPriority", "jobQueueArn": "arn:aws:batch:us-east-1:012345678910:job-queue/LowPriority" } + }, + { + "title": "To create a job queue with multiple compute environments", + "documentation": "This example creates a job queue called HighPriority that uses the C4OnDemand compute environment with an order of 1 and the M4Spot compute environment with an order of 2.", + "input": { + "priority": 10, + "state": "ENABLED", + "computeEnvironmentOrder": [ + { + "computeEnvironment": "C4OnDemand", + "order": 1 + }, + { + "computeEnvironment": "M4Spot", + "order": 2 + } + ], + "jobQueueName": "HighPriority" + }, + "output": { + "jobQueueName": "HighPriority", + "jobQueueArn": "arn:aws:batch:us-east-1:012345678910:job-queue/HighPriority" + } } ], "smithy.api#http": { @@ -3082,7 +3153,7 @@ "jobDefinitions": { "target": "com.amazonaws.batch#StringList", "traits": { - "smithy.api#documentation": "

A list of up to 100 job definitions. Each entry in the list can either be an ARN in the format\n arn:aws:batch:${Region}:${Account}:job-definition/${JobDefinitionName}:${Revision} or a short version\n using the form ${JobDefinitionName}:${Revision}.

" + "smithy.api#documentation": "

A list of up to 100 job definitions. Each entry in the list can either be an ARN in the\n format\n arn:aws:batch:${Region}:${Account}:job-definition/${JobDefinitionName}:${Revision}\n or a short version using the form ${JobDefinitionName}:${Revision}. This parameter can't be used with other parameters.

" } }, "maxResults": { @@ -3553,7 +3624,7 @@ "target": "com.amazonaws.batch#ImageType", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The image type to match with the instance type to select an AMI. The supported values are\n different for ECS and EKS resources.

\n
\n
ECS
\n
\n

If the imageIdOverride parameter isn't specified, then a recent Amazon ECS-optimized Amazon Linux 2 AMI (ECS_AL2) is used. If a new image type is\n specified in an update, but neither an imageId nor a imageIdOverride\n parameter is specified, then the latest Amazon ECS optimized AMI for that image type that's\n supported by Batch is used.

\n
\n
ECS_AL2
\n
\n

\n Amazon Linux\n 2: Default for all non-GPU instance families.

\n
\n
ECS_AL2_NVIDIA
\n
\n

\n Amazon Linux 2\n (GPU): Default for all GPU instance families (for example P4 and\n G4) and can be used for all non Amazon Web Services Graviton-based instance types.

\n
\n
ECS_AL1
\n
\n

\n Amazon Linux. Amazon Linux has\n reached the end-of-life of standard support. For more information, see Amazon Linux AMI.

\n
\n
\n
\n
EKS
\n
\n

If the imageIdOverride parameter isn't specified, then a recent Amazon EKS-optimized Amazon Linux\n AMI (EKS_AL2) is used. If a new image type is specified in an update,\n but neither an imageId nor a imageIdOverride parameter is specified,\n then the latest Amazon EKS optimized AMI for that image type that Batch supports is used.

\n
\n
EKS_AL2
\n
\n

\n Amazon\n Linux 2: Default for all non-GPU instance families.

\n
\n
EKS_AL2_NVIDIA
\n
\n

\n Amazon\n Linux 2 (accelerated): Default for all GPU instance families (for example,\n P4 and G4) and can be used for all non Amazon Web Services Graviton-based\n instance types.

\n
\n
\n
\n
", + "smithy.api#documentation": "

The image type to match with the instance type to select an AMI. The supported values are\n different for ECS and EKS resources.

\n
\n
ECS
\n
\n

If the imageIdOverride parameter isn't specified, then a recent Amazon ECS-optimized Amazon Linux 2 AMI (ECS_AL2) is used. If a new image type is\n specified in an update, but neither an imageId nor a imageIdOverride\n parameter is specified, then the latest Amazon ECS optimized AMI for that image type that's\n supported by Batch is used.

\n
\n
ECS_AL2
\n
\n

\n Amazon Linux\n 2: Default for all non-GPU instance families.

\n
\n
ECS_AL2_NVIDIA
\n
\n

\n Amazon Linux 2\n (GPU): Default for all GPU instance families (for example P4 and\n G4) and can be used for all non Amazon Web Services Graviton-based instance types.

\n
\n
ECS_AL2023
\n
\n

\n Amazon Linux 2023: Batch\n supports Amazon Linux 2023.

\n \n

Amazon Linux 2023 does not support A1 instances.

\n
\n
\n
ECS_AL1
\n
\n

\n Amazon Linux. Amazon Linux has\n reached the end-of-life of standard support. For more information, see Amazon Linux AMI.

\n
\n
\n
\n
EKS
\n
\n

If the imageIdOverride parameter isn't specified, then a recent Amazon EKS-optimized Amazon Linux\n AMI (EKS_AL2) is used. If a new image type is specified in an update,\n but neither an imageId nor a imageIdOverride parameter is specified,\n then the latest Amazon EKS optimized AMI for that image type that Batch supports is used.

\n
\n
EKS_AL2
\n
\n

\n Amazon\n Linux 2: Default for all non-GPU instance families.

\n
\n
EKS_AL2_NVIDIA
\n
\n

\n Amazon\n Linux 2 (accelerated): Default for all GPU instance families (for example,\n P4 and G4) and can be used for all non Amazon Web Services Graviton-based\n instance types.

\n
\n
\n
\n
", "smithy.api#required": {} } }, @@ -4148,7 +4219,10 @@ } }, "metadata": { - "target": "com.amazonaws.batch#EksMetadata" + "target": "com.amazonaws.batch#EksMetadata", + "traits": { + "smithy.api#documentation": "

Describes and uniquely identifies Kubernetes resources. For example, the compute environment\n that a pod runs in or the jobID for a job running in the pod. For more information,\n see Understanding Kubernetes Objects in the Kubernetes\n documentation.

" + } } }, "traits": { @@ -4362,7 +4436,7 @@ "computeReservation": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

A value used to reserve some of the available maximum vCPU for fair share identifiers that\n aren't already used.

\n

The reserved ratio is\n (computeReservation/100)^ActiveFairShares\n \n where \n ActiveFairShares\n is the number of active fair share\n identifiers.

\n

For example, a computeReservation value of 50 indicates that Batchreserves\n 50% of the maximum available vCPU if there's only one fair share identifier. It reserves 25% if\n there are two fair share identifiers. It reserves 12.5% if there are three fair share\n identifiers. A computeReservation value of 25 indicates that Batch should reserve\n 25% of the maximum available vCPU if there's only one fair share identifier, 6.25% if there are\n two fair share identifiers, and 1.56% if there are three fair share identifiers.

\n

The minimum value is 0 and the maximum value is 99.

" + "smithy.api#documentation": "

A value used to reserve some of the available maximum vCPU for fair share identifiers that\n aren't already used.

\n

The reserved ratio is\n (computeReservation/100)^ActiveFairShares\n \n where \n ActiveFairShares\n is the number of active fair share\n identifiers.

\n

For example, a computeReservation value of 50 indicates that Batch reserves\n 50% of the maximum available vCPU if there's only one fair share identifier. It reserves 25% if\n there are two fair share identifiers. It reserves 12.5% if there are three fair share\n identifiers. A computeReservation value of 25 indicates that Batch should reserve\n 25% of the maximum available vCPU if there's only one fair share identifier, 6.25% if there are\n two fair share identifiers, and 1.56% if there are three fair share identifiers.

\n

The minimum value is 0 and the maximum value is 99.

" } }, "shareDistribution": { @@ -4728,7 +4802,7 @@ "target": "com.amazonaws.batch#Long", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The Unix timestamp (in milliseconds) for when the job was started. More specifically, it's\n when the job transitioned from the STARTING state to the RUNNING state.\n This parameter isn't provided for child jobs of array jobs or multi-node parallel jobs.

", + "smithy.api#documentation": "

The Unix timestamp (in milliseconds) for when the job was started. More specifically, it's\n when the job transitioned from the STARTING state to the RUNNING state.\n

", "smithy.api#required": {} } }, @@ -5071,7 +5145,7 @@ "attemptDurationSeconds": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

The job timeout time (in seconds) that's measured from the job attempt's\n startedAt timestamp. After this time passes, Batch terminates your jobs if they\n aren't finished. The minimum value for the timeout is 60 seconds.

\n

For array jobs, the timeout applies to the child jobs, not to the parent array job.

\n

For multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the\n individual nodes.

" + "smithy.api#documentation": "

The job timeout time (in seconds) that's measured from the job attempt's\n startedAt timestamp. After this time passes, Batch terminates your jobs if they\n aren't finished. The minimum value for the timeout is 60 seconds.

\n

For array jobs, the timeout applies to the child jobs, not to the parent array job.

\n

For multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the\n individual nodes.

" } } }, @@ -5146,7 +5220,7 @@ "version": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

The version number of the launch template, $Latest, or\n $Default.

\n

If the value is $Latest, the latest version of the launch template is used. If\n the value is $Default, the default version of the launch template is used.

\n \n

If the AMI ID that's used in a compute environment is from the launch template, the AMI\n isn't changed when the compute environment is updated. It's only changed if the\n updateToLatestImageVersion parameter for the compute environment is set to\n true. During an infrastructure update, if either $Latest or\n $Default is specified, Batch re-evaluates the launch template version, and it\n might use a different version of the launch template. This is the case even if the launch\n template isn't specified in the update. When updating a compute environment, changing the launch\n template requires an infrastructure update of the compute environment. For more information, see\n Updating compute\n environments in the Batch User Guide.

\n
\n

Default: $Default.

" + "smithy.api#documentation": "

The version number of the launch template, $Latest, or\n $Default.

\n

If the value is $Latest, the latest version of the launch template is used. If\n the value is $Default, the default version of the launch template is used.

\n \n

If the AMI ID that's used in a compute environment is from the launch template, the AMI\n isn't changed when the compute environment is updated. It's only changed if the\n updateToLatestImageVersion parameter for the compute environment is set to\n true. During an infrastructure update, if either $Latest or\n $Default is specified, Batch re-evaluates the launch template version, and it\n might use a different version of the launch template. This is the case even if the launch\n template isn't specified in the update. When updating a compute environment, changing the launch\n template requires an infrastructure update of the compute environment. For more information, see\n Updating compute\n environments in the Batch User Guide.

\n
\n

Default: $Default.

" } } }, @@ -5231,6 +5305,22 @@ } ] } + }, + { + "title": "To list submitted jobs", + "documentation": "This example lists jobs in the HighPriority job queue that are in the SUBMITTED job status.", + "input": { + "jobQueue": "HighPriority", + "jobStatus": "SUBMITTED" + }, + "output": { + "jobSummaryList": [ + { + "jobId": "68f0c163-fbd4-44e6-9fd1-25b14a434786", + "jobName": "example" + } + ] + } } ], "smithy.api#http": { @@ -5579,7 +5669,7 @@ } }, "traits": { - "smithy.api#documentation": "

Details for a Docker volume mount point that's used in a job's container properties. This\n parameter maps to Volumes in the Create a container section of the Docker Remote API and the\n --volume option to docker run.

" + "smithy.api#documentation": "

Details for a Docker volume mount point that's used in a job's container properties. This\n parameter maps to Volumes in the Create a container section of the Docker Remote API and the\n --volume option to docker run.

" } }, "com.amazonaws.batch#MountPoints": { @@ -5891,6 +5981,37 @@ "jobDefinitionArn": "arn:aws:batch:us-east-1:012345678910:job-definition/sleep10:1", "revision": 1 } + }, + { + "title": "RegisterJobDefinition with tags", + "documentation": "This demonstrates calling the RegisterJobDefinition action, including tags.", + "input": { + "jobDefinitionName": "sleep30", + "type": "container", + "containerProperties": { + "image": "busybox", + "command": ["sleep", "30"], + "resourceRequirements": [ + { + "type": "MEMORY", + "value": "128" + }, + { + "type": "VCPU", + "value": "1" + } + ] + }, + "tags": { + "Department": "Engineering", + "User": "JaneDoe" + } + }, + "output": { + "jobDefinitionName": "sleep30", + "jobDefinitionArn": "arn:aws:batch:us-east-1:012345678910:job-definition/sleep30:1", + "revision": 1 + } } ], "smithy.api#http": { @@ -6017,6 +6138,22 @@ "smithy.api#output": {} } }, + "com.amazonaws.batch#RepositoryCredentials": { + "type": "structure", + "members": { + "credentialsParameter": { + "target": "com.amazonaws.batch#String", + "traits": { + "smithy.api#clientOptional": {}, + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the secret containing the private repository credentials.

", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The repository credentials for private registry authentication.

" + } + }, "com.amazonaws.batch#ResourceRequirement": { "type": "structure", "members": { @@ -6113,18 +6250,18 @@ "operatingSystemFamily": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

The operating system for the compute environment.\n Valid values are:\n LINUX (default), WINDOWS_SERVER_2019_CORE,\n WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and\n WINDOWS_SERVER_2022_FULL.

\n \n

The following parameters can’t be set for Windows containers: linuxParameters,\n privileged, user, ulimits,\n readonlyRootFilesystem,\n and efsVolumeConfiguration.

\n
\n \n

The Batch Scheduler checks before registering a task definition with Fargate. If the job\n requires a Windows container and the first compute environment is LINUX, the\n compute environment is skipped and the next is checked until a Windows-based compute environment\n is found.

\n
\n \n

Fargate Spot is not supported for Windows-based containers on Fargate. A job\n queue will be blocked if a Fargate Windows job is submitted to a job queue with only Fargate\n Spot compute environments.\n However, you can attach both FARGATE and FARGATE_SPOT\n compute environments to the same job\n queue.

\n
" + "smithy.api#documentation": "

The operating system for the compute environment.\n Valid values are:\n LINUX (default), WINDOWS_SERVER_2019_CORE,\n WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and\n WINDOWS_SERVER_2022_FULL.

\n \n

The following parameters can’t be set for Windows containers: linuxParameters,\n privileged, user, ulimits,\n readonlyRootFilesystem,\n and efsVolumeConfiguration.

\n
\n \n

The Batch Scheduler checks\n the compute environments\n that are attached to the job queue before registering a task definition with\n Fargate. In this\n scenario, the job queue is where the job is submitted. If the job requires a\n Windows container and the first compute environment is LINUX, the compute\n environment is skipped and the next compute environment is checked until a Windows-based compute\n environment is found.

\n
\n \n

Fargate Spot is not supported for\n ARM64 and\n Windows-based containers on Fargate. A job queue will be blocked if a\n Fargate\n ARM64 or\n Windows job is submitted to a job queue with only Fargate Spot compute environments.\n However, you can attach both FARGATE and\n FARGATE_SPOT compute environments to the same job queue.

\n
" } }, "cpuArchitecture": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

The vCPU architecture. The default value is X86_64. Valid values are\n X86_64 and ARM64.

\n \n

This parameter must be set to\n X86_64\n for Windows containers.

\n
" + "smithy.api#documentation": "

\n The vCPU architecture. The default value is X86_64. Valid values are\n X86_64 and ARM64.

\n \n

This parameter must be set to\n X86_64\n for Windows containers.

\n
\n \n

Fargate Spot is not supported for ARM64 and Windows-based containers on\n Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is\n submitted to a job queue with only Fargate Spot compute environments. However, you can attach\n both FARGATE and FARGATE_SPOT compute environments to the same job\n queue.

\n
" } } }, "traits": { - "smithy.api#documentation": "

An object that represents the compute environment architecture for Batch jobs on Fargate.

" + "smithy.api#documentation": "

\n An object that represents the compute environment architecture for Batch jobs on Fargate.\n

" } }, "com.amazonaws.batch#SchedulingPolicyDetail": { @@ -6339,7 +6476,7 @@ "schedulingPriorityOverride": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "

The scheduling priority for the job. This only affects jobs in job queues with a fair share policy. Jobs with a\n higher scheduling priority are scheduled before jobs with a lower scheduling priority. This overrides any scheduling\n priority in the job definition.

\n

The minimum supported value is 0 and the maximum supported value is 9999.

" + "smithy.api#documentation": "

The scheduling priority for the job. This only affects jobs in job queues with a fair share\n policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower\n scheduling priority.\n This\n overrides any scheduling priority in the job definition and works only within a single share\n identifier.

\n

The minimum supported value is 0 and the maximum supported value is 9999.

" } }, "arrayProperties": { @@ -6395,7 +6532,7 @@ "timeout": { "target": "com.amazonaws.batch#JobTimeout", "traits": { - "smithy.api#documentation": "

The timeout configuration for this SubmitJob operation. You can specify a timeout duration\n after which Batch terminates your jobs if they haven't finished. If a job is terminated due to a timeout, it isn't\n retried. The minimum value for the timeout is 60 seconds. This configuration overrides any timeout configuration\n specified in the job definition. For array jobs, child jobs have the same timeout configuration as the parent job.\n For more information, see Job\n Timeouts in the Amazon Elastic Container Service Developer Guide.

" + "smithy.api#documentation": "

The timeout configuration for this SubmitJob operation. You can specify a timeout duration\n after which Batch terminates your jobs if they haven't finished. If a job is terminated due to a timeout, it isn't\n retried. The minimum value for the timeout is 60 seconds. This configuration overrides any timeout configuration\n specified in the job definition. For array jobs, child jobs have the same timeout configuration as the parent job.\n For more information, see Job\n Timeouts in the Amazon Elastic Container Service Developer Guide.

" } }, "tags": { @@ -6681,7 +6818,7 @@ "target": "com.amazonaws.batch#Integer", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The hard limit for the ulimit type.

", + "smithy.api#documentation": "

The hard limit for the ulimit type.

", "smithy.api#required": {} } }, @@ -6689,7 +6826,7 @@ "target": "com.amazonaws.batch#String", "traits": { "smithy.api#clientOptional": {}, - "smithy.api#documentation": "

The type of the ulimit.

", + "smithy.api#documentation": "

The type of the ulimit. Valid values are: core | cpu | \n data | fsize | locks | memlock | msgqueue | \n nice | nofile | nproc | rss | rtprio | \n rttime | sigpending | stack.

", "smithy.api#required": {} } }, @@ -6703,7 +6840,7 @@ } }, "traits": { - "smithy.api#documentation": "

The ulimit settings to pass to the container.

\n \n

This object isn't applicable to jobs that are running on Fargate resources.

\n
" + "smithy.api#documentation": "

The ulimit settings to pass to the container. For more information, see \n Ulimit.

\n \n

This object isn't applicable to jobs that are running on Fargate resources.

\n
" } }, "com.amazonaws.batch#Ulimits": { @@ -6852,7 +6989,7 @@ "serviceRole": { "target": "com.amazonaws.batch#String", "traits": { - "smithy.api#documentation": "

The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf.\n For more information, see Batch service IAM\n role in the Batch User Guide.

\n \n

If the compute environment has a service-linked role, it can't be changed to use a regular IAM role.\n Likewise, if the compute environment has a regular IAM role, it can't be changed to use a service-linked role. To\n update the parameters for the compute environment that require an infrastructure update to change, the AWSServiceRoleForBatch service-linked role must be used. For more information, see\n Updating compute\n environments in the Batch User Guide.

\n
\n

If your specified role has a path other than /, then you must either specify the full role ARN\n (recommended) or prefix the role name with the path.

\n \n

Depending on how you created your Batch service role, its ARN might contain the service-role\n path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the\n service-role path prefix. Because of this, we recommend that you specify the full ARN of your service\n role when you create compute environments.

\n
" + "smithy.api#documentation": "

The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf.\n For more information, see Batch service IAM\n role in the Batch User Guide.

\n \n

If the compute environment has a service-linked role, it can't be changed to use a regular IAM role.\n Likewise, if the compute environment has a regular IAM role, it can't be changed to use a service-linked role. To\n update the parameters for the compute environment that require an infrastructure update to change, the AWSServiceRoleForBatch service-linked role must be used. For more information, see\n Updating compute\n environments in the Batch User Guide.

\n
\n

If your specified role has a path other than /, then you must either specify the full role ARN\n (recommended) or prefix the role name with the path.

\n \n

Depending on how you created your Batch service role, its ARN might contain the service-role\n path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the\n service-role path prefix. Because of this, we recommend that you specify the full ARN of your service\n role when you create compute environments.

\n
" } }, "updatePolicy": {