From 28bd5fcb588bf2a5ef06a04c95acb83b7915939e Mon Sep 17 00:00:00 2001
From: awstools
+ * Amazon Linux 2023: Batch
+ * supports Amazon Linux 2023. Amazon Linux 2023 does not support A1 instances.
@@ -407,7 +416,7 @@ export interface LaunchTemplateSpecification {
* isn't changed when the compute environment is updated. It's only changed if the
 * updateToLatestImageVersion parameter for the compute environment is set to true.
 *
 * A PENDING job is canceled after all dependency jobs are completed.
 * Therefore, it may take longer than expected to cancel a job in PENDING
 * status.
+ * When you try to cancel an array parent job in PENDING, Batch attempts to
+ * cancel all child jobs. The array parent job is canceled when all child jobs are
+ * completed.
 *
 * Don't specify an AMI ID in imageId, imageIdOverride (in
 * ec2Configuration), or in the launch
- * template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's
- * supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID
+ * template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's
+ * supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID
 * in the imageId or imageIdOverride parameters, or the launch template identified by the
 * LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the
 * AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the
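As a quick illustration of the cancellation behavior described above, a minimal sketch with the v3 client follows; the job ID and reason are placeholders, and the call still succeeds even when no job ends up being canceled:

    import { BatchClient, CancelJobCommand } from "@aws-sdk/client-batch";

    const client = new BatchClient({});

    // Canceling an array parent job that is still PENDING: Batch then attempts to
    // cancel all child jobs, and the parent is canceled once the children complete.
    await client.send(
      new CancelJobCommand({
        jobId: "example-array-parent-job-id", // placeholder
        reason: "Canceling unneeded array job",
      })
    );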
diff --git a/clients/client-batch/src/commands/DescribeJobDefinitionsCommand.ts b/clients/client-batch/src/commands/DescribeJobDefinitionsCommand.ts
index ae2a4ce3318d4..14869d7c8ea1c 100644
--- a/clients/client-batch/src/commands/DescribeJobDefinitionsCommand.ts
+++ b/clients/client-batch/src/commands/DescribeJobDefinitionsCommand.ts
@@ -182,6 +182,9 @@ export interface DescribeJobDefinitionsCommandOutput extends DescribeJobDefiniti
* // operatingSystemFamily: "STRING_VALUE",
* // cpuArchitecture: "STRING_VALUE",
* // },
+ * // repositoryCredentials: { // RepositoryCredentials
+ * // credentialsParameter: "STRING_VALUE", // required
+ * // },
* // },
* // timeout: { // JobTimeout
* // attemptDurationSeconds: Number("int"),
@@ -292,6 +295,9 @@ export interface DescribeJobDefinitionsCommandOutput extends DescribeJobDefiniti
* // operatingSystemFamily: "STRING_VALUE",
* // cpuArchitecture: "STRING_VALUE",
* // },
+ * // repositoryCredentials: {
+ * // credentialsParameter: "STRING_VALUE", // required
+ * // },
* // },
* // },
* // ],
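A short sketch of reading the new repositoryCredentials field from this command's output; the job definition name and status filter are placeholder values:

    import { BatchClient, DescribeJobDefinitionsCommand } from "@aws-sdk/client-batch";

    const client = new BatchClient({});
    const { jobDefinitions } = await client.send(
      new DescribeJobDefinitionsCommand({ jobDefinitionName: "my-job-def", status: "ACTIVE" })
    );

    // Job definitions that pull from a private registry now report the secret that
    // holds their credentials.
    for (const def of jobDefinitions ?? []) {
      console.log(def.jobDefinitionName, def.containerProperties?.repositoryCredentials?.credentialsParameter);
    }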
diff --git a/clients/client-batch/src/commands/DescribeJobsCommand.ts b/clients/client-batch/src/commands/DescribeJobsCommand.ts
index 32c677f9b20cc..f5357f7a52104 100644
--- a/clients/client-batch/src/commands/DescribeJobsCommand.ts
+++ b/clients/client-batch/src/commands/DescribeJobsCommand.ts
@@ -222,6 +222,9 @@ export interface DescribeJobsCommandOutput extends DescribeJobsResponse, __Metad
* // operatingSystemFamily: "STRING_VALUE",
* // cpuArchitecture: "STRING_VALUE",
* // },
+ * // repositoryCredentials: { // RepositoryCredentials
+ * // credentialsParameter: "STRING_VALUE", // required
+ * // },
* // },
* // nodeDetails: { // NodeDetails
* // nodeIndex: Number("int"),
@@ -333,6 +336,9 @@ export interface DescribeJobsCommandOutput extends DescribeJobsResponse, __Metad
* // operatingSystemFamily: "STRING_VALUE",
* // cpuArchitecture: "STRING_VALUE",
* // },
+ * // repositoryCredentials: {
+ * // credentialsParameter: "STRING_VALUE", // required
+ * // },
* // },
* // },
* // ],
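The same field is surfaced on described jobs; a minimal sketch, with a placeholder job ID:

    import { BatchClient, DescribeJobsCommand } from "@aws-sdk/client-batch";

    const client = new BatchClient({});
    const { jobs } = await client.send(new DescribeJobsCommand({ jobs: ["example-job-id"] }));

    // ContainerDetail now also carries repositoryCredentials.
    console.log(jobs?.[0]?.container?.repositoryCredentials?.credentialsParameter);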
diff --git a/clients/client-batch/src/commands/RegisterJobDefinitionCommand.ts b/clients/client-batch/src/commands/RegisterJobDefinitionCommand.ts
index a7a7f14c7767d..dc098e6444859 100644
--- a/clients/client-batch/src/commands/RegisterJobDefinitionCommand.ts
+++ b/clients/client-batch/src/commands/RegisterJobDefinitionCommand.ts
@@ -154,6 +154,9 @@ export interface RegisterJobDefinitionCommandOutput extends RegisterJobDefinitio
* operatingSystemFamily: "STRING_VALUE",
* cpuArchitecture: "STRING_VALUE",
* },
+ * repositoryCredentials: { // RepositoryCredentials
+ * credentialsParameter: "STRING_VALUE", // required
+ * },
* },
* nodeProperties: { // NodeProperties
* numNodes: Number("int"), // required
@@ -261,6 +264,9 @@ export interface RegisterJobDefinitionCommandOutput extends RegisterJobDefinitio
* operatingSystemFamily: "STRING_VALUE",
* cpuArchitecture: "STRING_VALUE",
* },
+ * repositoryCredentials: {
+ * credentialsParameter: "STRING_VALUE", // required
+ * },
* },
* },
* ],
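A hedged sketch of registering a job definition that pulls from a private registry; the image, secret ARN, and resource values are placeholders; credentialsParameter is the ARN of the Secrets Manager secret that holds the registry credentials:

    import { BatchClient, RegisterJobDefinitionCommand } from "@aws-sdk/client-batch";

    const client = new BatchClient({});
    await client.send(
      new RegisterJobDefinitionCommand({
        jobDefinitionName: "private-registry-job", // placeholder
        type: "container",
        containerProperties: {
          image: "registry.example.com/team/app:latest", // placeholder private image
          resourceRequirements: [
            { type: "VCPU", value: "1" },
            { type: "MEMORY", value: "2048" },
          ],
          // ARN of the Secrets Manager secret that stores the registry username/password.
          repositoryCredentials: {
            credentialsParameter:
              "arn:aws:secretsmanager:us-east-1:123456789012:secret:registry-creds", // placeholder
          },
        },
      })
    );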
diff --git a/clients/client-batch/src/models/models_0.ts b/clients/client-batch/src/models/models_0.ts
index e7881e2180798..c654f27a3e7af 100644
--- a/clients/client-batch/src/models/models_0.ts
+++ b/clients/client-batch/src/models/models_0.ts
@@ -311,6 +311,15 @@ export interface Ec2Configuration {
 * (GPU): Default for all GPU instance families (for example P4 and
 * G4) and can be used for all non Amazon Web Services Graviton-based instance types.
+ * Amazon Linux 2023: Batch supports Amazon Linux 2023. Amazon Linux 2023 does not
+ * support A1 instances.
 *
 * The AMI isn't changed when the compute environment is updated. It's only changed if the
 * updateToLatestImageVersion parameter for the compute environment is set to
 * true. During an infrastructure update, if either $Latest or
- * $Default is specified, Batch re-evaluates the launch template version, and it
+ * $Default is specified, Batch re-evaluates the launch template version, and it
 * might use a different version of the launch template. This is the case even if the launch
 * template isn't specified in the update. When updating a compute environment, changing the launch
 * template requires an infrastructure update of the compute environment. For more information, see
@@ -497,10 +506,12 @@ export interface ComputeResource {
 * resources.
- * With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED
- * strategies using On-Demand or Spot Instances, and the BEST_FIT strategy using Spot
- * Instances, Batch might need to exceed maxvCpus to meet your capacity requirements.
- * In this event, Batch never exceeds maxvCpus by more than a single instance.
+ * With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED and
+ * SPOT_PRICE_CAPACITY_OPTIMIZED
+ * (recommended) strategies using On-Demand or Spot Instances, and the
+ * BEST_FIT strategy using Spot Instances, Batch might need to exceed
+ * maxvCpus to meet your capacity requirements. In this event, Batch never exceeds
+ * maxvCpus by more than a single instance.
- * With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED and SPOT_PRICE_CAPACITY_OPTIMIZED
- * allocation strategies using On-Demand or Spot Instances, and the BEST_FIT strategy
- * using Spot Instances, Batch might need to exceed maxvCpus to meet your capacity
- * requirements. In this event, Batch never exceeds maxvCpus by more than a single
- * instance. For example, no more than a single instance from among those specified in your compute
- * environment is allocated.
+ * With BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED and
+ * SPOT_PRICE_CAPACITY_OPTIMIZED
+ * (recommended) strategies using On-Demand or Spot Instances, and the
+ * BEST_FIT strategy using Spot Instances, Batch might need to exceed
+ * maxvCpus to meet your capacity requirements. In this event, Batch never exceeds
+ * maxvCpus by more than a single instance.
- * The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can
- * specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example,
+ * The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. This parameter is required
+ * for Amazon EC2 instance types. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example,
 * ecsInstanceRole or
 * arn:aws:iam::<aws_account_id>:instance-profile/ecsInstanceRole.
 * For more information, see Amazon ECS instance role in the Batch User Guide.
@@ -653,7 +664,7 @@ export interface ComputeResource {
* \{ "Name": "Batch Instance - C4OnDemand" \}
. This is helpful for recognizing your
* Batch instances in the Amazon EC2 console. Updating these tags requires an infrastructure update to
* the compute environment. For more information, see Updating compute environments in the
- * Batch User Guide. These tags aren't seen when using the Batch
+ * Batch User Guide. These tags aren't seen when using the Batch
* ListTagsForResource
API operation.
This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.
@@ -855,12 +866,12 @@ export interface CreateComputeEnvironmentRequest {
 * @public
 * The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf. For
- * more information, see Batch service IAM
+ * more information, see Batch service IAM
 * role in the Batch User Guide.
 *
 * If your account already created the Batch service-linked role, that role is used by default for your compute
- * environment unless you specify a different role here. If the Batch service-linked role doesn't exist in your
- * account, and no role is specified here, the service attempts to create the Batch service-linked role in your
+ * environment unless you specify a different role here. If the Batch service-linked role doesn't exist in your
+ * account, and no role is specified here, the service attempts to create the Batch service-linked role in your
 * account.
 *
 * If your specified role has a path other than /, then you must specify either the full role ARN
 * (recommended) or prefix the role name with the path.
@@ -869,7 +880,7 @@ export interface CreateComputeEnvironmentRequest {
 * and paths in the IAM User Guide.
 *
 * Depending on how you created your Batch service role, its ARN might contain the service-role
- * path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the
+ * path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the
 * service-role path prefix. Because of this, we recommend that you specify the full ARN of your service
 * role when you create compute environments.
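A minimal sketch of passing the full service-role ARN when creating a compute environment, as recommended above; the environment name and account ID are placeholders, and an unmanaged environment is used only to keep the sketch small:

    import { BatchClient, CreateComputeEnvironmentCommand } from "@aws-sdk/client-batch";

    const client = new BatchClient({});
    await client.send(
      new CreateComputeEnvironmentCommand({
        computeEnvironmentName: "example-unmanaged-ce", // placeholder
        type: "UNMANAGED",
        // Full ARN, including the service-role path prefix, as recommended above.
        serviceRole: "arn:aws:iam::123456789012:role/service-role/AWSBatchServiceRole", // placeholder
      })
    );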
- * For example, a computeReservation value of 50 indicates that Batchreserves
+ * For example, a computeReservation value of 50 indicates that Batch reserves
 * 50% of the maximum available vCPU if there's only one fair share identifier. It reserves 25% if
 * there are two fair share identifiers. It reserves 12.5% if there are three fair share
 * identifiers. A computeReservation value of 25 indicates that Batch should reserve
@@ -1474,9 +1485,10 @@ export interface DescribeComputeEnvironmentsResponse {
export interface DescribeJobDefinitionsRequest {
/**
* @public
- * A list of up to 100 job definitions. Each entry in the list can either be an ARN in the format
- * arn:aws:batch:$\{Region\}:$\{Account\}:job-definition/$\{JobDefinitionName\}:$\{Revision\} or a short version
- * using the form $\{JobDefinitionName\}:$\{Revision\}.
+ * A list of up to 100 job definitions. Each entry in the list can either be an ARN in the
+ * format
+ * arn:aws:batch:$\{Region\}:$\{Account\}:job-definition/$\{JobDefinitionName\}:$\{Revision\}
+ * or a short version using the form $\{JobDefinitionName\}:$\{Revision\}. This parameter can't be used with other parameters.
 * Details for a Docker volume mount point that's used in a job's container properties. This
- * parameter maps to Volumes in the Create a container section of the Docker Remote API and the
+ * parameter maps to Volumes in the Create a container section of the Docker Remote API and the
 * --volume option to docker run.
+ * The repository credentials for private registry authentication.
+ */
+export interface RepositoryCredentials {
+  /**
+   * @public
+   * The Amazon Resource Name (ARN) of the secret containing the private repository credentials.
+   */
+  credentialsParameter: string | undefined;
+}
+
 /**
  * @public
  * @enum
@@ -2156,55 +2180,72 @@ export interface ResourceRequirement {
 /**
  * @public
- * An object that represents the compute environment architecture for Batch jobs on Fargate.
+ *
+ * An object that represents the compute environment architecture for Batch jobs on Fargate.
+ *
  */
 export interface RuntimePlatform {
  /**
   * @public
   * The operating system for the compute environment.
 * Valid values are:
- * LINUX (default), WINDOWS_SERVER_2019_CORE,
- * WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and
- * WINDOWS_SERVER_2022_FULL.
+ * LINUX (default), WINDOWS_SERVER_2019_CORE,
+ * WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and
+ * WINDOWS_SERVER_2022_FULL.
 * The following parameters can’t be set for Windows containers: linuxParameters,
- * privileged, user, ulimits,
- * readonlyRootFilesystem,
- * and efsVolumeConfiguration.
+ * privileged, user, ulimits,
+ * readonlyRootFilesystem,
+ * and efsVolumeConfiguration.
- * The Batch Scheduler checks before registering a task definition with Fargate. If the job
- * requires a Windows container and the first compute environment is LINUX, the
- * compute environment is skipped and the next is checked until a Windows-based compute environment
- * is found.
+ * The Batch Scheduler checks
+ * the compute environments
+ * that are attached to the job queue before registering a task definition with
+ * Fargate. In this
+ * scenario, the job queue is where the job is submitted. If the job requires a
+ * Windows container and the first compute environment is LINUX, the compute
+ * environment is skipped and the next compute environment is checked until a Windows-based compute
+ * environment is found.
- * Fargate Spot is not supported for Windows-based containers on Fargate. A job
- * queue will be blocked if a Fargate Windows job is submitted to a job queue with only Fargate
- * Spot compute environments.
- * However, you can attach both FARGATE and FARGATE_SPOT
- * compute environments to the same job
- * queue.
+ * Fargate Spot is not supported for
+ * ARM64 and
+ * Windows-based containers on Fargate. A job queue will be blocked if a
+ * Fargate
+ * ARM64 or
+ * Windows job is submitted to a job queue with only Fargate Spot compute environments.
+ * However, you can attach both FARGATE and
+ * FARGATE_SPOT compute environments to the same job queue.
- * The vCPU architecture. The default value is X86_64. Valid values are
- * X86_64 and ARM64.
+ * The vCPU architecture. The default value is X86_64. Valid values are
+ * X86_64 and ARM64.
 *
 * This parameter must be set to
 * X86_64
 * for Windows containers.
+ *
+ * Fargate Spot is not supported for ARM64 and Windows-based containers on
+ * Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is
+ * submitted to a job queue with only Fargate Spot compute environments. However, you can attach
+ * both FARGATE and FARGATE_SPOT compute environments to the same job
+ * queue.
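A hedged sketch of pinning the runtime platform on a Fargate job definition; the names, role ARN, and image are placeholders, and LINUX/ARM64 is used only as an example (a Windows family follows the same shape but must keep cpuArchitecture X86_64):

    import { BatchClient, RegisterJobDefinitionCommand } from "@aws-sdk/client-batch";

    const client = new BatchClient({});
    await client.send(
      new RegisterJobDefinitionCommand({
        jobDefinitionName: "arm64-fargate-job", // placeholder
        type: "container",
        platformCapabilities: ["FARGATE"],
        containerProperties: {
          image: "public.ecr.aws/docker/library/busybox:latest", // placeholder image
          executionRoleArn: "arn:aws:iam::123456789012:role/ecsTaskExecutionRole", // placeholder
          resourceRequirements: [
            { type: "VCPU", value: "1" },
            { type: "MEMORY", value: "2048" },
          ],
          networkConfiguration: { assignPublicIp: "ENABLED" },
          // ARM64 (or Windows) Fargate jobs can only land on FARGATE, not FARGATE_SPOT.
          runtimePlatform: {
            operatingSystemFamily: "LINUX",
            cpuArchitecture: "ARM64",
          },
        },
      })
    );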
- * The ulimit settings to pass to the container.
+ * The ulimit settings to pass to the container. For more information, see
+ * Ulimit.
 *
 * This object isn't applicable to jobs that are running on Fargate resources.
 *
- * The hard limit for the ulimit type.
+ * The hard limit for the ulimit type.
 *
- * The type of the ulimit.
+ * The type of the ulimit. Valid values are: core | cpu |
+ * data | fsize | locks | memlock | msgqueue |
+ * nice | nofile | nproc | rss | rtprio |
+ * rttime | sigpending | stack.
The image used to start a container. This string is passed directly to the Docker daemon. - * Images in the Docker Hub registry are available by default. Other repositories are specified with + *
Required.
+ * The image used to start a container. This string is passed directly to the
+ * Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are
+ * specified with
*
* repository-url/image:tag
*
.
@@ -2663,6 +2709,12 @@ export interface ContainerProperties {
*
An object that represents the compute environment architecture for Batch jobs on Fargate.
*/ runtimePlatform?: RuntimePlatform; + + /** + * @public + *The private repository authentication credentials to use.
+ */ + repositoryCredentials?: RepositoryCredentials; } /** @@ -3318,7 +3370,7 @@ export interface JobTimeout { /** * @public *The job timeout time (in seconds) that's measured from the job attempt's
- * startedAt
timestamp. After this time passes, Batch terminates your jobs if they
+ * startedAt
timestamp. After this time passes, Batch terminates your jobs if they
* aren't finished. The minimum value for the timeout is 60 seconds.
For array jobs, the timeout applies to the child jobs, not to the parent array job.
*For multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the @@ -3890,6 +3942,12 @@ export interface ContainerDetail { *
An object that represents the compute environment architecture for Batch jobs on Fargate.
*/ runtimePlatform?: RuntimePlatform; + + /** + * @public + *The private repository authentication credentials to use.
+ */ + repositoryCredentials?: RepositoryCredentials; } /** @@ -4149,7 +4207,7 @@ export interface EksPodPropertiesDetail { *Describes and uniquely identifies Kubernetes resources. For example, the compute environment
* that a pod runs in or the jobID
for a job running in the pod. For more information,
* see Understanding Kubernetes Objects in the Kubernetes
- * documentation.
The Unix timestamp (in milliseconds) for when the job was started. More specifically, it's
* when the job transitioned from the STARTING
state to the RUNNING
state.
- * This parameter isn't provided for child jobs of array jobs or multi-node parallel jobs.
The scheduling priority for the job. This only affects jobs in job queues with a fair share policy. Jobs with a - * higher scheduling priority are scheduled before jobs with a lower scheduling priority. This overrides any scheduling - * priority in the job definition.
+ *The scheduling priority for the job. This only affects jobs in job queues with a fair share + * policy. Jobs with a higher scheduling priority are scheduled before jobs with a lower + * scheduling priority. + * This + * overrides any scheduling priority in the job definition and works only within a single share + * identifier.
*The minimum supported value is 0 and the maximum supported value is 9999.
*/ schedulingPriorityOverride?: number; @@ -5335,7 +5396,7 @@ export interface SubmitJobRequest { /** * @public *The timeout configuration for this SubmitJob operation. You can specify a timeout duration
- * after which Batch terminates your jobs if they haven't finished. If a job is terminated due to a timeout, it isn't
+ * after which Batch terminates your jobs if they haven't finished. If a job is terminated due to a timeout, it isn't
* retried. The minimum value for the timeout is 60 seconds. This configuration overrides any timeout configuration
* specified in the job definition. For array jobs, child jobs have the same timeout configuration as the parent job.
* For more information, see Job
@@ -5494,12 +5555,12 @@ export interface ComputeResourceUpdate {
* @public
* The maximum number of Amazon EC2 vCPUs that an environment can reach. With With BEST_FIT_PROGRESSIVE
, SPOT_CAPACITY_OPTIMIZED
, and SPOT_PRICE_CAPACITY_OPTIMIZED
- * allocation strategies using On-Demand or Spot Instances, and the BEST_FIT
strategy
- * using Spot Instances, Batch might need to exceed maxvCpus
to meet your capacity
- * requirements. In this event, Batch never exceeds maxvCpus
by more than a single
- * instance. That is, no more than a single instance from among those specified in your compute
- * environment.BEST_FIT_PROGRESSIVE
,SPOT_CAPACITY_OPTIMIZED
and
+ * SPOT_PRICE_CAPACITY_OPTIMIZED
+ * (recommended) strategies using On-Demand or Spot Instances, and the
+ * BEST_FIT
strategy using Spot Instances, Batch might need to exceed
+ * maxvCpus
to meet your capacity requirements. In this event, Batch never exceeds
+ * maxvCpus
by more than a single instance.
With both BEST_FIT_PROGRESSIVE
, SPOT_CAPACITY_OPTIMIZED
, and SPOT_PRICE_CAPACITY_OPTIMIZED
- * strategies using On-Demand or Spot Instances, and the BEST_FIT
strategy using Spot
- * Instances, Batch might need to exceed maxvCpus
to meet your capacity requirements.
- * In this event, Batch never exceeds maxvCpus
by more than a single instance.
With BEST_FIT_PROGRESSIVE
,SPOT_CAPACITY_OPTIMIZED
and
+ * SPOT_PRICE_CAPACITY_OPTIMIZED
+ * (recommended) strategies using On-Demand or Spot Instances, and the
+ * BEST_FIT
strategy using Spot Instances, Batch might need to exceed
+ * maxvCpus
to meet your capacity requirements. In this event, Batch never exceeds
+ * maxvCpus
by more than a single instance.
The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can
- * specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example,
- * The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment.
+ * Required for Amazon EC2
+ * instances. You can specify the short name or full Amazon Resource Name (ARN) of an instance
+ * profile. For example, The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf.
- * For more information, see Batch service IAM
+ * For more information, see Batch service IAM
* role in the Batch User Guide. If the compute environment has a service-linked role, it can't be changed to use a regular IAM role.
@@ -5878,7 +5942,7 @@ export interface UpdateComputeEnvironmentRequest {
* (recommended) or prefix the role name with the path. Depending on how you created your Batch service role, its ARN might contain the Cancels a job in an Batch job queue. Jobs that are in the\n A When you try to cancel an array parent job in Jobs that progressed to the Cancels a job in an Batch job queue. Jobs that are in the\n A When you try to cancel an array parent job in Jobs that progressed to the The allocation strategy to use for the compute resource if not enough instances of the best\n fitting instance type can be allocated. This might be because of availability of the instance\n type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. Batch selects an instance type that best fits the needs of the jobs with a preference\n for the lowest-cost instance type. If additional instances of the selected instance type\n aren't available, Batch waits for the additional instances to be available. If there aren't\n enough instances available or the user is reaching Amazon EC2 service limits,\n additional jobs aren't run until the currently running jobs are completed. This allocation\n strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with\n Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types. Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources. The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources. With The allocation strategy to use for the compute resource if not enough instances of the best\n fitting instance type can be allocated. This might be because of availability of the instance\n type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. Batch selects an instance type that best fits the needs of the jobs with a preference\n for the lowest-cost instance type. If additional instances of the selected instance type\n aren't available, Batch waits for the additional instances to be available. If there aren't\n enough instances available or the user is reaching Amazon EC2 service limits,\n additional jobs aren't run until the currently running jobs are completed. This allocation\n strategy keeps costs lower but can limit scaling. If you're using Spot Fleets with\n Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types. 
Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources. The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources. With The maximum number of\n vCPUs that a\n compute environment can\n support. With The maximum number of\n vCPUs that a\n compute environment can\n support. With The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can\n specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example,\n This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. This parameter is required \n for Amazon EC2 instances types. You can specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example,\n This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. Key-value pair tags to be applied to EC2 resources that are launched in the compute\n environment. For Batch, these take the form of This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. Key-value pair tags to be applied to EC2 resources that are launched in the compute\n environment. For Batch, these take the form of This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. The maximum number of Amazon EC2 vCPUs that an environment can reach. With The maximum number of Amazon EC2 vCPUs that an environment can reach. With The allocation strategy to use for the compute resource if there's not enough instances of\n the best fitting instance type that can be allocated. This might be because of availability of\n the instance type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide. When updating a compute environment, changing the allocation strategy requires an\n infrastructure update of the compute environment. For more information, see Updating compute\n environments in the Batch User Guide. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types. Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources. The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources. 
With both The allocation strategy to use for the compute resource if there's not enough instances of\n the best fitting instance type that can be allocated. This might be because of availability of\n the instance type in the Region or Amazon EC2 service limits. For more\n information, see Allocation strategies in the Batch User Guide. When updating a compute environment, changing the allocation strategy requires an\n infrastructure update of the compute environment. For more information, see Updating compute\n environments in the Batch User Guide. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. Batch selects additional instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types with lower cost vCPUs. If\n additional instances of the previously selected instance types aren't available, Batch\n selects new instance types. Batch selects one or more instance types that are large enough to meet the requirements\n of the jobs in the queue. Its preference is for instance types that are less likely to be\n interrupted. This allocation strategy is only available for Spot Instance compute\n resources. The price and capacity optimized allocation strategy looks at both price and capacity to\n select the Spot Instance pools that are the least likely to be interrupted and have the lowest\n possible price. This allocation strategy is only available for Spot Instance compute\n resources. With The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment. You can\n specify the short name or full Amazon Resource Name (ARN) of an instance profile. For example,\n When updating a compute environment, changing this setting requires an infrastructure update\n of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. The Amazon ECS instance profile applied to Amazon EC2 instances in a compute environment.\n Required for Amazon EC2\n instances. You can specify the short name or full Amazon Resource Name (ARN) of an instance\n profile. For example, When updating a compute environment, changing this setting requires an infrastructure update\n of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide. This parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it. An object that represents the compute environment architecture for Batch jobs on Fargate. The private repository authentication credentials to use. The image used to start a container. This string is passed directly to the Docker daemon.\n Images in the Docker Hub registry are available by default. Other repositories are specified with\n Docker image architecture must match the processor architecture of the compute resources\n that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based\n compute resources. 
Images in Amazon ECR Public repositories use the full Images in Amazon ECR repositories use the full registry and repository URI (for example,\n Images in official repositories on Docker Hub use a single name (for example,\n Images in other repositories on Docker Hub are qualified with an organization name (for\n example, Images in other online repositories are qualified further by a domain name (for example,\n Required.\n The image used to start a container. This string is passed directly to the\n Docker daemon. Images in the Docker Hub registry are available by default. Other repositories are\n specified with\n Docker image architecture must match the processor architecture of the compute resources\n that they're scheduled on. For example, ARM-based Docker images can only run on ARM-based\n compute resources. Images in Amazon ECR Public repositories use the full Images in Amazon ECR repositories use the full registry and repository URI (for example,\n Images in official repositories on Docker Hub use a single name (for example,\n Images in other repositories on Docker Hub are qualified with an organization name (for\n example, Images in other online repositories are qualified further by a domain name (for example,\n An object that represents the compute environment architecture for Batch jobs on Fargate. The private repository authentication credentials to use. Creates an Batch compute environment. You can create In a managed compute environment, Batch manages the capacity and instance types of the compute resources\n within the environment. This is based on the compute resource specification that you define or the launch template that you\n specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot\n Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can\n optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a\n specified percentage of the On-Demand price. Multi-node parallel jobs aren't supported on Spot Instances. In an unmanaged compute environment, you can manage your own EC2 compute resources and have flexibility with how\n you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of\n your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the\n Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch\n your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the\n Amazon Elastic Container Service Developer Guide. To create a compute environment that uses EKS resources, the caller must have permissions to call\n Batch doesn't automatically upgrade the AMIs in a compute environment after it's created. For example, it\n also doesn't update the AMIs in your compute environment when a newer version of the Amazon ECS optimized AMI is\n available. You're responsible for the management of the guest operating system. This includes any updates and\n security patches. You're also responsible for any additional application software or utilities that you install on\n the compute resources. There are two ways to use a new AMI for your Batch jobs. 
The original method is to complete\n these steps: Create a new compute environment with the new AMI. Add the compute environment to an existing job queue. Remove the earlier compute environment from your job queue. Delete the earlier compute environment. In April 2022, Batch added enhanced support for updating compute environments. For more information, see\n Updating compute\n environments. To use the enhanced updating of compute environments to update AMIs, follow these\n rules: Either don't set the service role ( Set the allocation strategy ( Set the update to latest image version ( Don't specify an AMI ID in If these rules are followed, any update that starts an infrastructure update causes the AMI ID to be\n re-selected. If the Creates an Batch compute environment. You can create In a managed compute environment, Batch manages the capacity and instance types of the compute resources\n within the environment. This is based on the compute resource specification that you define or the launch template that you\n specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot\n Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can\n optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a\n specified percentage of the On-Demand price. Multi-node parallel jobs aren't supported on Spot Instances. In an unmanaged compute environment, you can manage your own EC2 compute resources and have flexibility with how\n you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of\n your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the\n Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch\n your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the\n Amazon Elastic Container Service Developer Guide. To create a compute environment that uses EKS resources, the caller must have permissions to call\n Batch doesn't automatically upgrade the AMIs in a compute environment after it's created. For example, it\n also doesn't update the AMIs in your compute environment when a newer version of the Amazon ECS optimized AMI is\n available. You're responsible for the management of the guest operating system. This includes any updates and\n security patches. You're also responsible for any additional application software or utilities that you install on\n the compute resources. There are two ways to use a new AMI for your Batch jobs. The original method is to complete\n these steps: Create a new compute environment with the new AMI. Add the compute environment to an existing job queue. Remove the earlier compute environment from your job queue. Delete the earlier compute environment. In April 2022, Batch added enhanced support for updating compute environments. For more information, see\n Updating compute\n environments. 
To use the enhanced updating of compute environments to update AMIs, follow these\n rules: Either don't set the service role ( Set the allocation strategy ( Set the update to latest image version ( Don't specify an AMI ID in If these rules are followed, any update that starts an infrastructure update causes the AMI ID to be\n re-selected. If the The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf. For\n more information, see Batch service IAM\n role in the Batch User Guide. If your account already created the Batch service-linked role, that role is used by default for your compute\n environment unless you specify a different role here. If the Batch service-linked role doesn't exist in your\n account, and no role is specified here, the service attempts to create the Batch service-linked role in your\n account. If your specified role has a path other than Depending on how you created your Batch service role, its ARN might contain the The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf. For\n more information, see Batch service IAM\n role in the Batch User Guide. If your account already created the Batch service-linked role, that role is used by default for your compute\n environment unless you specify a different role here. If the Batch service-linked role doesn't exist in your\n account, and no role is specified here, the service attempts to create the Batch service-linked role in your\n account. If your specified role has a path other than Depending on how you created your Batch service role, its ARN might contain the A list of up to 100 job definitions. Each entry in the list can either be an ARN in the format\n A list of up to 100 job definitions. Each entry in the list can either be an ARN in the\n format\n The image type to match with the instance type to select an AMI. The supported values are\n different for If the \n Amazon Linux\n 2: Default for all non-GPU instance families. \n Amazon Linux 2\n (GPU): Default for all GPU instance families (for example \n Amazon Linux. Amazon Linux has\n reached the end-of-life of standard support. For more information, see Amazon Linux AMI. If the \n Amazon\n Linux 2: Default for all non-GPU instance families. \n Amazon\n Linux 2 (accelerated): Default for all GPU instance families (for example,\n The image type to match with the instance type to select an AMI. The supported values are\n different for If the \n Amazon Linux\n 2: Default for all non-GPU instance families. \n Amazon Linux 2\n (GPU): Default for all GPU instance families (for example \n Amazon Linux 2023: Batch\n supports Amazon Linux 2023. Amazon Linux 2023 does not support \n Amazon Linux. Amazon Linux has\n reached the end-of-life of standard support. For more information, see Amazon Linux AMI. If the \n Amazon\n Linux 2: Default for all non-GPU instance families. \n Amazon\n Linux 2 (accelerated): Default for all GPU instance families (for example,\n Describes and uniquely identifies Kubernetes resources. For example, the compute environment\n that a pod runs in or the A value used to reserve some of the available maximum vCPU for fair share identifiers that\n aren't already used. The reserved ratio is\n For example, a The minimum value is 0 and the maximum value is 99. A value used to reserve some of the available maximum vCPU for fair share identifiers that\n aren't already used. 
The reserved ratio is\n For example, a The minimum value is 0 and the maximum value is 99. The Unix timestamp (in milliseconds) for when the job was started. More specifically, it's\n when the job transitioned from the The Unix timestamp (in milliseconds) for when the job was started. More specifically, it's\n when the job transitioned from the The job timeout time (in seconds) that's measured from the job attempt's\n For array jobs, the timeout applies to the child jobs, not to the parent array job. For multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the\n individual nodes. The job timeout time (in seconds) that's measured from the job attempt's\n For array jobs, the timeout applies to the child jobs, not to the parent array job. For multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the\n individual nodes. The version number of the launch template, If the value is If the AMI ID that's used in a compute environment is from the launch template, the AMI\n isn't changed when the compute environment is updated. It's only changed if the\n Default: The version number of the launch template, If the value is If the AMI ID that's used in a compute environment is from the launch template, the AMI\n isn't changed when the compute environment is updated. It's only changed if the\n Default: Details for a Docker volume mount point that's used in a job's container properties. This\n parameter maps to Details for a Docker volume mount point that's used in a job's container properties. This\n parameter maps to The Amazon Resource Name (ARN) of the secret containing the private repository credentials. The repository credentials for private registry authentication. The operating system for the compute environment.\n Valid values are:\n The following parameters can’t be set for Windows containers: The Batch Scheduler checks before registering a task definition with Fargate. If the job\n requires a Windows container and the first compute environment is Fargate Spot is not supported for Windows-based containers on Fargate. A job\n queue will be blocked if a Fargate Windows job is submitted to a job queue with only Fargate\n Spot compute environments.\n However, you can attach both The operating system for the compute environment.\n Valid values are:\n The following parameters can’t be set for Windows containers: The Batch Scheduler checks\n the compute environments\n that are attached to the job queue before registering a task definition with\n Fargate. In this\n scenario, the job queue is where the job is submitted. If the job requires a\n Windows container and the first compute environment is Fargate Spot is not supported for\n The vCPU architecture. The default value is This parameter must be set to\n \n The vCPU architecture. The default value is This parameter must be set to\n Fargate Spot is not supported for An object that represents the compute environment architecture for Batch jobs on Fargate. \n An object that represents the compute environment architecture for Batch jobs on Fargate.\n The scheduling priority for the job. This only affects jobs in job queues with a fair share policy. Jobs with a\n higher scheduling priority are scheduled before jobs with a lower scheduling priority. This overrides any scheduling\n priority in the job definition. The minimum supported value is 0 and the maximum supported value is 9999. The scheduling priority for the job. This only affects jobs in job queues with a fair share\n policy. 
Jobs with a higher scheduling priority are scheduled before jobs with a lower\n scheduling priority.\n This\n overrides any scheduling priority in the job definition and works only within a single share\n identifier. The minimum supported value is 0 and the maximum supported value is 9999. The timeout configuration for this SubmitJob operation. You can specify a timeout duration\n after which Batch terminates your jobs if they haven't finished. If a job is terminated due to a timeout, it isn't\n retried. The minimum value for the timeout is 60 seconds. This configuration overrides any timeout configuration\n specified in the job definition. For array jobs, child jobs have the same timeout configuration as the parent job.\n For more information, see Job\n Timeouts in the Amazon Elastic Container Service Developer Guide. The timeout configuration for this SubmitJob operation. You can specify a timeout duration\n after which Batch terminates your jobs if they haven't finished. If a job is terminated due to a timeout, it isn't\n retried. The minimum value for the timeout is 60 seconds. This configuration overrides any timeout configuration\n specified in the job definition. For array jobs, child jobs have the same timeout configuration as the parent job.\n For more information, see Job\n Timeouts in the Amazon Elastic Container Service Developer Guide. The hard limit for the The hard limit for the The The The This object isn't applicable to jobs that are running on Fargate resources. The This object isn't applicable to jobs that are running on Fargate resources. The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf.\n For more information, see Batch service IAM\n role in the Batch User Guide. If the compute environment has a service-linked role, it can't be changed to use a regular IAM role.\n Likewise, if the compute environment has a regular IAM role, it can't be changed to use a service-linked role. To\n update the parameters for the compute environment that require an infrastructure update to change, the AWSServiceRoleForBatch service-linked role must be used. For more information, see\n Updating compute\n environments in the Batch User Guide. If your specified role has a path other than Depending on how you created your Batch service role, its ARN might contain the The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf.\n For more information, see Batch service IAM\n role in the Batch User Guide. If the compute environment has a service-linked role, it can't be changed to use a regular IAM role.\n Likewise, if the compute environment has a regular IAM role, it can't be changed to use a service-linked role. To\n update the parameters for the compute environment that require an infrastructure update to change, the AWSServiceRoleForBatch service-linked role must be used. For more information, see\n Updating compute\n environments in the Batch User Guide. If your specified role has a path other than Depending on how you created your Batch service role, its ARN might contain the
+ *
* ecsInstanceRole
*
or
* arn:aws:iam::
service-role
- * path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the
+ * path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the
* service-role
path prefix. Because of this, we recommend that you specify the full ARN of your service
* role when you create compute environments.SUBMITTED
\n or\n PENDING
\n are\n canceled. A job\n inRUNNABLE
remains in RUNNABLE
until it reaches the head of the\n job queue. Then the job status is updated to\n FAILED
.PENDING
job is canceled after all dependency jobs are completed.\n Therefore, it may take longer than expected to cancel a job in PENDING
\n status.PENDING
, Batch attempts to\n cancel all child jobs. The array parent job is canceled when all child jobs are\n completed.STARTING
or\n RUNNING
state aren't canceled. However, the API operation still succeeds, even\n if no job is canceled. These jobs must be terminated with the TerminateJob\n operation.SUBMITTED
\n or\n PENDING
\n are\n canceled. A job\n inRUNNABLE
remains in RUNNABLE
until it reaches the head of the\n job queue. Then the job status is updated to\n FAILED
.PENDING
job is canceled after all dependency jobs are completed.\n Therefore, it may take longer than expected to cancel a job in PENDING
\n status.PENDING
, Batch attempts to\n cancel all child jobs. The array parent job is canceled when all child jobs are\n completed.STARTING
or\n RUNNING
state aren't canceled. However, the API operation still succeeds, even\n if no job is canceled. These jobs must be terminated with the TerminateJob\n operation.\n
\n BEST_FIT
, the Spot Fleet IAM Role must be specified. Compute resources that use\n a BEST_FIT
allocation strategy don't support infrastructure updates and can't\n update some parameters. For more information, see Updating compute environments in\n the Batch User Guide.BEST_FIT_PROGRESSIVE
,SPOT_CAPACITY_OPTIMIZED
and SPOT_PRICE_CAPACITY_OPTIMIZED
\n strategies using On-Demand or Spot Instances, and the BEST_FIT
strategy using Spot\n Instances, Batch might need to exceed maxvCpus
to meet your capacity requirements.\n In this event, Batch never exceeds maxvCpus
by more than a single instance.\n
\n BEST_FIT
, the Spot Fleet IAM Role must be specified. Compute resources that use\n a BEST_FIT
allocation strategy don't support infrastructure updates and can't\n update some parameters. For more information, see Updating compute environments in\n the Batch User Guide.BEST_FIT_PROGRESSIVE
,SPOT_CAPACITY_OPTIMIZED
and\n SPOT_PRICE_CAPACITY_OPTIMIZED
\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT
strategy using Spot Instances, Batch might need to exceed\n maxvCpus
to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus
by more than a single instance.BEST_FIT_PROGRESSIVE
, SPOT_CAPACITY_OPTIMIZED
and SPOT_PRICE_CAPACITY_OPTIMIZED
\n allocation strategies using On-Demand or Spot Instances, and the BEST_FIT
strategy\n using Spot Instances, Batch might need to exceed maxvCpus
to meet your capacity\n requirements. In this event, Batch never exceeds maxvCpus
by more than a single\n instance. For example, no more than a single instance from among those specified in your compute\n environment is allocated.BEST_FIT_PROGRESSIVE
,SPOT_CAPACITY_OPTIMIZED
and\n SPOT_PRICE_CAPACITY_OPTIMIZED
\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT
strategy using Spot Instances, Batch might need to exceed\n maxvCpus
to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus
by more than a single instance.\n ecsInstanceRole\n
or\n arn:aws:iam::
.\n For more information, see Amazon ECS instance role in the Batch User Guide.\n ecsInstanceRole\n
or\n arn:aws:iam::
.\n For more information, see Amazon ECS instance role in the Batch User Guide.\"String1\": \"String2\"
, where\n String1
is the tag key and String2
is the tag value-for example,\n { \"Name\": \"Batch Instance - C4OnDemand\" }
. This is helpful for recognizing your\n Batch instances in the Amazon EC2 console. Updating these tags requires an infrastructure update to\n the compute environment. For more information, see Updating compute environments in the\n Batch User Guide. These tags aren't seen when using the Batch\n ListTagsForResource
API operation.\"String1\": \"String2\"
, where\n String1
is the tag key and String2
is the tag value-for example,\n { \"Name\": \"Batch Instance - C4OnDemand\" }
. This is helpful for recognizing your\n Batch instances in the Amazon EC2 console. Updating these tags requires an infrastructure update to\n the compute environment. For more information, see Updating compute environments in the\n Batch User Guide. These tags aren't seen when using the Batch\n ListTagsForResource
API operation.BEST_FIT_PROGRESSIVE
, SPOT_CAPACITY_OPTIMIZED
, and SPOT_PRICE_CAPACITY_OPTIMIZED
\n allocation strategies using On-Demand or Spot Instances, and the BEST_FIT
strategy\n using Spot Instances, Batch might need to exceed maxvCpus
to meet your capacity\n requirements. In this event, Batch never exceeds maxvCpus
by more than a single\n instance. That is, no more than a single instance from among those specified in your compute\n environment.BEST_FIT_PROGRESSIVE
,SPOT_CAPACITY_OPTIMIZED
and\n SPOT_PRICE_CAPACITY_OPTIMIZED
\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT
strategy using Spot Instances, Batch might need to exceed\n maxvCpus
to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus
by more than a single instance.BEST_FIT
isn't\n supported when updating a compute environment.\n
\n BEST_FIT_PROGRESSIVE
, SPOT_CAPACITY_OPTIMIZED
, and SPOT_PRICE_CAPACITY_OPTIMIZED
\n strategies using On-Demand or Spot Instances, and the BEST_FIT
strategy using Spot\n Instances, Batch might need to exceed maxvCpus
to meet your capacity requirements.\n In this event, Batch never exceeds maxvCpus
by more than a single instance.BEST_FIT
isn't\n supported when updating a compute environment.\n
\n BEST_FIT_PROGRESSIVE
,SPOT_CAPACITY_OPTIMIZED
and\n SPOT_PRICE_CAPACITY_OPTIMIZED
\n (recommended) strategies using On-Demand or Spot Instances, and the\n BEST_FIT
strategy using Spot Instances, Batch might need to exceed\n maxvCpus
to meet your capacity requirements. In this event, Batch never exceeds\n maxvCpus
by more than a single instance.\n ecsInstanceRole\n
or\n arn:aws:iam::
.\n For more information, see Amazon ECS instance role in the Batch User Guide.\n ecsInstanceRole\n
or\n arn:aws:iam::
.\n For more information, see Amazon ECS instance role in the Batch User Guide.\n repository-url/image:tag\n
.\n It can be 255 characters long. It can contain uppercase and lowercase letters, numbers,\n hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and number signs (#). This parameter maps to Image
in the\n Create a container section of the Docker Remote API and the IMAGE
\n parameter of docker run.\n
"
+ "smithy.api#documentation": "registry/repository[:tag]
or\n registry/repository[@digest]
naming conventions. For example,\n public.ecr.aws/registry_alias/my-web-app:latest\n
.123456789012.dkr.ecr.
).ubuntu
or mongo
).amazon/amazon-ecs-agent
).quay.io/assemblyline/ubuntu
).\n repository-url/image:tag\n
.\n It can be 255 characters long. It can contain uppercase and lowercase letters, numbers,\n hyphens (-), underscores (_), colons (:), periods (.), forward slashes (/), and number signs (#). This parameter maps to Image
in the\n Create a container section of the Docker Remote API and the IMAGE
\n parameter of docker run.\n
"
}
},
"vcpus": {
@@ -2305,7 +2314,16 @@
}
},
"runtimePlatform": {
- "target": "com.amazonaws.batch#RuntimePlatform"
+ "target": "com.amazonaws.batch#RuntimePlatform",
+ "traits": {
+ "smithy.api#documentation": "registry/repository[:tag]
or\n registry/repository[@digest]
naming conventions. For example,\n public.ecr.aws/registry_alias/my-web-app:latest\n
.123456789012.dkr.ecr.
).ubuntu
or mongo
).amazon/amazon-ecs-agent
).quay.io/assemblyline/ubuntu
).MANAGED
or UNMANAGED
compute\n environments. MANAGED
compute environments can use Amazon EC2 or Fargate resources.\n UNMANAGED
compute environments can only use EC2 resources.eks:DescribeCluster
.\n
\n \n
Either don't set the service role (serviceRole) parameter or set it to the AWSBatchServiceRole service-linked role. Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, or SPOT_PRICE_CAPACITY_OPTIMIZED. Set the update to latest image version (updateToLatestImageVersion) parameter to true. The updateToLatestImageVersion parameter is used when you update a compute environment. This parameter is ignored when you create a compute environment. Don't specify an AMI ID in imageId, imageIdOverride (in ec2Configuration), or in the launch template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID in the imageId or imageIdOverride parameters, or the launch template identified by the LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the imageId or imageIdOverride parameters. It can only be replaced by specifying a different launch template, or if the launch template version is set to $Default or $Latest, by setting either a new default version for the launch template (if $Default) or by adding a new version to the launch template (if $Latest). If the version setting in the launch template (launchTemplate) is set to $Latest or $Default, the latest or default version of the launch template is evaluated at the time of the infrastructure update, even if the launchTemplate wasn't updated.
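A sketch of the kind of infrastructure update described above; the compute environment name is a placeholder, and the update relies on updateToLatestImageVersion inside computeResources so that Batch picks the latest supported Amazon ECS optimized AMI.

import { BatchClient, UpdateComputeEnvironmentCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({});

// Trigger an infrastructure update that lets Batch re-select the AMI.
// serviceRole is left unset so the AWSBatchServiceRole service-linked
// role continues to be used.
await client.send(
  new UpdateComputeEnvironmentCommand({
    computeEnvironment: "spot-ce",                          // name or ARN, placeholder
    computeResources: {
      allocationStrategy: "SPOT_PRICE_CAPACITY_OPTIMIZED",
      updateToLatestImageVersion: true,
    },
  })
);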
MANAGED or UNMANAGED compute environments: MANAGED compute environments can use Amazon EC2 or Fargate resources. UNMANAGED compute environments can only use EC2 resources. To create a compute environment that uses EKS resources, the caller must have permissions to call eks:DescribeCluster.

Either don't set the service role (serviceRole) parameter or set it to the AWSBatchServiceRole service-linked role. Set the allocation strategy (allocationStrategy) parameter to BEST_FIT_PROGRESSIVE, SPOT_CAPACITY_OPTIMIZED, or SPOT_PRICE_CAPACITY_OPTIMIZED. Set the update to latest image version (updateToLatestImageVersion) parameter to true. The updateToLatestImageVersion parameter is used when you update a compute environment. This parameter is ignored when you create a compute environment. Don't specify an AMI ID in imageId, imageIdOverride (in ec2Configuration), or in the launch template (launchTemplate). In that case, Batch selects the latest Amazon ECS optimized AMI that's supported by Batch at the time the infrastructure update is initiated. Alternatively, you can specify the AMI ID in the imageId or imageIdOverride parameters, or the launch template identified by the LaunchTemplate properties. Changing any of these properties starts an infrastructure update. If the AMI ID is specified in the launch template, it can't be replaced by specifying an AMI ID in either the imageId or imageIdOverride parameters. It can only be replaced by specifying a different launch template, or if the launch template version is set to $Default or $Latest, by setting either a new default version for the launch template (if $Default) or by adding a new version to the launch template (if $Latest). If the version setting in the launch template (launchTemplate) is set to $Latest or $Default, the latest or default version of the launch template is evaluated at the time of the infrastructure update, even if the launchTemplate wasn't updated.
If your specified role has a path other than /, then you must specify either the full role ARN (recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/, specify /foo/bar as the role name. For more information, see Friendly names and paths in the IAM User Guide. Depending on how you created your Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

If your specified role has a path other than /, then you must specify either the full role ARN (recommended) or prefix the role name with the path. For example, if a role with the name bar has a path of /foo/, specify /foo/bar as the role name. For more information, see Friendly names and paths in the IAM User Guide. Depending on how you created your Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

The ARN in the form arn:aws:batch:${Region}:${Account}:job-definition/${JobDefinitionName}:${Revision}, or a short version using the form ${JobDefinitionName}:${Revision}.

The ARN in the form arn:aws:batch:${Region}:${Account}:job-definition/${JobDefinitionName}:${Revision}, or a short version using the form ${JobDefinitionName}:${Revision}. This parameter can't be used with other parameters. ECS and EKS resources.
",
+ "smithy.api#documentation": "imageIdOverride
parameter isn't specified, then a recent Amazon ECS-optimized Amazon Linux 2 AMI (ECS_AL2
) is used. If a new image type is\n specified in an update, but neither an imageId
nor a imageIdOverride
\n parameter is specified, then the latest Amazon ECS optimized AMI for that image type that's\n supported by Batch is used.\n
\n P4
and\n G4
) and can be used for all non Amazon Web Services Graviton-based instance types.imageIdOverride
parameter isn't specified, then a recent Amazon EKS-optimized Amazon Linux\n AMI (EKS_AL2
) is used. If a new image type is specified in an update,\n but neither an imageId
nor a imageIdOverride
parameter is specified,\n then the latest Amazon EKS optimized AMI for that image type that Batch supports is used.\n
\n P4
and G4
) and can be used for all non Amazon Web Services Graviton-based\n instance types.ECS
and EKS
resources.\n
",
"smithy.api#required": {}
}
},
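A hedged sketch of pinning the AMI family through ec2Configuration; the image type shown (ECS_AL2_NVIDIA) and the commented imageIdOverride are illustrative assumptions, not values taken from this diff.

import { BatchClient, CreateComputeEnvironmentCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({});

// GPU compute environment whose AMI family is pinned via ec2Configuration.
await client.send(
  new CreateComputeEnvironmentCommand({
    computeEnvironmentName: "gpu-ce",                 // placeholder
    type: "MANAGED",
    computeResources: {
      type: "EC2",
      minvCpus: 0,
      maxvCpus: 128,
      instanceTypes: ["g4dn.xlarge"],
      instanceRole: "ecsInstanceRole",
      subnets: ["subnet-0123456789abcdef0"],          // placeholder
      ec2Configuration: [
        {
          imageType: "ECS_AL2_NVIDIA",
          // imageIdOverride: "ami-0123456789abcdef0", // optional explicit AMI (placeholder)
        },
      ],
    },
  })
);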
@@ -4148,7 +4219,10 @@
}
},
"metadata": {
- "target": "com.amazonaws.batch#EksMetadata"
+ "target": "com.amazonaws.batch#EksMetadata",
+ "traits": {
+ "smithy.api#documentation": "imageIdOverride
parameter isn't specified, then a recent Amazon ECS-optimized Amazon Linux 2 AMI (ECS_AL2
) is used. If a new image type is\n specified in an update, but neither an imageId
nor a imageIdOverride
\n parameter is specified, then the latest Amazon ECS optimized AMI for that image type that's\n supported by Batch is used.\n
\n P4
and\n G4
) and can be used for all non Amazon Web Services Graviton-based instance types.A1
instances.imageIdOverride
parameter isn't specified, then a recent Amazon EKS-optimized Amazon Linux\n AMI (EKS_AL2
) is used. If a new image type is specified in an update,\n but neither an imageId
nor a imageIdOverride
parameter is specified,\n then the latest Amazon EKS optimized AMI for that image type that Batch supports is used.\n
\n P4
and G4
) and can be used for all non Amazon Web Services Graviton-based\n instance types.jobID
for a job running in the pod. For more information,\n see Understanding Kubernetes Objects in the Kubernetes\n documentation.(computeReservation/100)^ActiveFairShares\n
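A minimal sketch of attaching Kubernetes metadata labels to an EKS job definition; the label keys, image, and resource requests are placeholders.

import { BatchClient, RegisterJobDefinitionCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({});

// EKS job definition whose pod carries Kubernetes labels via eksProperties.
await client.send(
  new RegisterJobDefinitionCommand({
    jobDefinitionName: "eks-job",                       // placeholder
    type: "container",
    eksProperties: {
      podProperties: {
        metadata: {
          labels: { environment: "test", team: "analytics" },  // placeholders
        },
        containers: [
          {
            image: "public.ecr.aws/amazonlinux/amazonlinux:2023",
            command: ["echo", "hello"],
            resources: {
              requests: { cpu: "1", memory: "1024Mi" },
            },
          },
        ],
      },
    },
  })
);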
The reserved ratio is (computeReservation/100)^ActiveFairShares, where ActiveFairShares is the number of active fair share identifiers. For example, a computeReservation value of 50 indicates that Batch reserves 50% of the maximum available vCPU if there's only one fair share identifier. It reserves 25% if there are two fair share identifiers. It reserves 12.5% if there are three fair share identifiers. A computeReservation value of 25 indicates that Batch should reserve 25% of the maximum available vCPU if there's only one fair share identifier, 6.25% if there are two fair share identifiers, and 1.56% if there are three fair share identifiers.

The reserved ratio is (computeReservation/100)^ActiveFairShares, where ActiveFairShares is the number of active fair share identifiers. For example, a computeReservation value of 50 indicates that Batch reserves 50% of the maximum available vCPU if there's only one fair share identifier. It reserves 25% if there are two fair share identifiers. It reserves 12.5% if there are three fair share identifiers. A computeReservation value of 25 indicates that Batch should reserve 25% of the maximum available vCPU if there's only one fair share identifier, 6.25% if there are two fair share identifiers, and 1.56% if there are three fair share identifiers.
The timestamp for when the job transitioned from the STARTING state to the RUNNING state. This parameter isn't provided for child jobs of array jobs or multi-node parallel jobs.

The timestamp for when the job transitioned from the STARTING state to the RUNNING state.

The job timeout (in seconds) that's measured from the job attempt's startedAt timestamp. After this time passes, Batch terminates your jobs if they aren't finished. The minimum value for the timeout is 60 seconds.

The job timeout (in seconds) that's measured from the job attempt's startedAt timestamp. After this time passes, Batch terminates your jobs if they aren't finished. The minimum value for the timeout is 60 seconds.
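A small sketch showing the job-level timeout described above; one hour is an arbitrary example value (the minimum allowed is 60 seconds), and the queue and definition names are placeholders.

import { BatchClient, SubmitJobCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({});

// Batch terminates the job if it is still running one hour after its
// startedAt timestamp.
await client.send(
  new SubmitJobCommand({
    jobName: "timeout-example",
    jobQueue: "example-queue",          // placeholder
    jobDefinition: "web-app-job:1",     // placeholder
    timeout: { attemptDurationSeconds: 3600 },
  })
);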
The version number of the launch template, $Latest, or $Default. If the value is $Latest, the latest version of the launch template is used. If the value is $Default, the default version of the launch template is used. The AMI ID is only changed if the updateToLatestImageVersion parameter for the compute environment is set to true. During an infrastructure update, if either $Latest or $Default is specified, Batch re-evaluates the launch template version, and it might use a different version of the launch template. This is the case even if the launch template isn't specified in the update. When updating a compute environment, changing the launch template requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. Default: $Default.

The version number of the launch template, $Latest, or $Default. If the value is $Latest, the latest version of the launch template is used. If the value is $Default, the default version of the launch template is used. The AMI ID is only changed if the updateToLatestImageVersion parameter for the compute environment is set to true. During an infrastructure update, if either $Latest or $Default is specified, Batch re-evaluates the launch template version, and it might use a different version of the launch template. This is the case even if the launch template isn't specified in the update. When updating a compute environment, changing the launch template requires an infrastructure update of the compute environment. For more information, see Updating compute environments in the Batch User Guide. Default: $Default.
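A hedged sketch of referencing a launch template with its version set to $Latest, so infrastructure updates re-evaluate it as described above; the launch template ID, subnet, and sizing values are placeholders.

import { BatchClient, CreateComputeEnvironmentCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({});

// Compute environment whose instances are launched from a launch template.
await client.send(
  new CreateComputeEnvironmentCommand({
    computeEnvironmentName: "lt-ce",                  // placeholder
    type: "MANAGED",
    computeResources: {
      type: "EC2",
      minvCpus: 0,
      maxvCpus: 32,
      instanceTypes: ["optimal"],
      instanceRole: "ecsInstanceRole",
      subnets: ["subnet-0123456789abcdef0"],          // placeholder
      launchTemplate: {
        launchTemplateId: "lt-0123456789abcdef0",     // placeholder
        version: "$Latest",
      },
    },
  })
);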
This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.

This parameter maps to Volumes in the Create a container section of the Docker Remote API and the --volume option to docker run.
The operating system for the compute environment. Valid values are: LINUX (default), WINDOWS_SERVER_2019_CORE, WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and WINDOWS_SERVER_2022_FULL. The following parameters can't be set for Windows containers: linuxParameters, privileged, user, ulimits, readonlyRootFilesystem, and efsVolumeConfiguration. If the job requires a Windows container and the first compute environment is LINUX, the compute environment is skipped and the next is checked until a Windows-based compute environment is found. However, you can attach both FARGATE and FARGATE_SPOT compute environments to the same job queue.

The operating system for the compute environment. Valid values are: LINUX (default), WINDOWS_SERVER_2019_CORE, WINDOWS_SERVER_2019_FULL, WINDOWS_SERVER_2022_CORE, and WINDOWS_SERVER_2022_FULL. The following parameters can't be set for Windows containers: linuxParameters, privileged, user, ulimits, readonlyRootFilesystem, and efsVolumeConfiguration. If the job requires a Windows container and the first compute environment is LINUX, the compute environment is skipped and the next compute environment is checked until a Windows-based compute environment is found. Fargate Spot is not supported for ARM64 and Windows-based containers on Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is submitted to a job queue with only Fargate Spot compute environments. However, you can attach both FARGATE and FARGATE_SPOT compute environments to the same job queue.

The vCPU architecture. The default value is X86_64. Valid values are X86_64 and ARM64. This parameter must be set to X86_64 for Windows containers.

The vCPU architecture. The default value is X86_64. Valid values are X86_64 and ARM64. This parameter must be set to X86_64 for Windows containers. Fargate Spot is not supported for ARM64 and Windows-based containers on Fargate. A job queue will be blocked if a Fargate ARM64 or Windows job is submitted to a job queue with only Fargate Spot compute environments. However, you can attach both FARGATE and FARGATE_SPOT compute environments to the same job queue.
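A sketch of a Fargate job definition that sets runtimePlatform for Windows on x86_64; the Windows image, execution role ARN, and resource values are assumptions for illustration, not values from this change.

import { BatchClient, RegisterJobDefinitionCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({});

// Fargate job definition targeting Windows Server 2019 Core on x86_64.
await client.send(
  new RegisterJobDefinitionCommand({
    jobDefinitionName: "windows-job",                   // placeholder
    type: "container",
    platformCapabilities: ["FARGATE"],
    containerProperties: {
      image: "mcr.microsoft.com/windows/servercore:ltsc2019",   // placeholder image
      runtimePlatform: {
        operatingSystemFamily: "WINDOWS_SERVER_2019_CORE",
        cpuArchitecture: "X86_64",
      },
      executionRoleArn: "arn:aws:iam::111122223333:role/ecsTaskExecutionRole", // placeholder
      networkConfiguration: { assignPublicIp: "ENABLED" },
      resourceRequirements: [
        { type: "VCPU", value: "1" },
        { type: "MEMORY", value: "2048" },
      ],
    },
  })
);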
The hard limit for the ulimit type. The soft limit for the ulimit type. The type of the ulimit. The type of the ulimit. Valid values are: core | cpu | data | fsize | locks | memlock | msgqueue | nice | nofile | nproc | rss | rtprio | rttime | sigpending | stack. A list of ulimit settings to pass to the container. A list of ulimit settings to pass to the container. For more information, see Ulimit.
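A minimal sketch of raising the nofile ulimit for a container job; the limit values and image are illustrative.

import { BatchClient, RegisterJobDefinitionCommand } from "@aws-sdk/client-batch";

const client = new BatchClient({});

// Job definition that raises the open-files limit inside the container.
await client.send(
  new RegisterJobDefinitionCommand({
    jobDefinitionName: "ulimit-job",                    // placeholder
    type: "container",
    containerProperties: {
      image: "public.ecr.aws/amazonlinux/amazonlinux:2023",
      command: ["sh", "-c", "ulimit -n"],
      resourceRequirements: [
        { type: "VCPU", value: "1" },
        { type: "MEMORY", value: "2048" },
      ],
      ulimits: [{ name: "nofile", softLimit: 65536, hardLimit: 65536 }],
    },
  })
);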
If your specified role has a path other than /, then you must either specify the full role ARN (recommended) or prefix the role name with the path. Depending on how you created your Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

If your specified role has a path other than /, then you must either specify the full role ARN (recommended) or prefix the role name with the path. Depending on how you created your Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.