diff --git a/CHANGELOG.md b/CHANGELOG.md index c565c38d410..84e0bc34083 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +Release v1.42.1 (2021-11-09) +=== + +### Service Client Updates +* `service/batch`: Updates service API, documentation, paginators, and examples + * Adds support for scheduling policy APIs. +* `service/greengrassv2`: Updates service API and documentation +* `service/health`: Updates service documentation + * Documentation updates for AWS Health. + Release v1.42.0 (2021-11-08) === diff --git a/aws/endpoints/defaults.go b/aws/endpoints/defaults.go index 614345da70a..bc91d275318 100644 --- a/aws/endpoints/defaults.go +++ b/aws/endpoints/defaults.go @@ -4498,6 +4498,138 @@ var awsPartition = partition{ }, }, }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "compute-optimizer.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "compute-optimizer.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "compute-optimizer.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", 
+ }: endpoint{ + Hostname: "compute-optimizer.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "compute-optimizer.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "compute-optimizer.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "compute-optimizer.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "compute-optimizer.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "compute-optimizer.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "compute-optimizer.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "config": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -18316,12 +18448,21 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: 
endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18331,12 +18472,24 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -20971,6 +21124,26 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "compute-optimizer.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "compute-optimizer.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "config": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/aws/version.go b/aws/version.go index f885a90b95b..6dc54971a7e 100644 --- a/aws/version.go +++ b/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.42.0" +const SDKVersion = "1.42.1" diff --git a/models/apis/batch/2016-08-10/api-2.json b/models/apis/batch/2016-08-10/api-2.json index aed72587a73..e6fcec312b7 100644 --- a/models/apis/batch/2016-08-10/api-2.json +++ b/models/apis/batch/2016-08-10/api-2.json @@ -51,6 +51,19 @@ {"shape":"ServerException"} ] }, + "CreateSchedulingPolicy":{ + "name":"CreateSchedulingPolicy", + "http":{ + 
"method":"POST", + "requestUri":"/v1/createschedulingpolicy" + }, + "input":{"shape":"CreateSchedulingPolicyRequest"}, + "output":{"shape":"CreateSchedulingPolicyResponse"}, + "errors":[ + {"shape":"ClientException"}, + {"shape":"ServerException"} + ] + }, "DeleteComputeEnvironment":{ "name":"DeleteComputeEnvironment", "http":{ @@ -77,6 +90,19 @@ {"shape":"ServerException"} ] }, + "DeleteSchedulingPolicy":{ + "name":"DeleteSchedulingPolicy", + "http":{ + "method":"POST", + "requestUri":"/v1/deleteschedulingpolicy" + }, + "input":{"shape":"DeleteSchedulingPolicyRequest"}, + "output":{"shape":"DeleteSchedulingPolicyResponse"}, + "errors":[ + {"shape":"ClientException"}, + {"shape":"ServerException"} + ] + }, "DeregisterJobDefinition":{ "name":"DeregisterJobDefinition", "http":{ @@ -142,6 +168,19 @@ {"shape":"ServerException"} ] }, + "DescribeSchedulingPolicies":{ + "name":"DescribeSchedulingPolicies", + "http":{ + "method":"POST", + "requestUri":"/v1/describeschedulingpolicies" + }, + "input":{"shape":"DescribeSchedulingPoliciesRequest"}, + "output":{"shape":"DescribeSchedulingPoliciesResponse"}, + "errors":[ + {"shape":"ClientException"}, + {"shape":"ServerException"} + ] + }, "ListJobs":{ "name":"ListJobs", "http":{ @@ -155,6 +194,19 @@ {"shape":"ServerException"} ] }, + "ListSchedulingPolicies":{ + "name":"ListSchedulingPolicies", + "http":{ + "method":"POST", + "requestUri":"/v1/listschedulingpolicies" + }, + "input":{"shape":"ListSchedulingPoliciesRequest"}, + "output":{"shape":"ListSchedulingPoliciesResponse"}, + "errors":[ + {"shape":"ClientException"}, + {"shape":"ServerException"} + ] + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -258,6 +310,19 @@ {"shape":"ClientException"}, {"shape":"ServerException"} ] + }, + "UpdateSchedulingPolicy":{ + "name":"UpdateSchedulingPolicy", + "http":{ + "method":"POST", + "requestUri":"/v1/updateschedulingpolicy" + }, + "input":{"shape":"UpdateSchedulingPolicyRequest"}, + 
"output":{"shape":"UpdateSchedulingPolicyResponse"}, + "errors":[ + {"shape":"ClientException"}, + {"shape":"ServerException"} + ] } }, "shapes":{ @@ -402,6 +467,7 @@ "members":{ "computeEnvironmentName":{"shape":"String"}, "computeEnvironmentArn":{"shape":"String"}, + "unmanagedvCpus":{"shape":"Integer"}, "ecsClusterArn":{"shape":"String"}, "tags":{"shape":"TagrisTagsMap"}, "type":{"shape":"CEType"}, @@ -572,6 +638,7 @@ "computeEnvironmentName":{"shape":"String"}, "type":{"shape":"CEType"}, "state":{"shape":"CEState"}, + "unmanagedvCpus":{"shape":"Integer"}, "computeResources":{"shape":"ComputeResource"}, "serviceRole":{"shape":"String"}, "tags":{"shape":"TagrisTagsMap"} @@ -594,6 +661,7 @@ "members":{ "jobQueueName":{"shape":"String"}, "state":{"shape":"JQState"}, + "schedulingPolicyArn":{"shape":"String"}, "priority":{"shape":"Integer"}, "computeEnvironmentOrder":{"shape":"ComputeEnvironmentOrders"}, "tags":{"shape":"TagrisTagsMap"} @@ -610,6 +678,26 @@ "jobQueueArn":{"shape":"String"} } }, + "CreateSchedulingPolicyRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{"shape":"String"}, + "fairsharePolicy":{"shape":"FairsharePolicy"}, + "tags":{"shape":"TagrisTagsMap"} + } + }, + "CreateSchedulingPolicyResponse":{ + "type":"structure", + "required":[ + "name", + "arn" + ], + "members":{ + "name":{"shape":"String"}, + "arn":{"shape":"String"} + } + }, "DeleteComputeEnvironmentRequest":{ "type":"structure", "required":["computeEnvironment"], @@ -634,6 +722,18 @@ "members":{ } }, + "DeleteSchedulingPolicyRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"String"} + } + }, + "DeleteSchedulingPolicyResponse":{ + "type":"structure", + "members":{ + } + }, "DeregisterJobDefinitionRequest":{ "type":"structure", "required":["jobDefinition"], @@ -706,6 +806,19 @@ "jobs":{"shape":"JobDetailList"} } }, + "DescribeSchedulingPoliciesRequest":{ + "type":"structure", + "required":["arns"], + "members":{ + 
"arns":{"shape":"StringList"} + } + }, + "DescribeSchedulingPoliciesResponse":{ + "type":"structure", + "members":{ + "schedulingPolicies":{"shape":"SchedulingPolicyDetailList"} + } + }, "Device":{ "type":"structure", "required":["hostPath"], @@ -793,12 +906,21 @@ "type":"list", "member":{"shape":"EvaluateOnExit"} }, + "FairsharePolicy":{ + "type":"structure", + "members":{ + "shareDecaySeconds":{"shape":"Integer"}, + "computeReservation":{"shape":"Integer"}, + "shareDistribution":{"shape":"ShareAttributesList"} + } + }, "FargatePlatformConfiguration":{ "type":"structure", "members":{ "platformVersion":{"shape":"String"} } }, + "Float":{"type":"float"}, "Host":{ "type":"structure", "members":{ @@ -848,6 +970,7 @@ "revision":{"shape":"Integer"}, "status":{"shape":"String"}, "type":{"shape":"String"}, + "schedulingPriority":{"shape":"Integer"}, "parameters":{"shape":"ParametersMap"}, "retryStrategy":{"shape":"RetryStrategy"}, "containerProperties":{"shape":"ContainerProperties"}, @@ -896,6 +1019,8 @@ "jobId":{"shape":"String"}, "jobQueue":{"shape":"String"}, "status":{"shape":"JobStatus"}, + "shareIdentifier":{"shape":"String"}, + "schedulingPriority":{"shape":"Integer"}, "attempts":{"shape":"AttemptDetails"}, "statusReason":{"shape":"String"}, "createdAt":{"shape":"Long"}, @@ -932,6 +1057,7 @@ "jobQueueName":{"shape":"String"}, "jobQueueArn":{"shape":"String"}, "state":{"shape":"JQState"}, + "schedulingPolicyArn":{"shape":"String"}, "status":{"shape":"JQStatus"}, "statusReason":{"shape":"String"}, "priority":{"shape":"Integer"}, @@ -1043,6 +1169,20 @@ "nextToken":{"shape":"String"} } }, + "ListSchedulingPoliciesRequest":{ + "type":"structure", + "members":{ + "maxResults":{"shape":"Integer"}, + "nextToken":{"shape":"String"} + } + }, + "ListSchedulingPoliciesResponse":{ + "type":"structure", + "members":{ + "schedulingPolicies":{"shape":"SchedulingPolicyListingDetailList"}, + "nextToken":{"shape":"String"} + } + }, "ListTagsForResourceRequest":{ "type":"structure", 
"required":["resourceArn"], @@ -1202,6 +1342,7 @@ "jobDefinitionName":{"shape":"String"}, "type":{"shape":"JobDefinitionType"}, "parameters":{"shape":"ParametersMap"}, + "schedulingPriority":{"shape":"Integer"}, "containerProperties":{"shape":"ContainerProperties"}, "nodeProperties":{"shape":"NodeProperties"}, "retryStrategy":{"shape":"RetryStrategy"}, @@ -1261,6 +1402,34 @@ "evaluateOnExit":{"shape":"EvaluateOnExitList"} } }, + "SchedulingPolicyDetail":{ + "type":"structure", + "required":[ + "name", + "arn" + ], + "members":{ + "name":{"shape":"String"}, + "arn":{"shape":"String"}, + "fairsharePolicy":{"shape":"FairsharePolicy"}, + "tags":{"shape":"TagrisTagsMap"} + } + }, + "SchedulingPolicyDetailList":{ + "type":"list", + "member":{"shape":"SchedulingPolicyDetail"} + }, + "SchedulingPolicyListingDetail":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"String"} + } + }, + "SchedulingPolicyListingDetailList":{ + "type":"list", + "member":{"shape":"SchedulingPolicyListingDetail"} + }, "Secret":{ "type":"structure", "required":[ @@ -1285,6 +1454,18 @@ "exception":true, "fault":true }, + "ShareAttributes":{ + "type":"structure", + "required":["shareIdentifier"], + "members":{ + "shareIdentifier":{"shape":"String"}, + "weightFactor":{"shape":"Float"} + } + }, + "ShareAttributesList":{ + "type":"list", + "member":{"shape":"ShareAttributes"} + }, "String":{"type":"string"}, "StringList":{ "type":"list", @@ -1300,6 +1481,8 @@ "members":{ "jobName":{"shape":"String"}, "jobQueue":{"shape":"String"}, + "shareIdentifier":{"shape":"String"}, + "schedulingPriorityOverride":{"shape":"Integer"}, "arrayProperties":{"shape":"ArrayProperties"}, "dependsOn":{"shape":"JobDependencyList"}, "jobDefinition":{"shape":"String"}, @@ -1450,6 +1633,7 @@ "members":{ "computeEnvironment":{"shape":"String"}, "state":{"shape":"CEState"}, + "unmanagedvCpus":{"shape":"Integer"}, "computeResources":{"shape":"ComputeResourceUpdate"}, "serviceRole":{"shape":"String"} } 
@@ -1467,6 +1651,7 @@ "members":{ "jobQueue":{"shape":"String"}, "state":{"shape":"JQState"}, + "schedulingPolicyArn":{"shape":"String"}, "priority":{"shape":"Integer"}, "computeEnvironmentOrder":{"shape":"ComputeEnvironmentOrders"} } @@ -1478,6 +1663,19 @@ "jobQueueArn":{"shape":"String"} } }, + "UpdateSchedulingPolicyRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{"shape":"String"}, + "fairsharePolicy":{"shape":"FairsharePolicy"} + } + }, + "UpdateSchedulingPolicyResponse":{ + "type":"structure", + "members":{ + } + }, "Volume":{ "type":"structure", "members":{ diff --git a/models/apis/batch/2016-08-10/docs-2.json b/models/apis/batch/2016-08-10/docs-2.json index a11b2cdb4f0..88f649700be 100644 --- a/models/apis/batch/2016-08-10/docs-2.json +++ b/models/apis/batch/2016-08-10/docs-2.json @@ -1,26 +1,31 @@ { "version": "2.0", - "service": "Batch

Using Batch, you can run batch computing workloads on the Cloud. Batch computing is a common means for developers, scientists, and engineers to access large amounts of compute resources. Batch uses the advantages of this computing workload to remove the undifferentiated heavy lifting of configuring and managing required infrastructure. At the same time, it also adopts a familiar batch computing software approach. Given these advantages, Batch can help you to efficiently provision resources in response to jobs submitted, thus effectively helping you to eliminate capacity constraints, reduce compute costs, and deliver your results more quickly.

As a fully managed service, Batch can run batch computing workloads of any scale. Batch automatically provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific workloads. With Batch, there's no need to install or manage batch computing software. This means that you can focus your time and energy on analyzing results and solving your specific problems.

", + "service": "Batch

Using Batch, you can run batch computing workloads on the Amazon Web Services Cloud. Batch computing is a common means for developers, scientists, and engineers to access large amounts of compute resources. Batch uses the advantages of this computing workload to remove the undifferentiated heavy lifting of configuring and managing required infrastructure. At the same time, it also adopts a familiar batch computing software approach. Given these advantages, Batch can help you to efficiently provision resources in response to jobs submitted, thus effectively helping you to eliminate capacity constraints, reduce compute costs, and deliver your results more quickly.

As a fully managed service, Batch can run batch computing workloads of any scale. Batch automatically provisions compute resources and optimizes workload distribution based on the quantity and scale of your specific workloads. With Batch, there's no need to install or manage batch computing software. This means that you can focus your time and energy on analyzing results and solving your specific problems.

", "operations": { "CancelJob": "

Cancels a job in an Batch job queue. Jobs that are in the SUBMITTED, PENDING, or RUNNABLE state are canceled. Jobs that have progressed to STARTING or RUNNING aren't canceled, but the API operation still succeeds, even if no job is canceled. These jobs must be terminated with the TerminateJob operation.

", "CreateComputeEnvironment": "

Creates an Batch compute environment. You can create MANAGED or UNMANAGED compute environments. MANAGED compute environments can use Amazon EC2 or Fargate resources. UNMANAGED compute environments can only use EC2 resources.

In a managed compute environment, Batch manages the capacity and instance types of the compute resources within the environment. This is based on the compute resource specification that you define or the launch template that you specify when you create the compute environment. Either, you can choose to use EC2 On-Demand Instances and EC2 Spot Instances. Or, you can use Fargate and Fargate Spot capacity in your managed compute environment. You can optionally set a maximum price so that Spot Instances only launch when the Spot Instance price is less than a specified percentage of the On-Demand price.

Multi-node parallel jobs aren't supported on Spot Instances.

In an unmanaged compute environment, you can manage your own EC2 compute resources and have a lot of flexibility with how you configure your compute resources. For example, you can use custom AMIs. However, you must verify that each of your AMIs meet the Amazon ECS container instance AMI specification. For more information, see container instance AMIs in the Amazon Elastic Container Service Developer Guide. After you created your unmanaged compute environment, you can use the DescribeComputeEnvironments operation to find the Amazon ECS cluster that's associated with it. Then, launch your container instances into that Amazon ECS cluster. For more information, see Launching an Amazon ECS container instance in the Amazon Elastic Container Service Developer Guide.

Batch doesn't upgrade the AMIs in a compute environment after the environment is created. For example, it doesn't update the AMIs when a newer version of the Amazon ECS optimized AMI is available. Therefore, you're responsible for managing the guest operating system (including its updates and security patches) and any additional application software or utilities that you install on the compute resources. To use a new AMI for your Batch jobs, complete these steps:

  1. Create a new compute environment with the new AMI.

  2. Add the compute environment to an existing job queue.

  3. Remove the earlier compute environment from your job queue.

  4. Delete the earlier compute environment.

", "CreateJobQueue": "

Creates an Batch job queue. When you create a job queue, you associate one or more compute environments to the queue and assign an order of preference for the compute environments.

You also set a priority to the job queue that determines the order that the Batch scheduler places jobs onto its associated compute environments. For example, if a compute environment is associated with more than one job queue, the job queue with a higher priority is given preference for scheduling jobs to that compute environment.

", + "CreateSchedulingPolicy": "

Creates an Batch scheduling policy.

", "DeleteComputeEnvironment": "

Deletes an Batch compute environment.

Before you can delete a compute environment, you must set its state to DISABLED with the UpdateComputeEnvironment API operation and disassociate it from any job queues with the UpdateJobQueue API operation. Compute environments that use Fargate resources must terminate all active jobs on that compute environment before deleting the compute environment. If this isn't done, the compute environment enters an invalid state.

", "DeleteJobQueue": "

Deletes the specified job queue. You must first disable submissions for a queue with the UpdateJobQueue operation. All jobs in the queue are eventually terminated when you delete a job queue. The jobs are terminated at a rate of about 16 jobs each second.

It's not necessary to disassociate compute environments from a queue before submitting a DeleteJobQueue request.

", + "DeleteSchedulingPolicy": "

Deletes the specified scheduling policy.

You can't delete a scheduling policy that is used in any job queues.

", "DeregisterJobDefinition": "

Deregisters an Batch job definition. Job definitions are permanently deleted after 180 days.

", "DescribeComputeEnvironments": "

Describes one or more of your compute environments.

If you're using an unmanaged compute environment, you can use the DescribeComputeEnvironment operation to determine the ecsClusterArn that you should launch your Amazon ECS container instances into.

", "DescribeJobDefinitions": "

Describes a list of job definitions. You can specify a status (such as ACTIVE) to only return job definitions that match that status.

", "DescribeJobQueues": "

Describes one or more of your job queues.

", "DescribeJobs": "

Describes a list of Batch jobs.

", + "DescribeSchedulingPolicies": "

Describes one or more of your scheduling policies.

", "ListJobs": "

Returns a list of Batch jobs.

You must specify only one of the following items:

You can filter the results by job status with the jobStatus parameter. If you don't specify a status, only RUNNING jobs are returned.

", - "ListTagsForResource": "

Lists the tags for an Batch resource. Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", + "ListSchedulingPolicies": "

Returns a list of Batch scheduling policies.

", + "ListTagsForResource": "

Lists the tags for an Batch resource. Batch resources that support tags are compute environments, jobs, job definitions, job queues, and scheduling policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", "RegisterJobDefinition": "

Registers an Batch job definition.

", - "SubmitJob": "

Submits an Batch job from a job definition. Parameters that are specified during SubmitJob override parameters defined in the job definition. vCPU and memory requirements that are specified in the ResourceRequirements objects in the job definition are the exception. They can't be overridden this way using the memory and vcpus parameters. Rather, you must specify updates to job definition parameters in a ResourceRequirements object that's included in the containerOverrides parameter.

Jobs that run on Fargate resources can't be guaranteed to run for more than 14 days. This is because, after 14 days, Fargate resources might become unavailable and job might be terminated.

", - "TagResource": "

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource aren't specified in the request parameters, they aren't changed. When a resource is deleted, the tags that are associated with that resource are deleted as well. Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", + "SubmitJob": "

Submits an Batch job from a job definition. Parameters that are specified during SubmitJob override parameters defined in the job definition. vCPU and memory requirements that are specified in the resourceRequirements objects in the job definition are the exception. They can't be overridden this way using the memory and vcpus parameters. Rather, you must specify updates to job definition parameters in a ResourceRequirements object that's included in the containerOverrides parameter.

Job queues with a scheduling policy are limited to 500 active fair share identifiers at a time.

Jobs that run on Fargate resources can't be guaranteed to run for more than 14 days. This is because, after 14 days, Fargate resources might become unavailable and job might be terminated.

", + "TagResource": "

Associates the specified tags to a resource with the specified resourceArn. If existing tags on a resource aren't specified in the request parameters, they aren't changed. When a resource is deleted, the tags that are associated with that resource are deleted as well. Batch resources that support tags are compute environments, jobs, job definitions, job queues, and scheduling policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", "TerminateJob": "

Terminates a job in a job queue. Jobs that are in the STARTING or RUNNING state are terminated, which causes them to transition to FAILED. Jobs that have not progressed to the STARTING state are cancelled.

", "UntagResource": "

Deletes specified tags from an Batch resource.

", "UpdateComputeEnvironment": "

Updates an Batch compute environment.

", - "UpdateJobQueue": "

Updates a job queue.

" + "UpdateJobQueue": "

Updates a job queue.

", + "UpdateSchedulingPolicy": "

Updates a scheduling policy.

" }, "shapes": { "ArrayJobDependency": { @@ -228,6 +233,16 @@ "refs": { } }, + "CreateSchedulingPolicyRequest": { + "base": null, + "refs": { + } + }, + "CreateSchedulingPolicyResponse": { + "base": null, + "refs": { + } + }, "DeleteComputeEnvironmentRequest": { "base": "

Contains the parameters for DeleteComputeEnvironment.

", "refs": { @@ -248,6 +263,16 @@ "refs": { } }, + "DeleteSchedulingPolicyRequest": { + "base": null, + "refs": { + } + }, + "DeleteSchedulingPolicyResponse": { + "base": null, + "refs": { + } + }, "DeregisterJobDefinitionRequest": { "base": null, "refs": { @@ -298,6 +323,16 @@ "refs": { } }, + "DescribeSchedulingPoliciesRequest": { + "base": null, + "refs": { + } + }, + "DescribeSchedulingPoliciesResponse": { + "base": null, + "refs": { + } + }, "Device": { "base": "

An object representing a container instance host device.

This object isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.

", "refs": { @@ -347,7 +382,7 @@ } }, "Ec2Configuration": { - "base": "

Provides information used to select Amazon Machine Images (AMIs) for instances in the compute environment. If Ec2Configuration isn't specified, the default is currently ECS_AL1 (Amazon Linux) for non-GPU, non AWSGraviton instances. Starting on March 31, 2021, this default will be changing to ECS_AL2 (Amazon Linux 2).

This object isn't applicable to jobs that are running on Fargate resources.

", + "base": "

Provides information used to select Amazon Machine Images (AMIs) for instances in the compute environment. If Ec2Configuration isn't specified, the default is ECS_AL2 (Amazon Linux 2).

This object isn't applicable to jobs that are running on Fargate resources.

", "refs": { "Ec2ConfigurationList$member": null } @@ -355,7 +390,7 @@ "Ec2ConfigurationList": { "base": null, "refs": { - "ComputeResource$ec2Configuration": "

Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the compute environment. If Ec2Configuration isn't specified, the default is ECS_AL1.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" + "ComputeResource$ec2Configuration": "

Provides information used to select Amazon Machine Images (AMIs) for EC2 instances in the compute environment. If Ec2Configuration isn't specified, the default is ECS_AL2.

One or two values can be provided.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" } }, "EnvironmentVariables": { @@ -378,6 +413,14 @@ "RetryStrategy$evaluateOnExit": "

Array of up to 5 objects that specify conditions under which the job should be retried or failed. If this parameter is specified, then the attempts parameter must also be specified.

" } }, + "FairsharePolicy": { + "base": "

The fair share policy for a scheduling policy.

", + "refs": { + "CreateSchedulingPolicyRequest$fairsharePolicy": "

The fair share policy of the scheduling policy.

", + "SchedulingPolicyDetail$fairsharePolicy": "

The fair share policy for the scheduling policy.

", + "UpdateSchedulingPolicyRequest$fairsharePolicy": "

The fair share policy.

" + } + }, "FargatePlatformConfiguration": { "base": "

The platform configuration for jobs that are running on Fargate resources. Jobs that run on EC2 resources must not specify this parameter.

", "refs": { @@ -385,6 +428,12 @@ "ContainerProperties$fargatePlatformConfiguration": "

The platform configuration for jobs that are running on Fargate resources. Jobs that are running on EC2 resources must not specify this parameter.

" } }, + "Float": { + "base": null, + "refs": { + "ShareAttributes$weightFactor": "

The weight factor for the fair share identifier. The default value is 1.0. A lower value has a higher priority for compute resources. For example, jobs using a share identifier with a weight factor of 0.125 (1/8) will get 8 times the compute resources of jobs using a share identifier with a weight factor of 1.

The smallest supported value is 0.0001 and the largest supported value is 999.9999.

" + } + }, "Host": { "base": "

Determine whether your data volume persists on the host container instance and where it is stored. If this parameter is empty, then the Docker daemon assigns a host path for your data volume, but the data isn't guaranteed to persist after the containers associated with it stop running.

", "refs": { @@ -400,7 +449,7 @@ "ImageType": { "base": null, "refs": { - "Ec2Configuration$imageType": "

The image type to match with the instance type to select an AMI. If the imageIdOverride parameter isn't specified, then a recent Amazon ECS-optimized AMI (ECS_AL1) is used. Starting on March 31, 2021, this default will be changing to ECS_AL2 (Amazon Linux 2).

ECS_AL2

Amazon Linux 2− Default for all Amazon Web Services Graviton-based instance families (for example, C6g, M6g, R6g, and T4g) and can be used for all non-GPU instance types.

ECS_AL2_NVIDIA

Amazon Linux 2 (GPU)−Default for all GPU instance families (for example P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

ECS_AL1

Amazon Linux−Default for all non-GPU, non Amazon Web Services Graviton instance families. Amazon Linux is reaching the end-of-life of standard support. For more information, see Amazon Linux AMI.

" + "Ec2Configuration$imageType": "

The image type to match with the instance type to select an AMI. If the imageIdOverride parameter isn't specified, then a recent Amazon ECS-optimized Amazon Linux 2 AMI (ECS_AL2) is used.

ECS_AL2

Amazon Linux 2− Default for all non-GPU instance families.

ECS_AL2_NVIDIA

Amazon Linux 2 (GPU)−Default for all GPU instance families (for example P4 and G4) and can be used for all non Amazon Web Services Graviton-based instance types.

ECS_AL1

Amazon Linux. Amazon Linux is reaching the end-of-life of standard support. For more information, see Amazon Linux AMI.

" } }, "Integer": { @@ -413,6 +462,7 @@ "ArrayPropertiesSummary$size": "

The size of the array job. This parameter is returned for parent array jobs.

", "ArrayPropertiesSummary$index": "

The job index within the array that's associated with this job. This parameter is returned for children of array jobs.

", "AttemptContainerDetail$exitCode": "

The exit code for the job attempt. A non-zero exit code is considered a failure.

", + "ComputeEnvironmentDetail$unmanagedvCpus": "

The maximum number of VCPUs expected to be used for an unmanaged compute environment.

", "ComputeEnvironmentOrder$order": "

The order of the compute environment. Compute environments are tried in ascending order. For example, if two compute environments are associated with a job queue, the compute environment with a lower order integer value is tried for job placement first.

", "ComputeResource$minvCpus": "

The minimum number of Amazon EC2 vCPUs that an environment should maintain (even if the compute environment is DISABLED).

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

", "ComputeResource$maxvCpus": "

The maximum number of Amazon EC2 vCPUs that a compute environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. For example, no more than a single instance from among those specified in your compute environment is allocated.

", @@ -421,37 +471,46 @@ "ComputeResourceUpdate$minvCpus": "

The minimum number of Amazon EC2 vCPUs that an environment should maintain.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

", "ComputeResourceUpdate$maxvCpus": "

The maximum number of Amazon EC2 vCPUs that an environment can reach.

With both BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies, Batch might need to exceed maxvCpus to meet your capacity requirements. In this event, Batch never exceeds maxvCpus by more than a single instance. That is, no more than a single instance from among those specified in your compute environment.

", "ComputeResourceUpdate$desiredvCpus": "

The desired number of Amazon EC2 vCPUS in the compute environment.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

", - "ContainerDetail$vcpus": "

The number of vCPUs reserved for the container. For jobs that run on EC2 resources, you can specify the vCPU requirement for the job using resourceRequirements, but you can't specify the vCPU requirements in both the vcpus and resourceRequirement object. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU. This is required but can be specified in several places. It must be specified for each node at least once.

This parameter isn't applicable to jobs that run on Fargate resources. For jobs that run on Fargate resources, you must specify the vCPU requirement for the job using resourceRequirements.

", - "ContainerDetail$memory": "

For jobs run on EC2 resources that didn't specify memory requirements using ResourceRequirement, the number of MiB of memory reserved for the job. For other jobs, including all run on Fargate resources, see resourceRequirements.

", + "ContainerDetail$vcpus": "

The number of vCPUs reserved for the container. For jobs that run on EC2 resources, you can specify the vCPU requirement for the job using resourceRequirements, but you can't specify the vCPU requirements in both the vcpus and resourceRequirements object. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU. This is required but can be specified in several places. It must be specified for each node at least once.

This parameter isn't applicable to jobs that run on Fargate resources. For jobs that run on Fargate resources, you must specify the vCPU requirement for the job using resourceRequirements.

", + "ContainerDetail$memory": "

For jobs run on EC2 resources that didn't specify memory requirements using resourceRequirements, the number of MiB of memory reserved for the job. For other jobs, including all run on Fargate resources, see resourceRequirements.

", "ContainerDetail$exitCode": "

The exit code to return upon completion.

", - "ContainerOverrides$vcpus": "

This parameter indicates the number of vCPUs reserved for the container.It overrides the vcpus parameter that's set in the job definition, but doesn't override any vCPU requirement specified in the resourceRequirement structure in the job definition. To override vCPU requirements that are specified in the ResourceRequirement structure in the job definition, ResourceRequirement must be specified in the SubmitJob request, with type set to VCPU and value set to the new value.

This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one vCPU.

This parameter is supported for jobs that run on EC2 resources, but isn't supported for jobs that run on Fargate resources. For Fargate resources, you can only use resourceRequirement. For EC2 resources, you can use either this parameter or resourceRequirement but not both.

", - "ContainerOverrides$memory": "

This parameter indicates the amount of memory (in MiB) that's reserved for the job. It overrides the memory parameter set in the job definition, but doesn't override any memory requirement specified in the ResourceRequirement structure in the job definition. To override memory requirements that are specified in the ResourceRequirement structure in the job definition, ResourceRequirement must be specified in the SubmitJob request, with type set to MEMORY and value set to the new value.

This parameter is supported for jobs that run on EC2 resources, but isn't supported for jobs that run on Fargate resources. For these resources, use resourceRequirement instead.

", - "ContainerProperties$vcpus": "

The number of vCPUs reserved for the job. Each vCPU is equivalent to 1,024 CPU shares. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. The number of vCPUs must be specified but can be specified in several places. You must specify it at least once for each node.

This parameter is supported on EC2 resources but isn't supported for jobs that run on Fargate resources. For these resources, use resourceRequirement instead. You can use this parameter or resourceRequirements structure but not both.

This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided. For jobs that run on Fargate resources, you must specify the vCPU requirement for the job using resourceRequirements.

", - "ContainerProperties$memory": "

This parameter indicates the memory hard limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several places. It must be specified for each node at least once.

This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

This parameter is supported on EC2 resources but isn't supported on Fargate resources. For Fargate resources, you should specify the memory requirement using resourceRequirement. You can also do this for EC2 resources.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the Batch User Guide.

", + "ContainerOverrides$vcpus": "

This parameter is deprecated, use resourceRequirements to override the vcpus parameter that's set in the job definition. It's not supported for jobs that run on Fargate resources. For jobs run on EC2 resources, it overrides the vcpus parameter set in the job definition, but doesn't override any vCPU requirement specified in the resourceRequirements structure in the job definition. To override vCPU requirements that are specified in the resourceRequirements structure in the job definition, resourceRequirements must be specified in the SubmitJob request, with type set to VCPU and value set to the new value. For more information, see Can't override job definition resource requirements in the Batch User Guide.

", + "ContainerOverrides$memory": "

This parameter is deprecated, use resourceRequirements to override the memory requirements specified in the job definition. It's not supported for jobs that run on Fargate resources. For jobs run on EC2 resources, it overrides the memory parameter set in the job definition, but doesn't override any memory requirement specified in the resourceRequirements structure in the job definition. To override memory requirements that are specified in the resourceRequirements structure in the job definition, resourceRequirements must be specified in the SubmitJob request, with type set to MEMORY and value set to the new value. For more information, see Can't override job definition resource requirements in the Batch User Guide.

", + "ContainerProperties$vcpus": "

This parameter is deprecated, use resourceRequirements to specify the vCPU requirements for the job definition. It's not supported for jobs that run on Fargate resources. For jobs run on EC2 resources, it specifies the number of vCPUs reserved for the job.

Each vCPU is equivalent to 1,024 CPU shares. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. The number of vCPUs must be specified but can be specified in several places. You must specify it at least once for each node.

", + "ContainerProperties$memory": "

This parameter is deprecated, use resourceRequirements to specify the memory requirements for the job definition. It's not supported for jobs that run on Fargate resources. For jobs run on EC2 resources, it specifies the memory hard limit (in MiB) for a container. If your container attempts to exceed the specified number, it's terminated. You must specify at least 4 MiB of memory for a job using this parameter. The memory hard limit can be specified in several places. It must be specified for each node at least once.

", "ContainerSummary$exitCode": "

The exit code to return upon completion.

", + "CreateComputeEnvironmentRequest$unmanagedvCpus": "

The maximum number of vCPUs for an unmanaged compute environment. This parameter is only used for fair share scheduling to reserve vCPU capacity for new share identifiers. If this parameter is not provided for a fair share job queue, no vCPU capacity will be reserved.

This parameter is only supported when the type parameter is set to UNMANAGED.

", "CreateJobQueueRequest$priority": "

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order. For example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

", "DescribeComputeEnvironmentsRequest$maxResults": "

The maximum number of cluster results returned by DescribeComputeEnvironments in paginated output. When this parameter is used, DescribeComputeEnvironments only returns maxResults results in a single page along with a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeComputeEnvironments request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeComputeEnvironments returns up to 100 results and a nextToken value if applicable.

", "DescribeJobDefinitionsRequest$maxResults": "

The maximum number of results returned by DescribeJobDefinitions in paginated output. When this parameter is used, DescribeJobDefinitions only returns maxResults results in a single page and a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeJobDefinitions request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeJobDefinitions returns up to 100 results and a nextToken value if applicable.

", "DescribeJobQueuesRequest$maxResults": "

The maximum number of results returned by DescribeJobQueues in paginated output. When this parameter is used, DescribeJobQueues only returns maxResults results in a single page and a nextToken response element. The remaining results of the initial request can be seen by sending another DescribeJobQueues request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then DescribeJobQueues returns up to 100 results and a nextToken value if applicable.

", "EFSVolumeConfiguration$transitEncryptionPort": "

The port to use when sending encrypted data between the Amazon ECS host and the Amazon EFS server. If you don't specify a transit encryption port, it uses the port selection strategy that the Amazon EFS mount helper uses. The value must be between 0 and 65,535. For more information, see EFS Mount Helper in the Amazon Elastic File System User Guide.

", + "FairsharePolicy$shareDecaySeconds": "

The time period to use to calculate a fair share percentage for each fair share identifier in use, in seconds. A value of zero (0) indicates that only current usage should be measured; if there are four evenly weighted fair share identifiers then each can only use up to 25% of the available CPU resources, even if some of the fair share identifiers have no currently running jobs. The decay allows for more recently run jobs to have more weight than jobs that ran earlier. The maximum supported value is 604800 (1 week).

", + "FairsharePolicy$computeReservation": "

A value used to reserve some of the available maximum vCPU for fair share identifiers that have not yet been used.

The reserved ratio is (computeReservation/100)^ActiveFairShares where ActiveFairShares is the number of active fair share identifiers.

For example, a computeReservation value of 50 indicates that Batch should reserve 50% of the maximum available vCPU if there is only one fair share identifier, 25% if there are two fair share identifiers, and 12.5% if there are three fair share identifiers. A computeReservation value of 25 indicates that Batch should reserve 25% of the maximum available vCPU if there is only one fair share identifier, 6.25% if there are two fair share identifiers, and 1.56% if there are three fair share identifiers.

The minimum value is 0 and the maximum value is 99.

", "JobDefinition$revision": "

The revision of the job definition.

", + "JobDefinition$schedulingPriority": "

The scheduling priority of the job definition. This will only affect jobs in job queues with a fair share policy. Jobs with a higher scheduling priority will be scheduled before jobs with a lower scheduling priority.

", + "JobDetail$schedulingPriority": "

The scheduling priority of the job definition. This will only affect jobs in job queues with a fair share policy. Jobs with a higher scheduling priority will be scheduled before jobs with a lower scheduling priority.

", "JobQueueDetail$priority": "

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT); EC2 and Fargate compute environments can't be mixed.

", "JobTimeout$attemptDurationSeconds": "

The time duration in seconds (measured from the job attempt's startedAt timestamp) after which Batch terminates your jobs if they have not finished. The minimum value for the timeout is 60 seconds.

", "LinuxParameters$sharedMemorySize": "

The value for the size (in MiB) of the /dev/shm volume. This parameter maps to the --shm-size option to docker run.

This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.

", "LinuxParameters$maxSwap": "

The total amount of swap memory (in MiB) a container can use. This parameter is translated to the --memory-swap option to docker run where the value is the sum of the container memory plus the maxSwap value. For more information, see --memory-swap details in the Docker documentation.

If a maxSwap value of 0 is specified, the container doesn't use swap. Accepted values are 0 or any positive integer. If the maxSwap parameter is omitted, the container doesn't use the swap configuration for the container instance it is running on. A maxSwap value must be set for the swappiness parameter to be used.

This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.

", "LinuxParameters$swappiness": "

This allows you to tune a container's memory swappiness behavior. A swappiness value of 0 causes swapping not to happen unless absolutely necessary. A swappiness value of 100 causes pages to be swapped very aggressively. Accepted values are whole numbers between 0 and 100. If the swappiness parameter isn't specified, a default value of 60 is used. If a value isn't specified for maxSwap, then this parameter is ignored. If maxSwap is set to 0, the container doesn't use swap. This parameter maps to the --memory-swappiness option to docker run.

Consider the following when you use a per-container swap configuration.

This parameter isn't applicable to jobs that are running on Fargate resources and shouldn't be provided.

", "ListJobsRequest$maxResults": "

The maximum number of results returned by ListJobs in paginated output. When this parameter is used, ListJobs only returns maxResults results in a single page and a nextToken response element. The remaining results of the initial request can be seen by sending another ListJobs request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then ListJobs returns up to 100 results and a nextToken value if applicable.

", + "ListSchedulingPoliciesRequest$maxResults": "

The maximum number of results returned by ListSchedulingPolicies in paginated output. When this parameter is used, ListSchedulingPolicies only returns maxResults results in a single page and a nextToken response element. The remaining results of the initial request can be seen by sending another ListSchedulingPolicies request with the returned nextToken value. This value can be between 1 and 100. If this parameter isn't used, then ListSchedulingPolicies returns up to 100 results and a nextToken value if applicable.

", "NodeDetails$nodeIndex": "

The node index for the node. Node index numbering begins at zero. This index is also available on the node with the AWS_BATCH_JOB_NODE_INDEX environment variable.

", "NodeOverrides$numNodes": "

The number of nodes to use with a multi-node parallel job. This value overrides the number of nodes that are specified in the job definition. To use this override:

", "NodeProperties$numNodes": "

The number of nodes associated with a multi-node parallel job.

", "NodeProperties$mainNode": "

Specifies the node index for the main node of a multi-node parallel job. This node index value must be fewer than the number of nodes.

", "NodePropertiesSummary$numNodes": "

The number of nodes associated with a multi-node parallel job.

", "NodePropertiesSummary$nodeIndex": "

The node index for the node. Node index numbering begins at zero. This index is also available on the node with the AWS_BATCH_JOB_NODE_INDEX environment variable.

", + "RegisterJobDefinitionRequest$schedulingPriority": "

The scheduling priority for jobs that are submitted with this job definition. This will only affect jobs in job queues with a fair share policy. Jobs with a higher scheduling priority will be scheduled before jobs with a lower scheduling priority.

The minimum supported value is 0 and the maximum supported value is 9999.

", "RegisterJobDefinitionResponse$revision": "

The revision of the job definition.

", "RetryStrategy$attempts": "

The number of times to move a job to the RUNNABLE status. You can specify between 1 and 10 attempts. If the value of attempts is greater than one, the job is retried on failure the same number of attempts as the value.

", + "SubmitJobRequest$schedulingPriorityOverride": "

The scheduling priority for the job. This will only affect jobs in job queues with a fair share policy. Jobs with a higher scheduling priority will be scheduled before jobs with a lower scheduling priority. This will override any scheduling priority in the job definition.

The minimum supported value is 0 and the maximum supported value is 9999.

", "Tmpfs$size": "

The size (in MiB) of the tmpfs volume.

", "Ulimit$hardLimit": "

The hard limit for the ulimit type.

", "Ulimit$softLimit": "

The soft limit for the ulimit type.

", + "UpdateComputeEnvironmentRequest$unmanagedvCpus": "

The maximum number of vCPUs expected to be used for an unmanaged compute environment. This parameter should not be specified for a managed compute environment. This parameter is only used for fair share scheduling to reserve vCPU capacity for new share identifiers. If this parameter is not provided for a fair share job queue, no vCPU capacity will be reserved.

", "UpdateJobQueueRequest$priority": "

The priority of the job queue. Job queues with a higher priority (or a higher integer value for the priority parameter) are evaluated first when associated with the same compute environment. Priority is determined in descending order, for example, a job queue with a priority value of 10 is given scheduling preference over a job queue with a priority value of 1. All of the compute environments must be either EC2 (EC2 or SPOT) or Fargate (FARGATE or FARGATE_SPOT). EC2 and Fargate compute environments can't be mixed.

" } }, @@ -594,6 +653,16 @@ "refs": { } }, + "ListSchedulingPoliciesRequest": { + "base": null, + "refs": { + } + }, + "ListSchedulingPoliciesResponse": { + "base": null, + "refs": { + } + }, "ListTagsForResourceRequest": { "base": null, "refs": { @@ -787,6 +856,30 @@ "SubmitJobRequest$retryStrategy": "

The retry strategy to use for failed jobs from this SubmitJob operation. When a retry strategy is specified here, it overrides the retry strategy defined in the job definition.

" } }, + "SchedulingPolicyDetail": { + "base": "

An object representing a scheduling policy.

", + "refs": { + "SchedulingPolicyDetailList$member": null + } + }, + "SchedulingPolicyDetailList": { + "base": null, + "refs": { + "DescribeSchedulingPoliciesResponse$schedulingPolicies": "

The list of scheduling policies.

" + } + }, + "SchedulingPolicyListingDetail": { + "base": "

An object containing the details of a scheduling policy returned in a ListSchedulingPolicies action.

", + "refs": { + "SchedulingPolicyListingDetailList$member": null + } + }, + "SchedulingPolicyListingDetailList": { + "base": null, + "refs": { + "ListSchedulingPoliciesResponse$schedulingPolicies": "

A list of scheduling policies that match the request.

" + } + }, "Secret": { "base": "

An object representing the secret to expose to your container. Secrets can be exposed to a container in the following ways:

For more information, see Specifying sensitive data in the Batch User Guide.

", "refs": { @@ -806,6 +899,18 @@ "refs": { } }, + "ShareAttributes": { + "base": "

Specifies the weights for the fair share identifiers for the fair share policy. Fair share identifiers that are not included have a default weight of 1.0.

", + "refs": { + "ShareAttributesList$member": null + } + }, + "ShareAttributesList": { + "base": null, + "refs": { + "FairsharePolicy$shareDistribution": "

Array of SharedIdentifier objects that contain the weights for the fair share identifiers for the fair share policy. Fair share identifiers that are not included have a default weight of 1.0.

" + } + }, "String": { "base": null, "refs": { @@ -850,13 +955,18 @@ "CreateComputeEnvironmentResponse$computeEnvironmentName": "

The name of the compute environment. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

", "CreateComputeEnvironmentResponse$computeEnvironmentArn": "

The Amazon Resource Name (ARN) of the compute environment.

", "CreateJobQueueRequest$jobQueueName": "

The name of the job queue. Up to 128 letters (uppercase and lowercase), numbers, and underscores are allowed.

", + "CreateJobQueueRequest$schedulingPolicyArn": "

Amazon Resource Name (ARN) of the fair share scheduling policy. If this parameter is specified, the job queue will use a fair share scheduling policy. If this parameter is not specified, the job queue will use a first in, first out (FIFO) scheduling policy. Once a job queue is created, the fair share scheduling policy can be replaced but not removed. The format is arn:Partition:batch:Region:Account:scheduling-policy/Name . For example, arn:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy.

", "CreateJobQueueResponse$jobQueueName": "

The name of the job queue.

", "CreateJobQueueResponse$jobQueueArn": "

The Amazon Resource Name (ARN) of the job queue.

", + "CreateSchedulingPolicyRequest$name": "

The name of the scheduling policy. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

", + "CreateSchedulingPolicyResponse$name": "

The name of the scheduling policy.

", + "CreateSchedulingPolicyResponse$arn": "

The Amazon Resource Name (ARN) of the scheduling policy. The format is arn:Partition:batch:Region:Account:scheduling-policy/Name . For example, arn:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy.

", "DeleteComputeEnvironmentRequest$computeEnvironment": "

The name or Amazon Resource Name (ARN) of the compute environment to delete.

", "DeleteJobQueueRequest$jobQueue": "

The short name or full Amazon Resource Name (ARN) of the queue to delete.

", + "DeleteSchedulingPolicyRequest$arn": "

The Amazon Resource Name (ARN) of the scheduling policy to delete.

", "DeregisterJobDefinitionRequest$jobDefinition": "

The name and revision (name:revision) or full Amazon Resource Name (ARN) of the job definition to deregister.

", "DescribeComputeEnvironmentsRequest$nextToken": "

The nextToken value returned from a previous paginated DescribeComputeEnvironments request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

This token should be treated as an opaque identifier that's only used to retrieve the next items in a list and not for other programmatic purposes.

", - "DescribeComputeEnvironmentsResponse$nextToken": "

The nextToken value to include in a future DescribeComputeEnvironments request. When the results of a DescribeJobDefinitions request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", + "DescribeComputeEnvironmentsResponse$nextToken": "

The nextToken value to include in a future DescribeComputeEnvironments request. When the results of a DescribeComputeEnvironments request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", "DescribeJobDefinitionsRequest$jobDefinitionName": "

The name of the job definition to describe.

", "DescribeJobDefinitionsRequest$status": "

The status used to filter job definitions.

", "DescribeJobDefinitionsRequest$nextToken": "

The nextToken value returned from a previous paginated DescribeJobDefinitions request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

This token should be treated as an opaque identifier that's only used to retrieve the next items in a list and not for other programmatic purposes.

", @@ -868,24 +978,26 @@ "EFSAuthorizationConfig$accessPointId": "

The Amazon EFS access point ID to use. If an access point is specified, the root directory value specified in the EFSVolumeConfiguration must either be omitted or set to / which will enforce the path set on the EFS access point. If an access point is used, transit encryption must be enabled in the EFSVolumeConfiguration. For more information, see Working with Amazon EFS Access Points in the Amazon Elastic File System User Guide.

", "EFSVolumeConfiguration$fileSystemId": "

The Amazon EFS file system ID to use.

", "EFSVolumeConfiguration$rootDirectory": "

The directory within the Amazon EFS file system to mount as the root directory inside the host. If this parameter is omitted, the root of the Amazon EFS volume is used instead. Specifying / has the same effect as omitting this parameter. The maximum length is 4,096 characters.

If an EFS access point is specified in the authorizationConfig, the root directory parameter must either be omitted or set to /, which enforces the path set on the Amazon EFS access point.

", - "EvaluateOnExit$onStatusReason": "

Contains a glob pattern to match against the StatusReason returned for a job. The pattern can be up to 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including spaces or tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

", - "EvaluateOnExit$onReason": "

Contains a glob pattern to match against the Reason returned for a job. The pattern can be up to 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including spaces and tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

", - "EvaluateOnExit$onExitCode": "

Contains a glob pattern to match against the decimal representation of the ExitCode returned for a job. The pattern can be up to 512 characters in length. It can contain only numbers, and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

", + "EvaluateOnExit$onStatusReason": "

Contains a glob pattern to match against the StatusReason returned for a job. The pattern can be up to 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including spaces or tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

The string can be between 1 and 512 characters in length.

", + "EvaluateOnExit$onReason": "

Contains a glob pattern to match against the Reason returned for a job. The pattern can be up to 512 characters in length. It can contain letters, numbers, periods (.), colons (:), and white space (including spaces and tabs). It can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

The string can be between 1 and 512 characters in length.

", + "EvaluateOnExit$onExitCode": "

Contains a glob pattern to match against the decimal representation of the ExitCode returned for a job. The pattern can be up to 512 characters in length. It can contain only numbers, and can optionally end with an asterisk (*) so that only the start of the string needs to be an exact match.

The string can be between 1 and 512 characters in length.

", "FargatePlatformConfiguration$platformVersion": "

The Fargate platform version where the jobs are running. A platform version is specified only for jobs that are running on Fargate resources. If one isn't specified, the LATEST platform version is used by default. This uses a recent, approved version of the Fargate platform for compute resources. For more information, see Fargate platform versions in the Amazon Elastic Container Service Developer Guide.

", "Host$sourcePath": "

The path on the host container instance that's presented to the container. If this parameter is empty, then the Docker daemon has assigned a host path for you. If this parameter contains a file location, then the data volume persists at the specified location on the host container instance until you delete it manually. If the source path location doesn't exist on the host container instance, the Docker daemon creates it. If the location does exist, the contents of the source path folder are exported.

This parameter isn't applicable to jobs that run on Fargate resources and shouldn't be provided.

", "JobDefinition$jobDefinitionName": "

The name of the job definition.

", "JobDefinition$jobDefinitionArn": "

The Amazon Resource Name (ARN) for the job definition.

", "JobDefinition$status": "

The status of the job definition.

", - "JobDefinition$type": "

The type of job definition. If the job is run on Fargate resources, then multinode isn't supported. For more information about multi-node parallel jobs, see Creating a multi-node parallel job definition in the Batch User Guide.

", + "JobDefinition$type": "

The type of job definition, either container or multinode. If the job is run on Fargate resources, then multinode isn't supported. For more information about multi-node parallel jobs, see Creating a multi-node parallel job definition in the Batch User Guide.

", "JobDependency$jobId": "

The job ID of the Batch job associated with this dependency.

", "JobDetail$jobArn": "

The Amazon Resource Name (ARN) of the job.

", "JobDetail$jobName": "

The name of the job.

", "JobDetail$jobId": "

The ID for the job.

", "JobDetail$jobQueue": "

The Amazon Resource Name (ARN) of the job queue that the job is associated with.

", + "JobDetail$shareIdentifier": "

The share identifier for the job.

", "JobDetail$statusReason": "

A short, human-readable string to provide additional details about the current status of the job.

", "JobDetail$jobDefinition": "

The job definition that's used by this job.

", "JobQueueDetail$jobQueueName": "

The name of the job queue.

", "JobQueueDetail$jobQueueArn": "

The Amazon Resource Name (ARN) of the job queue.

", + "JobQueueDetail$schedulingPolicyArn": "

Amazon Resource Name (ARN) of the scheduling policy. The format is arn:Partition:batch:Region:Account:scheduling-policy/Name. For example, arn:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy.

", "JobQueueDetail$statusReason": "

A short, human-readable string to provide additional details about the current status of the job queue.

", "JobSummary$jobArn": "

The Amazon Resource Name (ARN) of the job.

", "JobSummary$jobId": "

The ID of the job.

", @@ -903,7 +1015,9 @@ "ListJobsRequest$multiNodeJobId": "

The job ID for a multi-node parallel job. Specifying a multi-node parallel job ID with this parameter lists all nodes that are associated with the specified job.

", "ListJobsRequest$nextToken": "

The nextToken value returned from a previous paginated ListJobs request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

This token should be treated as an opaque identifier that's only used to retrieve the next items in a list and not for other programmatic purposes.

", "ListJobsResponse$nextToken": "

The nextToken value to include in a future ListJobs request. When the results of a ListJobs request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", - "ListTagsForResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) that identifies the resource that tags are listed for. Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", + "ListSchedulingPoliciesRequest$nextToken": "

The nextToken value returned from a previous paginated ListSchedulingPolicies request where maxResults was used and the results exceeded the value of that parameter. Pagination continues from the end of the previous results that returned the nextToken value. This value is null when there are no more results to return.

This token should be treated as an opaque identifier that's only used to retrieve the next items in a list and not for other programmatic purposes.

", + "ListSchedulingPoliciesResponse$nextToken": "

The nextToken value to include in a future ListSchedulingPolicies request. When the results of a ListSchedulingPolicies request exceed maxResults, this value can be used to retrieve the next page of results. This value is null when there are no more results to return.

", + "ListTagsForResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) that identifies the resource that tags are listed for. Batch resources that support tags are compute environments, jobs, job definitions, job queues, and scheduling policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", "LogConfigurationOptionsMap$key": null, "LogConfigurationOptionsMap$value": null, "MountPoint$containerPath": "

The path on the container where the host volume is mounted.

", @@ -918,32 +1032,39 @@ "RegisterJobDefinitionRequest$jobDefinitionName": "

The name of the job definition to register. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

", "RegisterJobDefinitionResponse$jobDefinitionName": "

The name of the job definition.

", "RegisterJobDefinitionResponse$jobDefinitionArn": "

The Amazon Resource Name (ARN) of the job definition.

", - "ResourceRequirement$value": "

The quantity of the specified resource to reserve for the container. The values vary based on the type specified.

type=\"GPU\"

The number of physical GPUs to reserve for the container. The number of GPUs reserved for all containers in a job shouldn't exceed the number of available GPUs on the compute resource that the job is launched on.

GPUs are not available for jobs that are running on Fargate resources.

type=\"MEMORY\"

The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the Batch User Guide.

For jobs that are running on Fargate resources, then value is the hard limit (in MiB), and must match one of the supported values and the VCPU values must be one of the values supported for that memory value.

value = 512

VCPU = 0.25

value = 1024

VCPU = 0.25 or 0.5

value = 2048

VCPU = 0.25, 0.5, or 1

value = 3072

VCPU = 0.5, or 1

value = 4096

VCPU = 0.5, 1, or 2

value = 5120, 6144, or 7168

VCPU = 1 or 2

value = 8192

VCPU = 1, 2, or 4

value = 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

VCPU = 2 or 4

value = 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

VCPU = 4

type=\"VCPU\"

The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.

For jobs that are running on Fargate resources, then value must match one of the supported values and the MEMORY values must be one of the values supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, and 4

value = 0.25

MEMORY = 512, 1024, or 2048

value = 0.5

MEMORY = 1024, 2048, 3072, or 4096

value = 1

MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192

value = 2

MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

value = 4

MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

", + "ResourceRequirement$value": "

The quantity of the specified resource to reserve for the container. The values vary based on the type specified.

type=\"GPU\"

The number of physical GPUs to reserve for the container. The number of GPUs reserved for all containers in a job shouldn't exceed the number of available GPUs on the compute resource that the job is launched on.

GPUs are not available for jobs that are running on Fargate resources.

type=\"MEMORY\"

The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run. You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to Memory in the Create a container section of the Docker Remote API and the --memory option to docker run.

If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for a particular instance type, see Memory Management in the Batch User Guide.

For jobs that are running on Fargate resources, then value is the hard limit (in MiB), and must match one of the supported values and the VCPU values must be one of the values supported for that memory value.

value = 512

VCPU = 0.25

value = 1024

VCPU = 0.25 or 0.5

value = 2048

VCPU = 0.25, 0.5, or 1

value = 3072

VCPU = 0.5, or 1

value = 4096

VCPU = 0.5, 1, or 2

value = 5120, 6144, or 7168

VCPU = 1 or 2

value = 8192

VCPU = 1, 2, or 4

value = 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

VCPU = 2 or 4

value = 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

VCPU = 4

type=\"VCPU\"

The number of vCPUs reserved for the container. This parameter maps to CpuShares in the Create a container section of the Docker Remote API and the --cpu-shares option to docker run. Each vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be specified for each node at least once.

For jobs that are running on Fargate resources, then value must match one of the supported values and the MEMORY values must be one of the values supported for that VCPU value. The supported values are 0.25, 0.5, 1, 2, and 4

value = 0.25

MEMORY = 512, 1024, or 2048

value = 0.5

MEMORY = 1024, 2048, 3072, or 4096

value = 1

MEMORY = 2048, 3072, 4096, 5120, 6144, 7168, or 8192

value = 2

MEMORY = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384

value = 4

MEMORY = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720

", + "SchedulingPolicyDetail$name": "

The name of the scheduling policy.

", + "SchedulingPolicyDetail$arn": "

Amazon Resource Name (ARN) of the scheduling policy. An example would be arn:aws:batch:us-east-1:123456789012:scheduling-policy/HighPriority

", + "SchedulingPolicyListingDetail$arn": "

Amazon Resource Name (ARN) of the scheduling policy.

", "Secret$name": "

The name of the secret.

", "Secret$valueFrom": "

The secret to expose to the container. The supported values are either the full ARN of the Secrets Manager secret or the full ARN of the parameter in the Amazon Web Services Systems Manager Parameter Store.

If the Amazon Web Services Systems Manager Parameter Store parameter exists in the same Region as the job you're launching, then you can use either the full ARN or name of the parameter. If the parameter exists in a different Region, then the full ARN must be specified.

", "ServerException$message": null, + "ShareAttributes$shareIdentifier": "

A fair share identifier or fair share identifier prefix. If the string ends with '*', then this entry specifies the weight factor to use for fair share identifiers that begin with that prefix. The list of fair share identifiers in a fair share policy cannot overlap. For example, you cannot have one that specifies a shareIdentifier of UserA* and another that specifies a shareIdentifier of UserA-1.

There can be no more than 500 fair share identifiers active in a job queue.

The string is limited to 255 alphanumeric characters, optionally followed by '*'.

", "StringList$member": null, "SubmitJobRequest$jobName": "

The name of the job. The first character must be alphanumeric, and up to 128 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

", "SubmitJobRequest$jobQueue": "

The job queue where the job is submitted. You can specify either the name or the Amazon Resource Name (ARN) of the queue.

", + "SubmitJobRequest$shareIdentifier": "

The share identifier for the job.

", "SubmitJobRequest$jobDefinition": "

The job definition used by this job. This value can be one of name, name:revision, or the Amazon Resource Name (ARN) for the job definition. If name is specified without a revision then the latest active revision is used.

", "SubmitJobResponse$jobArn": "

The Amazon Resource Name (ARN) for the job.

", "SubmitJobResponse$jobName": "

The name of the job.

", "SubmitJobResponse$jobId": "

The unique identifier for the job.

", - "TagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource that tags are added to. Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", + "TagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource that tags are added to. Batch resources that support tags are compute environments, jobs, job definitions, job queues, and scheduling policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", "TagsMap$key": null, "TagsMap$value": null, "TerminateJobRequest$jobId": "

The Batch job ID of the job to terminate.

", "TerminateJobRequest$reason": "

A message to attach to the job that explains the reason for canceling it. This message is returned by future DescribeJobs operations on the job. This message is also recorded in the Batch activity logs.

", "Tmpfs$containerPath": "

The absolute file path in the container where the tmpfs volume is mounted.

", "Ulimit$name": "

The type of the ulimit.

", - "UntagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource from which to delete tags. Batch resources that support tags are compute environments, jobs, job definitions, and job queues. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", + "UntagResourceRequest$resourceArn": "

The Amazon Resource Name (ARN) of the resource from which to delete tags. Batch resources that support tags are compute environments, jobs, job definitions, job queues, and scheduling policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported.

", "UpdateComputeEnvironmentRequest$computeEnvironment": "

The name or full Amazon Resource Name (ARN) of the compute environment to update.

", "UpdateComputeEnvironmentRequest$serviceRole": "

The full Amazon Resource Name (ARN) of the IAM role that allows Batch to make calls to other Amazon Web Services services on your behalf. For more information, see Batch service IAM role in the Batch User Guide.

If the compute environment has a service-linked role, it can't be changed to use a regular IAM role. Likewise, if the compute environment has a regular IAM role, it can't be changed to use a service-linked role.

If your specified role has a path other than /, then you must either specify the full role ARN (this is recommended) or prefix the role name with the path.

Depending on how you created your Batch service role, its ARN might contain the service-role path prefix. When you only specify the name of the service role, Batch assumes that your ARN doesn't use the service-role path prefix. Because of this, we recommend that you specify the full ARN of your service role when you create compute environments.

", "UpdateComputeEnvironmentResponse$computeEnvironmentName": "

The name of the compute environment. Up to 128 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed.

", "UpdateComputeEnvironmentResponse$computeEnvironmentArn": "

The Amazon Resource Name (ARN) of the compute environment.

", "UpdateJobQueueRequest$jobQueue": "

The name or the Amazon Resource Name (ARN) of the job queue.

", + "UpdateJobQueueRequest$schedulingPolicyArn": "

Amazon Resource Name (ARN) of the fair share scheduling policy. Once a job queue is created, the fair share scheduling policy can be replaced but not removed. The format is arn:Partition:batch:Region:Account:scheduling-policy/Name. For example, arn:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy.

", "UpdateJobQueueResponse$jobQueueName": "

The name of the job queue.

", "UpdateJobQueueResponse$jobQueueArn": "

The Amazon Resource Name (ARN) of the job queue.

", + "UpdateSchedulingPolicyRequest$arn": "

The Amazon Resource Name (ARN) of the scheduling policy to update.

", "Volume$name": "

The name of the volume. Up to 255 letters (uppercase and lowercase), numbers, hyphens, and underscores are allowed. This name is referenced in the sourceVolume parameter of container definition mountPoints.

" } }, @@ -962,6 +1083,7 @@ "DescribeJobDefinitionsRequest$jobDefinitions": "

A list of up to 100 job definitions. Each entry in the list can either be an ARN of the form arn:aws:batch:${Region}:${Account}:job-definition/${JobDefinitionName}:${Revision} or a short version using the form ${JobDefinitionName}:${Revision}.

", "DescribeJobQueuesRequest$jobQueues": "

A list of up to 100 queue names or full queue Amazon Resource Name (ARN) entries.

", "DescribeJobsRequest$jobs": "

A list of up to 100 job IDs.

", + "DescribeSchedulingPoliciesRequest$arns": "

A list of up to 100 scheduling policy Amazon Resource Name (ARN) entries.

", "KeyValuesPair$values": "

The filter values.

", "Tmpfs$mountOptions": "

The list of tmpfs volume mount options.

Valid values: \"defaults\" | \"ro\" | \"rw\" | \"suid\" | \"nosuid\" | \"dev\" | \"nodev\" | \"exec\" | \"noexec\" | \"sync\" | \"async\" | \"dirsync\" | \"remount\" | \"mand\" | \"nomand\" | \"atime\" | \"noatime\" | \"diratime\" | \"nodiratime\" | \"bind\" | \"rbind\" | \"unbindable\" | \"runbindable\" | \"private\" | \"rprivate\" | \"shared\" | \"rshared\" | \"slave\" | \"rslave\" | \"relatime\" | \"norelatime\" | \"strictatime\" | \"nostrictatime\" | \"mode\" | \"uid\" | \"gid\" | \"nr_inodes\" | \"nr_blocks\" | \"mpol\"

" } @@ -1011,11 +1133,13 @@ "ComputeEnvironmentDetail$tags": "

The tags applied to the compute environment.

", "CreateComputeEnvironmentRequest$tags": "

The tags that you apply to the compute environment to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General Reference.

These tags can be updated or removed using the TagResource and UntagResource API operations. These tags don't propagate to the underlying compute resources.

", "CreateJobQueueRequest$tags": "

The tags that you apply to the job queue to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging your Batch resources in Batch User Guide.

", + "CreateSchedulingPolicyRequest$tags": "

The tags that you apply to the scheduling policy to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General Reference.

These tags can be updated or removed using the TagResource and UntagResource API operations.

", "JobDefinition$tags": "

The tags applied to the job definition.

", "JobDetail$tags": "

The tags applied to the job.

", "JobQueueDetail$tags": "

The tags applied to the job queue. For more information, see Tagging your Batch resources in Batch User Guide.

", "ListTagsForResourceResponse$tags": "

The tags for the resource.

", "RegisterJobDefinitionRequest$tags": "

The tags that you apply to the job definition to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Batch User Guide.

", + "SchedulingPolicyDetail$tags": "

The tags that you apply to the scheduling policy to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General Reference.

", "SubmitJobRequest$tags": "

The tags that you apply to the job request to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General Reference.

", "TagResourceRequest$tags": "

The tags that you apply to the resource to help you categorize and organize your resources. Each tag consists of a key and an optional value. For more information, see Tagging Amazon Web Services Resources in Amazon Web Services General Reference.

" } @@ -1023,7 +1147,7 @@ "TagsMap": { "base": null, "refs": { - "ComputeResource$tags": "

Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For Batch, these take the form of \"String1\": \"String2\", where String1 is the tag key and String2 is the tag value−for example, { \"Name\": \"Batch Instance - C4OnDemand\" }. This is helpful for recognizing your Batch instances in the Amazon EC2 console. These tags can't be updated or removed after the compute environment is created.Aany changes to these tags require that you create a new compute environment and remove the old compute environment. These tags aren't seen when using the Batch ListTagsForResource API operation.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" + "ComputeResource$tags": "

Key-value pair tags to be applied to EC2 resources that are launched in the compute environment. For Batch, these take the form of \"String1\": \"String2\", where String1 is the tag key and String2 is the tag value—for example, { \"Name\": \"Batch Instance - C4OnDemand\" }. This is helpful for recognizing your Batch instances in the Amazon EC2 console. These tags can't be updated or removed after the compute environment is created. Any changes to these tags require that you create a new compute environment and remove the old compute environment. These tags aren't seen when using the Batch ListTagsForResource API operation.

This parameter isn't applicable to jobs that are running on Fargate resources, and shouldn't be specified.

" } }, "TerminateJobRequest": { @@ -1091,6 +1215,16 @@ "refs": { } }, + "UpdateSchedulingPolicyRequest": { + "base": null, + "refs": { + } + }, + "UpdateSchedulingPolicyResponse": { + "base": null, + "refs": { + } + }, "Volume": { "base": "

A data volume used in a job's container properties.

", "refs": { diff --git a/models/apis/batch/2016-08-10/examples-1.json b/models/apis/batch/2016-08-10/examples-1.json index 87bc683b6fa..18203dc8869 100644 --- a/models/apis/batch/2016-08-10/examples-1.json +++ b/models/apis/batch/2016-08-10/examples-1.json @@ -298,14 +298,22 @@ ], "image": "busybox", - "memory": 128, "mountPoints": [ + ], + "resourceRequirements": [ + { + "type": "MEMORY", + "value": "128" + }, + { + "type": "VCPU", + "value": "1" + } ], "ulimits": [ ], - "vcpus": 1, "volumes": [ ] @@ -504,8 +512,16 @@ "10" ], "image": "busybox", - "memory": 128, - "vcpus": 1 + "resourceRequirements": [ + { + "type": "MEMORY", + "value": "128" + }, + { + "type": "VCPU", + "value": "1" + } + ] }, "jobDefinitionName": "sleep10" }, @@ -533,8 +549,16 @@ "30" ], "image": "busybox", - "memory": 128, - "vcpus": 1 + "resourceRequirements": [ + { + "type": "MEMORY", + "value": "128" + }, + { + "type": "VCPU", + "value": "1" + } + ] }, "jobDefinitionName": "sleep30", "tags": { diff --git a/models/apis/batch/2016-08-10/paginators-1.json b/models/apis/batch/2016-08-10/paginators-1.json index e17851bd75d..4ce40a874f5 100644 --- a/models/apis/batch/2016-08-10/paginators-1.json +++ b/models/apis/batch/2016-08-10/paginators-1.json @@ -23,6 +23,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "jobSummaryList" + }, + "ListSchedulingPolicies": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "schedulingPolicies" } } } diff --git a/models/apis/greengrassv2/2020-11-30/api-2.json b/models/apis/greengrassv2/2020-11-30/api-2.json index 15ae946b475..db5b6015dfc 100644 --- a/models/apis/greengrassv2/2020-11-30/api-2.json +++ b/models/apis/greengrassv2/2020-11-30/api-2.json @@ -628,7 +628,8 @@ "type":"structure", "members":{ "posixUser":{"shape":"NonEmptyString"}, - "systemResourceLimits":{"shape":"SystemResourceLimits"} + "systemResourceLimits":{"shape":"SystemResourceLimits"}, + 
"windowsUser":{"shape":"NonEmptyString"} } }, "ComponentVersionARN":{ diff --git a/models/apis/greengrassv2/2020-11-30/docs-2.json b/models/apis/greengrassv2/2020-11-30/docs-2.json index 2d8f0103569..8ff61d61534 100644 --- a/models/apis/greengrassv2/2020-11-30/docs-2.json +++ b/models/apis/greengrassv2/2020-11-30/docs-2.json @@ -5,7 +5,7 @@ "BatchAssociateClientDeviceWithCoreDevice": "

Associate a list of client devices with a core device. Use this API operation to specify which client devices can discover a core device through cloud discovery. With cloud discovery, client devices connect to IoT Greengrass to retrieve associated core devices' connectivity information and certificates. For more information, see Configure cloud discovery in the IoT Greengrass V2 Developer Guide.

Client devices are local IoT devices that connect to and communicate with an IoT Greengrass core device over MQTT. You can connect client devices to a core device to sync MQTT messages and data to Amazon Web Services IoT Core and interact with client devices in Greengrass components. For more information, see Interact with local IoT devices in the IoT Greengrass V2 Developer Guide.

", "BatchDisassociateClientDeviceFromCoreDevice": "

Disassociate a list of client devices from a core device. After you disassociate a client device from a core device, the client device won't be able to use cloud discovery to retrieve the core device's connectivity information and certificates.

", "CancelDeployment": "

Cancels a deployment. This operation cancels the deployment for devices that haven't yet received it. If a device already received the deployment, this operation doesn't change anything for that device.

", - "CreateComponentVersion": "

Creates a component. Components are software that run on Greengrass core devices. After you develop and test a component on your core device, you can use this operation to upload your component to IoT Greengrass. Then, you can deploy the component to other core devices.

You can use this operation to do the following:

", + "CreateComponentVersion": "

Creates a component. Components are software that run on Greengrass core devices. After you develop and test a component on your core device, you can use this operation to upload your component to IoT Greengrass. Then, you can deploy the component to other core devices.

You can use this operation to do the following:

", "CreateDeployment": "

Creates a continuous deployment for a target, which is a Greengrass core device or group of core devices. When you add a new core device to a group of core devices that has a deployment, IoT Greengrass deploys that group's deployment to the new device.

You can define one deployment for each target. When you create a new deployment for a target that has an existing deployment, you replace the previous deployment. IoT Greengrass applies the new deployment to the target devices.

Every deployment has a revision number that indicates how many deployment revisions you define for a target. Use this operation to create a new revision of an existing deployment. This operation returns the revision number of the new deployment when you create it.

For more information, see the Create deployments in the IoT Greengrass V2 Developer Guide.

", "DeleteComponent": "

Deletes a version of a component from IoT Greengrass.

This operation deletes the component's recipe and artifacts. As a result, deployments that refer to this component version will fail. If you have deployments that use this component version, you can remove the component from the deployment or update the deployment to use a valid version.

", "DeleteCoreDevice": "

Deletes a Greengrass core device, which is an IoT thing. This operation removes the core device from the list of core devices. This operation doesn't delete the IoT thing. For more information about how to delete the IoT thing, see DeleteThing in the IoT API Reference.

", @@ -986,7 +986,8 @@ "ComponentLatestVersion$description": "

The description of the component version.

", "ComponentLatestVersion$publisher": "

The publisher of the component version.

", "ComponentPlatform$name": "

The friendly name of the platform. This name helps you identify the platform.

If you omit this parameter, IoT Greengrass creates a friendly name from the os and architecture of the platform.

", - "ComponentRunWith$posixUser": "

The POSIX system user and (optional) group to use to run this component. Specify the user and group separated by a colon (:) in the following format: user:group. The group is optional. If you don't specify a group, the IoT Greengrass Core software uses the primary user for the group.

If you omit this parameter, the IoT Greengrass Core software uses the default system user and group that you configure on the Greengrass nucleus component. For more information, see Configure the user and group that run components.

", + "ComponentRunWith$posixUser": "

The POSIX system user and, optionally, group to use to run this component on Linux core devices. The user, and group if specified, must exist on each Linux core device. Specify the user and group separated by a colon (:) in the following format: user:group. The group is optional. If you don't specify a group, the IoT Greengrass Core software uses the primary user for the group.

If you omit this parameter, the IoT Greengrass Core software uses the default system user and group that you configure on the Greengrass nucleus component. For more information, see Configure the user and group that run components.

", + "ComponentRunWith$windowsUser": "

The Windows user to use to run this component on Windows core devices. The user must exist on each Windows core device, and its name and password must be in the LocalSystem account's Credentials Manager instance.

If you omit this parameter, the IoT Greengrass Core software uses the default Windows user that you configure on the Greengrass nucleus component. For more information, see Configure the user and group that run components.

", "ComponentVersionListItem$arn": "

The ARN of the component version.

", "ComponentVersionRequirementMap$key": null, "ComponentVersionRequirementMap$value": null, @@ -1151,7 +1152,7 @@ "SystemResourceLimits": { "base": "

Contains information about system resource limits that the IoT Greengrass Core software applies to a component's processes. For more information, see Configure system resource limits for components.

", "refs": { - "ComponentRunWith$systemResourceLimits": "

The system resource limits to apply to this component's process on the core device.

If you omit this parameter, the IoT Greengrass Core software uses the default system resource limits that you configure on the Greengrass nucleus component. For more information, see Configure system resource limits for components.

" + "ComponentRunWith$systemResourceLimits": "

The system resource limits to apply to this component's process on the core device. IoT Greengrass currently supports this feature on only Linux core devices.

If you omit this parameter, the IoT Greengrass Core software uses the default system resource limits that you configure on the Greengrass nucleus component. For more information, see Configure system resource limits for components.

" } }, "TagKey": { diff --git a/models/apis/health/2016-08-04/docs-2.json b/models/apis/health/2016-08-04/docs-2.json index 470855225dc..31c42fcd7dd 100644 --- a/models/apis/health/2016-08-04/docs-2.json +++ b/models/apis/health/2016-08-04/docs-2.json @@ -1,20 +1,20 @@ { "version": "2.0", - "service": "AWS Health

The AWS Health API provides programmatic access to the AWS Health information that appears in the AWS Personal Health Dashboard. You can use the API operations to get information about AWS Health events that affect your AWS services and resources.

For authentication of requests, AWS Health uses the Signature Version 4 Signing Process.

If your AWS account is part of AWS Organizations, you can use the AWS Health organizational view feature. This feature provides a centralized view of AWS Health events across all accounts in your organization. You can aggregate AWS Health events in real time to identify accounts in your organization that are affected by an operational event or get notified of security vulnerabilities. Use the organizational view API operations to enable this feature and return event information. For more information, see Aggregating AWS Health events in the AWS Health User Guide.

When you use the AWS Health API operations to return AWS Health events, see the following recommendations:

", + "service": "Health

The Health API provides programmatic access to the Health information that appears in the Personal Health Dashboard. You can use the API operations to get information about events that might affect your Amazon Web Services services and resources.

For authentication of requests, Health uses the Signature Version 4 Signing Process.

If your Amazon Web Services account is part of Organizations, you can use the Health organizational view feature. This feature provides a centralized view of Health events across all accounts in your organization. You can aggregate Health events in real time to identify accounts in your organization that are affected by an operational event or get notified of security vulnerabilities. Use the organizational view API operations to enable this feature and return event information. For more information, see Aggregating Health events in the Health User Guide.

When you use the Health API operations to return Health events, see the following recommendations:

", "operations": { - "DescribeAffectedAccountsForOrganization": "

Returns a list of accounts in the organization from AWS Organizations that are affected by the provided event. For more information about the different types of AWS Health events, see Event.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", - "DescribeAffectedEntities": "

Returns a list of entities that have been affected by the specified events, based on the specified filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the AWS service. Events that have impact beyond that of the affected entities, or where the extent of impact is unknown, include at least one entity indicating this.

At least one event ARN is required. Results are sorted by the lastUpdatedTime of the entity, starting with the most recent.

", - "DescribeAffectedEntitiesForOrganization": "

Returns a list of entities that have been affected by one or more events for one or more accounts in your organization in AWS Organizations, based on the filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the AWS service.

At least one event Amazon Resource Name (ARN) and account ID are required. Results are sorted by the lastUpdatedTime of the entity, starting with the most recent.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

", - "DescribeEntityAggregates": "

Returns the number of entities that are affected by each of the specified events. If no events are specified, the counts of all affected entities are returned.

", + "DescribeAffectedAccountsForOrganization": "

Returns a list of accounts in the organization from Organizations that are affected by the provided event. For more information about the different types of Health events, see Event.

Before you can call this operation, you must first enable Health to work with Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", + "DescribeAffectedEntities": "

Returns a list of entities that have been affected by the specified events, based on the specified filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the Amazon Web Services service. Events that have impact beyond that of the affected entities, or where the extent of impact is unknown, include at least one entity indicating this.

At least one event ARN is required.

", + "DescribeAffectedEntitiesForOrganization": "

Returns a list of entities that have been affected by one or more events for one or more accounts in your organization in Organizations, based on the filter criteria. Entities can refer to individual customer resources, groups of customer resources, or any other construct, depending on the Amazon Web Services service.

At least one event Amazon Resource Name (ARN) and account ID are required.

Before you can call this operation, you must first enable Health to work with Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

", + "DescribeEntityAggregates": "

Returns the number of entities that are affected by each of the specified events.

", "DescribeEventAggregates": "

Returns the number of events of each event type (issue, scheduled change, and account notification). If no filter is specified, the counts of all events in each category are returned.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", - "DescribeEventDetails": "

Returns detailed information about one or more specified events. Information includes standard event data (AWS Region, service, and so on, as returned by DescribeEvents), a detailed event description, and possible additional metadata that depends upon the nature of the event. Affected entities are not included. To retrieve the entities, use the DescribeAffectedEntities operation.

If a specified event can't be retrieved, an error message is returned for that event.

This operation supports resource-level permissions. You can use this operation to allow or deny access to specific AWS Health events. For more information, see Resource- and action-based conditions in the AWS Health User Guide.

", - "DescribeEventDetailsForOrganization": "

Returns detailed information about one or more specified events for one or more AWS accounts in your organization. This information includes standard event data (such as the AWS Region and service), an event description, and (depending on the event) possible metadata. This operation doesn't return affected entities, such as the resources related to the event. To return affected entities, use the DescribeAffectedEntitiesForOrganization operation.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

When you call the DescribeEventDetailsForOrganization operation, specify the organizationEventDetailFilters object in the request. Depending on the AWS Health event type, note the following differences:

For more information, see Event.

This operation doesn't support resource-level permissions. You can't use this operation to allow or deny access to specific AWS Health events. For more information, see Resource- and action-based conditions in the AWS Health User Guide.

", - "DescribeEventTypes": "

Returns the event types that meet the specified filter criteria. You can use this API operation to find information about the AWS Health event, such as the category, AWS service, and event code. The metadata for each event appears in the EventType object.

If you don't specify a filter criteria, the API operation returns all event types, in no particular order.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", - "DescribeEvents": "

Returns information about events that meet the specified filter criteria. Events are returned in a summary form and do not include the detailed description, any additional metadata that depends on the event type, or any affected resources. To retrieve that information, use the DescribeEventDetails and DescribeAffectedEntities operations.

If no filter criteria are specified, all events are returned. Results are sorted by lastModifiedTime, starting with the most recent event.

", - "DescribeEventsForOrganization": "

Returns information about events across your organization in AWS Organizations. You can use thefilters parameter to specify the events that you want to return. Events are returned in a summary form and don't include the affected accounts, detailed description, any additional metadata that depends on the event type, or any affected resources. To retrieve that information, use the following operations:

If you don't specify a filter, the DescribeEventsForOrganizations returns all events across your organization. Results are sorted by lastModifiedTime, starting with the most recent event.

For more information about the different types of AWS Health events, see Event.

Before you can call this operation, you must first enable AWS Health to work with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", - "DescribeHealthServiceStatusForOrganization": "

This operation provides status information on enabling or disabling AWS Health to work with your organization. To call this operation, you must sign in as an IAM user, assume an IAM role, or sign in as the root user (not recommended) in the organization's management account.

", - "DisableHealthServiceAccessForOrganization": "

Disables AWS Health from working with AWS Organizations. To call this operation, you must sign in as an AWS Identity and Access Management (IAM) user, assume an IAM role, or sign in as the root user (not recommended) in the organization's management account. For more information, see Aggregating AWS Health events in the AWS Health User Guide.

This operation doesn't remove the service-linked role from the management account in your organization. You must use the IAM console, API, or AWS Command Line Interface (AWS CLI) to remove the service-linked role. For more information, see Deleting a Service-Linked Role in the IAM User Guide.

You can also disable the organizational feature by using the Organizations DisableAWSServiceAccess API operation. After you call this operation, AWS Health stops aggregating events for all other AWS accounts in your organization. If you call the AWS Health API operations for organizational view, AWS Health returns an error. AWS Health continues to aggregate health events for your AWS account.

", - "EnableHealthServiceAccessForOrganization": "

Enables AWS Health to work with AWS Organizations. You can use the organizational view feature to aggregate events from all AWS accounts in your organization in a centralized location.

This operation also creates a service-linked role for the management account in the organization.

To call this operation, you must meet the following requirements:

If you don't have the required support plan, you can instead use the AWS Health console to enable the organizational view feature. For more information, see Aggregating AWS Health events in the AWS Health User Guide.

" + "DescribeEventDetails": "

Returns detailed information about one or more specified events. Information includes standard event data (Amazon Web Services Region, service, and so on, as returned by DescribeEvents), a detailed event description, and possible additional metadata that depends upon the nature of the event. Affected entities are not included. To retrieve the entities, use the DescribeAffectedEntities operation.

If a specified event can't be retrieved, an error message is returned for that event.

This operation supports resource-level permissions. You can use this operation to allow or deny access to specific Health events. For more information, see Resource- and action-based conditions in the Health User Guide.

", + "DescribeEventDetailsForOrganization": "

Returns detailed information about one or more specified events for one or more Amazon Web Services accounts in your organization. This information includes standard event data (such as the Amazon Web Services Region and service), an event description, and (depending on the event) possible metadata. This operation doesn't return affected entities, such as the resources related to the event. To return affected entities, use the DescribeAffectedEntitiesForOrganization operation.

Before you can call this operation, you must first enable Health to work with Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

When you call the DescribeEventDetailsForOrganization operation, specify the organizationEventDetailFilters object in the request. Depending on the Health event type, note the following differences:

For more information, see Event.

This operation doesn't support resource-level permissions. You can't use this operation to allow or deny access to specific Health events. For more information, see Resource- and action-based conditions in the Health User Guide.

", + "DescribeEventTypes": "

Returns the event types that meet the specified filter criteria. You can use this API operation to find information about the Health event, such as the category, Amazon Web Services service, and event code. The metadata for each event appears in the EventType object.

If you don't specify a filter criteria, the API operation returns all event types, in no particular order.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", + "DescribeEvents": "

Returns information about events that meet the specified filter criteria. Events are returned in a summary form and do not include the detailed description, any additional metadata that depends on the event type, or any affected resources. To retrieve that information, use the DescribeEventDetails and DescribeAffectedEntities operations.

If no filter criteria are specified, all events are returned. Results are sorted by lastModifiedTime, starting with the most recent event.

", + "DescribeEventsForOrganization": "

Returns information about events across your organization in Organizations. You can use the filters parameter to specify the events that you want to return. Events are returned in a summary form and don't include the affected accounts, detailed description, any additional metadata that depends on the event type, or any affected resources. To retrieve that information, use the following operations:

If you don't specify a filter, the DescribeEventsForOrganization operation returns all events across your organization. Results are sorted by lastModifiedTime, starting with the most recent event.

For more information about the different types of Health events, see Event.

Before you can call this operation, you must first enable Health to work with Organizations. To do this, call the EnableHealthServiceAccessForOrganization operation from your organization's management account.

This API operation uses pagination. Specify the nextToken parameter in the next request to return more results.

", + "DescribeHealthServiceStatusForOrganization": "

This operation provides status information on enabling or disabling Health to work with your organization. To call this operation, you must sign in as an IAM user, assume an IAM role, or sign in as the root user (not recommended) in the organization's management account.

", + "DisableHealthServiceAccessForOrganization": "

Disables Health from working with Organizations. To call this operation, you must sign in as an Identity and Access Management (IAM) user, assume an IAM role, or sign in as the root user (not recommended) in the organization's management account. For more information, see Aggregating Health events in the Health User Guide.

This operation doesn't remove the service-linked role from the management account in your organization. You must use the IAM console, API, or Command Line Interface (CLI) to remove the service-linked role. For more information, see Deleting a Service-Linked Role in the IAM User Guide.

You can also disable the organizational feature by using the Organizations DisableAWSServiceAccess API operation. After you call this operation, Health stops aggregating events for all other Amazon Web Services accounts in your organization. If you call the Health API operations for organizational view, Health returns an error. Health continues to aggregate health events for your Amazon Web Services account.

", + "EnableHealthServiceAccessForOrganization": "

Enables Health to work with Organizations. You can use the organizational view feature to aggregate events from all Amazon Web Services accounts in your organization in a centralized location.

This operation also creates a service-linked role for the management account in the organization.

To call this operation, you must meet the following requirements:

If you don't have the required support plan, you can instead use the Health console to enable the organizational view feature. For more information, see Aggregating Health events in the Health User Guide.

" }, "shapes": { "AffectedEntity": { @@ -185,7 +185,7 @@ } }, "EntityFilter": { - "base": "

The values to use to filter results from the EntityFilter operation.

", + "base": "

The values to use to filter results from the DescribeAffectedEntities operation.

", "refs": { "DescribeAffectedEntitiesRequest$filter": "

Values to narrow the results returned. At least one event ARN is required.

" } @@ -198,7 +198,7 @@ } }, "Event": { - "base": "

Summary information about an AWS Health event.

AWS Health events can be public or account-specific:

You can determine if an event is public or account-specific by using the eventScopeCode parameter. For more information, see eventScopeCode.

", + "base": "

Summary information about a Health event.

Health events can be public or account-specific:

You can determine if an event is public or account-specific by using the eventScopeCode parameter. For more information, see eventScopeCode.

", "refs": { "EventDetails$event": "

Summary information about the event.

", "EventList$member": null, @@ -263,7 +263,7 @@ } }, "EventType": { - "base": "

Contains the metadata about a type of event that is reported by AWS Health. The EventType shows the category, service, and the event type code of the event. For example, an issue might be the category, EC2 the service, and AWS_EC2_SYSTEM_MAINTENANCE_EVENT the event type code.

You can use the DescribeEventTypes API operation to return this information about an event.

You can also use the Amazon CloudWatch Events console to create a rule so that you can get notified or take action when AWS Health delivers a specific event to your AWS account. For more information, see Monitor for AWS Health events with Amazon CloudWatch Events in the AWS Health User Guide.

", + "base": "

Contains the metadata about a type of event that is reported by Health. The EventType shows the category, service, and the event type code of the event. For example, an issue might be the category, EC2 the service, and AWS_EC2_SYSTEM_MAINTENANCE_EVENT the event type code.

You can use the DescribeEventTypes API operation to return this information about an event.

You can also use the Amazon CloudWatch Events console to create a rule so that you can get notified or take action when Health delivers a specific event to your Amazon Web Services account. For more information, see Monitor for Health events with Amazon CloudWatch Events in the Health User Guide.

", "refs": { "EventTypeList$member": null } @@ -271,7 +271,7 @@ "EventTypeCategoryList": { "base": null, "refs": { - "EventTypeFilter$eventTypeCategories": "

A list of event type category codes (issue, scheduledChange, or accountNotification).

" + "EventTypeFilter$eventTypeCategories": "

A list of event type category codes. Possible values are issue, accountNotification, or scheduledChange. Currently, the investigation value isn't supported.

" } }, "EventTypeCodeList": { @@ -353,10 +353,10 @@ "accountId": { "base": null, "refs": { - "AffectedEntity$awsAccountId": "

The 12-digit AWS account number that contains the affected entity.

", - "EventAccountFilter$awsAccountId": "

The 12-digit AWS account numbers that contains the affected entities.

", - "OrganizationAffectedEntitiesErrorItem$awsAccountId": "

The 12-digit AWS account numbers that contains the affected entities.

", - "OrganizationEventDetails$awsAccountId": "

The 12-digit AWS account numbers that contains the affected entities.

", + "AffectedEntity$awsAccountId": "

The 12-digit Amazon Web Services account number that contains the affected entity.

", + "EventAccountFilter$awsAccountId": "

The 12-digit Amazon Web Services account numbers that contain the affected entities.

", + "OrganizationAffectedEntitiesErrorItem$awsAccountId": "

The 12-digit Amazon Web Services account numbers that contain the affected entities.

", + "OrganizationEventDetails$awsAccountId": "

The 12-digit Amazon Web Services account numbers that contain the affected entities.

", "OrganizationEventDetailsErrorItem$awsAccountId": "

Error information returned when a DescribeEventDetailsForOrganization operation can't find a specified event.

", "affectedAccountsList$member": null, "awsAccountIdsList$member": null @@ -377,20 +377,20 @@ "availabilityZone": { "base": null, "refs": { - "Event$availabilityZone": "

The AWS Availability Zone of the event. For example, us-east-1a.

", + "Event$availabilityZone": "

The Amazon Web Services Availability Zone of the event. For example, us-east-1a.

", "availabilityZones$member": null } }, "availabilityZones": { "base": null, "refs": { - "EventFilter$availabilityZones": "

A list of AWS Availability Zones.

" + "EventFilter$availabilityZones": "

A list of Amazon Web Services Availability Zones.

" } }, "awsAccountIdsList": { "base": null, "refs": { - "OrganizationEventFilter$awsAccountIds": "

A list of 12-digit AWS account numbers that contains the affected entities.

" + "OrganizationEventFilter$awsAccountIds": "

A list of 12-digit Amazon Web Services account numbers that contain the affected entities.

" } }, "count": { @@ -504,9 +504,9 @@ "eventScopeCode": { "base": null, "refs": { - "DescribeAffectedAccountsForOrganizationResponse$eventScopeCode": "

This parameter specifies if the AWS Health event is a public AWS service event or an account-specific event.

", - "Event$eventScopeCode": "

This parameter specifies if the AWS Health event is a public AWS service event or an account-specific event.

", - "OrganizationEvent$eventScopeCode": "

This parameter specifies if the AWS Health event is a public AWS service event or an account-specific event.

" + "DescribeAffectedAccountsForOrganizationResponse$eventScopeCode": "

This parameter specifies if the Health event is a public Amazon Web Services service event or an account-specific event.

", + "Event$eventScopeCode": "

This parameter specifies if the Health event is a public Amazon Web Services service event or an account-specific event.

", + "OrganizationEvent$eventScopeCode": "

This parameter specifies if the Health event is a public Amazon Web Services service event or an account-specific event.

" } }, "eventStatusCode": { @@ -533,18 +533,18 @@ "eventTypeCategory": { "base": null, "refs": { - "Event$eventTypeCategory": "

The category of the event. Possible values are issue, scheduledChange, and accountNotification.

", - "EventType$category": "

A list of event type category codes (issue, scheduledChange, or accountNotification).

", + "Event$eventTypeCategory": "

A list of event type category codes. Possible values are issue, accountNotification, or scheduledChange. Currently, the investigation value isn't supported.

", + "EventType$category": "

A list of event type category codes. Possible values are issue, accountNotification, or scheduledChange. Currently, the investigation value isn't supported.

", "EventTypeCategoryList$member": null, - "OrganizationEvent$eventTypeCategory": "

The category of the event type.

", + "OrganizationEvent$eventTypeCategory": "

A list of event type category codes. Possible values are issue, accountNotification, or scheduledChange. Currently, the investigation value isn't supported.

", "eventTypeCategoryList$member": null } }, "eventTypeCategoryList": { "base": null, "refs": { - "EventFilter$eventTypeCategories": "

A list of event type category codes (issue, scheduledChange, or accountNotification).

", - "OrganizationEventFilter$eventTypeCategories": "

A list of event type category codes (issue, scheduledChange, or accountNotification).

" + "EventFilter$eventTypeCategories": "

A list of event type category codes. Possible values are issue, accountNotification, or scheduledChange. Currently, the investigation value isn't supported.

", + "OrganizationEventFilter$eventTypeCategories": "

A list of event type category codes. Possible values are issue, accountNotification, or scheduledChange. Currently, the investigation value isn't supported.

" } }, "eventTypeCode": { @@ -566,7 +566,7 @@ "healthServiceAccessStatusForOrganization": { "base": null, "refs": { - "DescribeHealthServiceStatusForOrganizationResponse$healthServiceAccessStatusForOrganization": "

Information about the status of enabling or disabling AWS Health Organizational View in your organization.

Valid values are ENABLED | DISABLED | PENDING.

" + "DescribeHealthServiceStatusForOrganizationResponse$healthServiceAccessStatusForOrganization": "

Information about the status of enabling or disabling the Health organizational view feature in your organization.

Valid values are ENABLED | DISABLED | PENDING.

" } }, "locale": { @@ -632,33 +632,33 @@ "region": { "base": null, "refs": { - "Event$region": "

The AWS Region name of the event.

", - "OrganizationEvent$region": "

The AWS Region name of the event.

", + "Event$region": "

The Amazon Web Services Region name of the event.

", + "OrganizationEvent$region": "

The Amazon Web Services Region name of the event.

", "regionList$member": null } }, "regionList": { "base": null, "refs": { - "EventFilter$regions": "

A list of AWS Regions.

", - "OrganizationEventFilter$regions": "

A list of AWS Regions.

" + "EventFilter$regions": "

A list of Amazon Web Services Regions.

", + "OrganizationEventFilter$regions": "

A list of Amazon Web Services Regions.

" } }, "service": { "base": null, "refs": { - "Event$service": "

The AWS service that is affected by the event. For example, EC2, RDS.

", - "EventType$service": "

The AWS service that is affected by the event. For example, EC2, RDS.

", - "OrganizationEvent$service": "

The AWS service that is affected by the event, such as EC2 and RDS.

", + "Event$service": "

The Amazon Web Services service that is affected by the event. For example, EC2, RDS.

", + "EventType$service": "

The Amazon Web Services service that is affected by the event. For example, EC2, RDS.

", + "OrganizationEvent$service": "

The Amazon Web Services service that is affected by the event, such as EC2 and RDS.

", "serviceList$member": null } }, "serviceList": { "base": null, "refs": { - "EventFilter$services": "

The AWS services associated with the event. For example, EC2, RDS.

", - "EventTypeFilter$services": "

The AWS services associated with the event. For example, EC2, RDS.

", - "OrganizationEventFilter$services": "

The AWS services associated with the event. For example, EC2, RDS.

" + "EventFilter$services": "

The Amazon Web Services services associated with the event. For example, EC2, RDS.

", + "EventTypeFilter$services": "

The Amazon Web Services services associated with the event. For example, EC2, RDS.

", + "OrganizationEventFilter$services": "

The Amazon Web Services services associated with the event. For example, EC2, RDS.

" } }, "string": { @@ -671,7 +671,7 @@ "OrganizationAffectedEntitiesErrorItem$errorName": "

The name of the error.

", "OrganizationAffectedEntitiesErrorItem$errorMessage": "

The unique identifier for the event type. The format is AWS_SERVICE_DESCRIPTION. For example, AWS_EC2_SYSTEM_MAINTENANCE_EVENT.

", "OrganizationEventDetailsErrorItem$errorName": "

The name of the error.

", - "OrganizationEventDetailsErrorItem$errorMessage": "

A message that describes the error.

If you call the DescribeEventDetailsForOrganization operation and receive one of the following errors, follow the recommendations in the message:

", + "OrganizationEventDetailsErrorItem$errorMessage": "

A message that describes the error.

If you call the DescribeEventDetailsForOrganization operation and receive one of the following errors, follow the recommendations in the message:

", "UnsupportedLocale$message": null } }, diff --git a/models/endpoints/endpoints.json b/models/endpoints/endpoints.json index 9b46cf34c6e..ded375725a2 100644 --- a/models/endpoints/endpoints.json +++ b/models/endpoints/endpoints.json @@ -2431,6 +2431,106 @@ } } }, + "compute-optimizer" : { + "endpoints" : { + "ap-northeast-1" : { + "credentialScope" : { + "region" : "ap-northeast-1" + }, + "hostname" : "compute-optimizer.ap-northeast-1.amazonaws.com" + }, + "ap-northeast-2" : { + "credentialScope" : { + "region" : "ap-northeast-2" + }, + "hostname" : "compute-optimizer.ap-northeast-2.amazonaws.com" + }, + "ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "compute-optimizer.ap-south-1.amazonaws.com" + }, + "ap-southeast-1" : { + "credentialScope" : { + "region" : "ap-southeast-1" + }, + "hostname" : "compute-optimizer.ap-southeast-1.amazonaws.com" + }, + "ap-southeast-2" : { + "credentialScope" : { + "region" : "ap-southeast-2" + }, + "hostname" : "compute-optimizer.ap-southeast-2.amazonaws.com" + }, + "ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "compute-optimizer.ca-central-1.amazonaws.com" + }, + "eu-central-1" : { + "credentialScope" : { + "region" : "eu-central-1" + }, + "hostname" : "compute-optimizer.eu-central-1.amazonaws.com" + }, + "eu-north-1" : { + "credentialScope" : { + "region" : "eu-north-1" + }, + "hostname" : "compute-optimizer.eu-north-1.amazonaws.com" + }, + "eu-west-1" : { + "credentialScope" : { + "region" : "eu-west-1" + }, + "hostname" : "compute-optimizer.eu-west-1.amazonaws.com" + }, + "eu-west-2" : { + "credentialScope" : { + "region" : "eu-west-2" + }, + "hostname" : "compute-optimizer.eu-west-2.amazonaws.com" + }, + "eu-west-3" : { + "credentialScope" : { + "region" : "eu-west-3" + }, + "hostname" : "compute-optimizer.eu-west-3.amazonaws.com" + }, + "sa-east-1" : { + "credentialScope" : { + "region" : "sa-east-1" + }, + "hostname" : 
"compute-optimizer.sa-east-1.amazonaws.com" + }, + "us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "compute-optimizer.us-east-1.amazonaws.com" + }, + "us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "compute-optimizer.us-east-2.amazonaws.com" + }, + "us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "compute-optimizer.us-west-1.amazonaws.com" + }, + "us-west-2" : { + "credentialScope" : { + "region" : "us-west-2" + }, + "hostname" : "compute-optimizer.us-west-2.amazonaws.com" + } + } + }, "config" : { "endpoints" : { "af-south-1" : { }, @@ -10750,13 +10850,20 @@ "ssm-incidents" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-north-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -12358,6 +12465,22 @@ "cn-north-1" : { } } }, + "compute-optimizer" : { + "endpoints" : { + "cn-north-1" : { + "credentialScope" : { + "region" : "cn-north-1" + }, + "hostname" : "compute-optimizer.cn-north-1.amazonaws.com.cn" + }, + "cn-northwest-1" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "compute-optimizer.cn-northwest-1.amazonaws.com.cn" + } + } + }, "config" : { "endpoints" : { "cn-north-1" : { }, diff --git a/service/batch/api.go b/service/batch/api.go index 759e9d42242..e5066ef4562 100644 --- a/service/batch/api.go +++ b/service/batch/api.go @@ -318,6 +318,90 @@ func (c *Batch) CreateJobQueueWithContext(ctx aws.Context, input *CreateJobQueue return out, req.Send() } +const opCreateSchedulingPolicy = "CreateSchedulingPolicy" + +// CreateSchedulingPolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreateSchedulingPolicy operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSchedulingPolicy for more information on using the CreateSchedulingPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateSchedulingPolicyRequest method. +// req, resp := client.CreateSchedulingPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/CreateSchedulingPolicy +func (c *Batch) CreateSchedulingPolicyRequest(input *CreateSchedulingPolicyInput) (req *request.Request, output *CreateSchedulingPolicyOutput) { + op := &request.Operation{ + Name: opCreateSchedulingPolicy, + HTTPMethod: "POST", + HTTPPath: "/v1/createschedulingpolicy", + } + + if input == nil { + input = &CreateSchedulingPolicyInput{} + } + + output = &CreateSchedulingPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSchedulingPolicy API operation for AWS Batch. +// +// Creates an Batch scheduling policy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Batch's +// API operation CreateSchedulingPolicy for usage and error information. 
+// +// Returned Error Types: +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that's not valid. +// +// * ServerException +// These errors are usually caused by a server issue. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/CreateSchedulingPolicy +func (c *Batch) CreateSchedulingPolicy(input *CreateSchedulingPolicyInput) (*CreateSchedulingPolicyOutput, error) { + req, out := c.CreateSchedulingPolicyRequest(input) + return out, req.Send() +} + +// CreateSchedulingPolicyWithContext is the same as CreateSchedulingPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSchedulingPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Batch) CreateSchedulingPolicyWithContext(ctx aws.Context, input *CreateSchedulingPolicyInput, opts ...request.Option) (*CreateSchedulingPolicyOutput, error) { + req, out := c.CreateSchedulingPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeleteComputeEnvironment = "DeleteComputeEnvironment" // DeleteComputeEnvironmentRequest generates a "aws/request.Request" representing the @@ -501,6 +585,93 @@ func (c *Batch) DeleteJobQueueWithContext(ctx aws.Context, input *DeleteJobQueue return out, req.Send() } +const opDeleteSchedulingPolicy = "DeleteSchedulingPolicy" + +// DeleteSchedulingPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSchedulingPolicy operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteSchedulingPolicy for more information on using the DeleteSchedulingPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteSchedulingPolicyRequest method. +// req, resp := client.DeleteSchedulingPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/DeleteSchedulingPolicy +func (c *Batch) DeleteSchedulingPolicyRequest(input *DeleteSchedulingPolicyInput) (req *request.Request, output *DeleteSchedulingPolicyOutput) { + op := &request.Operation{ + Name: opDeleteSchedulingPolicy, + HTTPMethod: "POST", + HTTPPath: "/v1/deleteschedulingpolicy", + } + + if input == nil { + input = &DeleteSchedulingPolicyInput{} + } + + output = &DeleteSchedulingPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteSchedulingPolicy API operation for AWS Batch. +// +// Deletes the specified scheduling policy. +// +// You can't delete a scheduling policy that is used in any job queues. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Batch's +// API operation DeleteSchedulingPolicy for usage and error information. 
+// +// Returned Error Types: +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that's not valid. +// +// * ServerException +// These errors are usually caused by a server issue. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/DeleteSchedulingPolicy +func (c *Batch) DeleteSchedulingPolicy(input *DeleteSchedulingPolicyInput) (*DeleteSchedulingPolicyOutput, error) { + req, out := c.DeleteSchedulingPolicyRequest(input) + return out, req.Send() +} + +// DeleteSchedulingPolicyWithContext is the same as DeleteSchedulingPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSchedulingPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Batch) DeleteSchedulingPolicyWithContext(ctx aws.Context, input *DeleteSchedulingPolicyInput, opts ...request.Option) (*DeleteSchedulingPolicyOutput, error) { + req, out := c.DeleteSchedulingPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opDeregisterJobDefinition = "DeregisterJobDefinition" // DeregisterJobDefinitionRequest generates a "aws/request.Request" representing the @@ -1102,6 +1273,90 @@ func (c *Batch) DescribeJobsWithContext(ctx aws.Context, input *DescribeJobsInpu return out, req.Send() } +const opDescribeSchedulingPolicies = "DescribeSchedulingPolicies" + +// DescribeSchedulingPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSchedulingPolicies operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeSchedulingPolicies for more information on using the DescribeSchedulingPolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeSchedulingPoliciesRequest method. +// req, resp := client.DescribeSchedulingPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/DescribeSchedulingPolicies +func (c *Batch) DescribeSchedulingPoliciesRequest(input *DescribeSchedulingPoliciesInput) (req *request.Request, output *DescribeSchedulingPoliciesOutput) { + op := &request.Operation{ + Name: opDescribeSchedulingPolicies, + HTTPMethod: "POST", + HTTPPath: "/v1/describeschedulingpolicies", + } + + if input == nil { + input = &DescribeSchedulingPoliciesInput{} + } + + output = &DescribeSchedulingPoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeSchedulingPolicies API operation for AWS Batch. +// +// Describes one or more of your scheduling policies. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Batch's +// API operation DescribeSchedulingPolicies for usage and error information. 
+// +// Returned Error Types: +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that's not valid. +// +// * ServerException +// These errors are usually caused by a server issue. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/DescribeSchedulingPolicies +func (c *Batch) DescribeSchedulingPolicies(input *DescribeSchedulingPoliciesInput) (*DescribeSchedulingPoliciesOutput, error) { + req, out := c.DescribeSchedulingPoliciesRequest(input) + return out, req.Send() +} + +// DescribeSchedulingPoliciesWithContext is the same as DescribeSchedulingPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeSchedulingPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Batch) DescribeSchedulingPoliciesWithContext(ctx aws.Context, input *DescribeSchedulingPoliciesInput, opts ...request.Option) (*DescribeSchedulingPoliciesOutput, error) { + req, out := c.DescribeSchedulingPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opListJobs = "ListJobs" // ListJobsRequest generates a "aws/request.Request" representing the @@ -1255,6 +1510,148 @@ func (c *Batch) ListJobsPagesWithContext(ctx aws.Context, input *ListJobsInput, return p.Err() } +const opListSchedulingPolicies = "ListSchedulingPolicies" + +// ListSchedulingPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListSchedulingPolicies operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListSchedulingPolicies for more information on using the ListSchedulingPolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListSchedulingPoliciesRequest method. +// req, resp := client.ListSchedulingPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/ListSchedulingPolicies +func (c *Batch) ListSchedulingPoliciesRequest(input *ListSchedulingPoliciesInput) (req *request.Request, output *ListSchedulingPoliciesOutput) { + op := &request.Operation{ + Name: opListSchedulingPolicies, + HTTPMethod: "POST", + HTTPPath: "/v1/listschedulingpolicies", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListSchedulingPoliciesInput{} + } + + output = &ListSchedulingPoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSchedulingPolicies API operation for AWS Batch. +// +// Returns a list of Batch scheduling policies. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Batch's +// API operation ListSchedulingPolicies for usage and error information. 
+// +// Returned Error Types: +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that's not valid. +// +// * ServerException +// These errors are usually caused by a server issue. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/ListSchedulingPolicies +func (c *Batch) ListSchedulingPolicies(input *ListSchedulingPoliciesInput) (*ListSchedulingPoliciesOutput, error) { + req, out := c.ListSchedulingPoliciesRequest(input) + return out, req.Send() +} + +// ListSchedulingPoliciesWithContext is the same as ListSchedulingPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See ListSchedulingPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Batch) ListSchedulingPoliciesWithContext(ctx aws.Context, input *ListSchedulingPoliciesInput, opts ...request.Option) (*ListSchedulingPoliciesOutput, error) { + req, out := c.ListSchedulingPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListSchedulingPoliciesPages iterates over the pages of a ListSchedulingPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSchedulingPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSchedulingPolicies operation. 
+// pageNum := 0 +// err := client.ListSchedulingPoliciesPages(params, +// func(page *batch.ListSchedulingPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *Batch) ListSchedulingPoliciesPages(input *ListSchedulingPoliciesInput, fn func(*ListSchedulingPoliciesOutput, bool) bool) error { + return c.ListSchedulingPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListSchedulingPoliciesPagesWithContext same as ListSchedulingPoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Batch) ListSchedulingPoliciesPagesWithContext(ctx aws.Context, input *ListSchedulingPoliciesInput, fn func(*ListSchedulingPoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListSchedulingPoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListSchedulingPoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListSchedulingPoliciesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + const opListTagsForResource = "ListTagsForResource" // ListTagsForResourceRequest generates a "aws/request.Request" representing the @@ -1300,8 +1697,8 @@ func (c *Batch) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req // ListTagsForResource API operation for AWS Batch. // // Lists the tags for an Batch resource. Batch resources that support tags are -// compute environments, jobs, job definitions, and job queues. 
ARNs for child -// jobs of array and multi-node parallel (MNP) jobs are not supported. +// compute environments, jobs, job definitions, job queues, and scheduling policies. +// ARNs for child jobs of array and multi-node parallel (MNP) jobs are not supported. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1471,12 +1868,15 @@ func (c *Batch) SubmitJobRequest(input *SubmitJobInput) (req *request.Request, o // // Submits an Batch job from a job definition. Parameters that are specified // during SubmitJob override parameters defined in the job definition. vCPU -// and memory requirements that are specified in the ResourceRequirements objects +// and memory requirements that are specified in the resourceRequirements objects // in the job definition are the exception. They can't be overridden this way // using the memory and vcpus parameters. Rather, you must specify updates to // job definition parameters in a ResourceRequirements object that's included // in the containerOverrides parameter. // +// Job queues with a scheduling policy are limited to 500 active fair share +// identifiers at a time. +// // Jobs that run on Fargate resources can't be guaranteed to run for more than // 14 days. This is because, after 14 days, Fargate resources might become unavailable // and job might be terminated. @@ -1568,8 +1968,9 @@ func (c *Batch) TagResourceRequest(input *TagResourceInput) (req *request.Reques // If existing tags on a resource aren't specified in the request parameters, // they aren't changed. When a resource is deleted, the tags that are associated // with that resource are deleted as well. Batch resources that support tags -// are compute environments, jobs, job definitions, and job queues. ARNs for -// child jobs of array and multi-node parallel (MNP) jobs are not supported. 
+// are compute environments, jobs, job definitions, job queues, and scheduling +// policies. ARNs for child jobs of array and multi-node parallel (MNP) jobs +// are not supported. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1791,48 +2192,132 @@ const opUpdateComputeEnvironment = "UpdateComputeEnvironment" // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateComputeEnvironment for more information on using the UpdateComputeEnvironment +// See UpdateComputeEnvironment for more information on using the UpdateComputeEnvironment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateComputeEnvironmentRequest method. +// req, resp := client.UpdateComputeEnvironmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/UpdateComputeEnvironment +func (c *Batch) UpdateComputeEnvironmentRequest(input *UpdateComputeEnvironmentInput) (req *request.Request, output *UpdateComputeEnvironmentOutput) { + op := &request.Operation{ + Name: opUpdateComputeEnvironment, + HTTPMethod: "POST", + HTTPPath: "/v1/updatecomputeenvironment", + } + + if input == nil { + input = &UpdateComputeEnvironmentInput{} + } + + output = &UpdateComputeEnvironmentOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateComputeEnvironment API operation for AWS Batch. +// +// Updates an Batch compute environment. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Batch's +// API operation UpdateComputeEnvironment for usage and error information. +// +// Returned Error Types: +// * ClientException +// These errors are usually caused by a client action, such as using an action +// or resource on behalf of a user that doesn't have permissions to use the +// action or resource, or specifying an identifier that's not valid. +// +// * ServerException +// These errors are usually caused by a server issue. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/UpdateComputeEnvironment +func (c *Batch) UpdateComputeEnvironment(input *UpdateComputeEnvironmentInput) (*UpdateComputeEnvironmentOutput, error) { + req, out := c.UpdateComputeEnvironmentRequest(input) + return out, req.Send() +} + +// UpdateComputeEnvironmentWithContext is the same as UpdateComputeEnvironment with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateComputeEnvironment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Batch) UpdateComputeEnvironmentWithContext(ctx aws.Context, input *UpdateComputeEnvironmentInput, opts ...request.Option) (*UpdateComputeEnvironmentOutput, error) { + req, out := c.UpdateComputeEnvironmentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateJobQueue = "UpdateJobQueue" + +// UpdateJobQueueRequest generates a "aws/request.Request" representing the +// client's request for the UpdateJobQueue operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateJobQueue for more information on using the UpdateJobQueue // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateComputeEnvironmentRequest method. -// req, resp := client.UpdateComputeEnvironmentRequest(params) +// // Example sending a request using the UpdateJobQueueRequest method. +// req, resp := client.UpdateJobQueueRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/UpdateComputeEnvironment -func (c *Batch) UpdateComputeEnvironmentRequest(input *UpdateComputeEnvironmentInput) (req *request.Request, output *UpdateComputeEnvironmentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/UpdateJobQueue +func (c *Batch) UpdateJobQueueRequest(input *UpdateJobQueueInput) (req *request.Request, output *UpdateJobQueueOutput) { op := &request.Operation{ - Name: opUpdateComputeEnvironment, + Name: opUpdateJobQueue, HTTPMethod: "POST", - HTTPPath: "/v1/updatecomputeenvironment", + HTTPPath: "/v1/updatejobqueue", } if input == nil { - input = &UpdateComputeEnvironmentInput{} + input = &UpdateJobQueueInput{} } - output = &UpdateComputeEnvironmentOutput{} + output = &UpdateJobQueueOutput{} req = c.newRequest(op, input, output) return } -// UpdateComputeEnvironment API operation for AWS Batch. +// UpdateJobQueue API operation for AWS Batch. // -// Updates an Batch compute environment. +// Updates a job queue. 
// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Batch's -// API operation UpdateComputeEnvironment for usage and error information. +// API operation UpdateJobQueue for usage and error information. // // Returned Error Types: // * ClientException @@ -1843,80 +2328,81 @@ func (c *Batch) UpdateComputeEnvironmentRequest(input *UpdateComputeEnvironmentI // * ServerException // These errors are usually caused by a server issue. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/UpdateComputeEnvironment -func (c *Batch) UpdateComputeEnvironment(input *UpdateComputeEnvironmentInput) (*UpdateComputeEnvironmentOutput, error) { - req, out := c.UpdateComputeEnvironmentRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/UpdateJobQueue +func (c *Batch) UpdateJobQueue(input *UpdateJobQueueInput) (*UpdateJobQueueOutput, error) { + req, out := c.UpdateJobQueueRequest(input) return out, req.Send() } -// UpdateComputeEnvironmentWithContext is the same as UpdateComputeEnvironment with the addition of +// UpdateJobQueueWithContext is the same as UpdateJobQueue with the addition of // the ability to pass a context and additional request options. // -// See UpdateComputeEnvironment for details on how to use this API operation. +// See UpdateJobQueue for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *Batch) UpdateComputeEnvironmentWithContext(ctx aws.Context, input *UpdateComputeEnvironmentInput, opts ...request.Option) (*UpdateComputeEnvironmentOutput, error) { - req, out := c.UpdateComputeEnvironmentRequest(input) +func (c *Batch) UpdateJobQueueWithContext(ctx aws.Context, input *UpdateJobQueueInput, opts ...request.Option) (*UpdateJobQueueOutput, error) { + req, out := c.UpdateJobQueueRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateJobQueue = "UpdateJobQueue" +const opUpdateSchedulingPolicy = "UpdateSchedulingPolicy" -// UpdateJobQueueRequest generates a "aws/request.Request" representing the -// client's request for the UpdateJobQueue operation. The "output" return +// UpdateSchedulingPolicyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSchedulingPolicy operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateJobQueue for more information on using the UpdateJobQueue +// See UpdateSchedulingPolicy for more information on using the UpdateSchedulingPolicy // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateJobQueueRequest method. -// req, resp := client.UpdateJobQueueRequest(params) +// // Example sending a request using the UpdateSchedulingPolicyRequest method. 
+// req, resp := client.UpdateSchedulingPolicyRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/UpdateJobQueue -func (c *Batch) UpdateJobQueueRequest(input *UpdateJobQueueInput) (req *request.Request, output *UpdateJobQueueOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/UpdateSchedulingPolicy +func (c *Batch) UpdateSchedulingPolicyRequest(input *UpdateSchedulingPolicyInput) (req *request.Request, output *UpdateSchedulingPolicyOutput) { op := &request.Operation{ - Name: opUpdateJobQueue, + Name: opUpdateSchedulingPolicy, HTTPMethod: "POST", - HTTPPath: "/v1/updatejobqueue", + HTTPPath: "/v1/updateschedulingpolicy", } if input == nil { - input = &UpdateJobQueueInput{} + input = &UpdateSchedulingPolicyInput{} } - output = &UpdateJobQueueOutput{} + output = &UpdateSchedulingPolicyOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } -// UpdateJobQueue API operation for AWS Batch. +// UpdateSchedulingPolicy API operation for AWS Batch. // -// Updates a job queue. +// Updates a scheduling policy. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for AWS Batch's -// API operation UpdateJobQueue for usage and error information. +// API operation UpdateSchedulingPolicy for usage and error information. // // Returned Error Types: // * ClientException @@ -1927,23 +2413,23 @@ func (c *Batch) UpdateJobQueueRequest(input *UpdateJobQueueInput) (req *request. // * ServerException // These errors are usually caused by a server issue. 
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/UpdateJobQueue -func (c *Batch) UpdateJobQueue(input *UpdateJobQueueInput) (*UpdateJobQueueOutput, error) { - req, out := c.UpdateJobQueueRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/batch-2016-08-10/UpdateSchedulingPolicy +func (c *Batch) UpdateSchedulingPolicy(input *UpdateSchedulingPolicyInput) (*UpdateSchedulingPolicyOutput, error) { + req, out := c.UpdateSchedulingPolicyRequest(input) return out, req.Send() } -// UpdateJobQueueWithContext is the same as UpdateJobQueue with the addition of +// UpdateSchedulingPolicyWithContext is the same as UpdateSchedulingPolicy with the addition of // the ability to pass a context and additional request options. // -// See UpdateJobQueue for details on how to use this API operation. +// See UpdateSchedulingPolicy for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *Batch) UpdateJobQueueWithContext(ctx aws.Context, input *UpdateJobQueueInput, opts ...request.Option) (*UpdateJobQueueOutput, error) { - req, out := c.UpdateJobQueueRequest(input) +func (c *Batch) UpdateSchedulingPolicyWithContext(ctx aws.Context, input *UpdateSchedulingPolicyInput, opts ...request.Option) (*UpdateSchedulingPolicyOutput, error) { + req, out := c.UpdateSchedulingPolicyRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() @@ -2432,6 +2918,10 @@ type ComputeEnvironmentDetail struct { // see Compute Environments (https://docs.aws.amazon.com/batch/latest/userguide/compute_environments.html) // in the Batch User Guide. 
Type *string `locationName:"type" type:"string" enum:"CEType"` + + // The maximum number of VCPUs expected to be used for an unmanaged compute + // environment. + UnmanagedvCpus *int64 `locationName:"unmanagedvCpus" type:"integer"` } // String returns the string representation. @@ -2512,6 +3002,12 @@ func (s *ComputeEnvironmentDetail) SetType(v string) *ComputeEnvironmentDetail { return s } +// SetUnmanagedvCpus sets the UnmanagedvCpus field's value. +func (s *ComputeEnvironmentDetail) SetUnmanagedvCpus(v int64) *ComputeEnvironmentDetail { + s.UnmanagedvCpus = &v + return s +} + // The order in which compute environments are tried for job placement within // a queue. Compute environments are tried in ascending order. For example, // if two compute environments are associated with a job queue, the compute @@ -2657,7 +3153,9 @@ type ComputeResource struct { // Provides information used to select Amazon Machine Images (AMIs) for EC2 // instances in the compute environment. If Ec2Configuration isn't specified, - // the default is ECS_AL1. + // the default is ECS_AL2. + // + // One or two values can be provided. // // This parameter isn't applicable to jobs that are running on Fargate resources, // and shouldn't be specified. @@ -2796,7 +3294,7 @@ type ComputeResource struct { // where String1 is the tag key and String2 is the tag value−for example, // { "Name": "Batch Instance - C4OnDemand" }. This is helpful for recognizing // your Batch instances in the Amazon EC2 console. These tags can't be updated - // or removed after the compute environment is created.Aany changes to these + // or removed after the compute environment is created. Any changes to these // tags require that you create a new compute environment and remove the old // compute environment. These tags aren't seen when using the Batch ListTagsForResource // API operation. 
@@ -3141,7 +3639,7 @@ type ContainerDetail struct { LogStreamName *string `locationName:"logStreamName" type:"string"` // For jobs run on EC2 resources that didn't specify memory requirements using - // ResourceRequirement, the number of MiB of memory reserved for the job. For + // resourceRequirements, the number of MiB of memory reserved for the job. For // other jobs, including all run on Fargate resources, see resourceRequirements. Memory *int64 `locationName:"memory" type:"integer"` @@ -3204,7 +3702,7 @@ type ContainerDetail struct { // The number of vCPUs reserved for the container. For jobs that run on EC2 // resources, you can specify the vCPU requirement for the job using resourceRequirements, - // but you can't specify the vCPU requirements in both the vcpus and resourceRequirement + // but you can't specify the vCPU requirements in both the vcpus and resourceRequirements // object. This parameter maps to CpuShares in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) // and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/). @@ -3417,17 +3915,17 @@ type ContainerOverrides struct { // run on Fargate resources, and shouldn't be provided. InstanceType *string `locationName:"instanceType" type:"string"` - // This parameter indicates the amount of memory (in MiB) that's reserved for - // the job. It overrides the memory parameter set in the job definition, but - // doesn't override any memory requirement specified in the ResourceRequirement - // structure in the job definition. To override memory requirements that are - // specified in the ResourceRequirement structure in the job definition, ResourceRequirement + // This parameter is deprecated, use resourceRequirements to override the memory + // requirements specified in the job definition. It's not supported for jobs + // that run on Fargate resources. 
For jobs run on EC2 resources, it overrides + // the memory parameter set in the job definition, but doesn't override any + // memory requirement specified in the resourceRequirements structure in the + // job definition. To override memory requirements that are specified in the + // resourceRequirements structure in the job definition, resourceRequirements // must be specified in the SubmitJob request, with type set to MEMORY and value - // set to the new value. - // - // This parameter is supported for jobs that run on EC2 resources, but isn't - // supported for jobs that run on Fargate resources. For these resources, use - // resourceRequirement instead. + // set to the new value. For more information, see Can't override job definition + // resource requirements (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#override-resource-requirements) + // in the Batch User Guide. // // Deprecated: This field is deprecated, use resourceRequirements instead. Memory *int64 `locationName:"memory" deprecated:"true" type:"integer"` @@ -3437,24 +3935,17 @@ type ContainerOverrides struct { // MEMORY, and VCPU. ResourceRequirements []*ResourceRequirement `locationName:"resourceRequirements" type:"list"` - // This parameter indicates the number of vCPUs reserved for the container.It - // overrides the vcpus parameter that's set in the job definition, but doesn't - // override any vCPU requirement specified in the resourceRequirement structure - // in the job definition. To override vCPU requirements that are specified in - // the ResourceRequirement structure in the job definition, ResourceRequirement - // must be specified in the SubmitJob request, with type set to VCPU and value - // set to the new value. 
- // - // This parameter maps to CpuShares in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) - // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) - // and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/). - // Each vCPU is equivalent to 1,024 CPU shares. You must specify at least one - // vCPU. - // - // This parameter is supported for jobs that run on EC2 resources, but isn't - // supported for jobs that run on Fargate resources. For Fargate resources, - // you can only use resourceRequirement. For EC2 resources, you can use either - // this parameter or resourceRequirement but not both. + // This parameter is deprecated, use resourceRequirements to override the vcpus + // parameter that's set in the job definition. It's not supported for jobs that + // run on Fargate resources. For jobs run on EC2 resources, it overrides the + // vcpus parameter set in the job definition, but doesn't override any vCPU + // requirement specified in the resourceRequirements structure in the job definition. + // To override vCPU requirements that are specified in the resourceRequirements + // structure in the job definition, resourceRequirements must be specified in + // the SubmitJob request, with type set to VCPU and value set to the new value. + // For more information, see Can't override job definition resource requirements + // (https://docs.aws.amazon.com/batch/latest/userguide/troubleshooting.html#override-resource-requirements) + // in the Batch User Guide. // // Deprecated: This field is deprecated, use resourceRequirements instead. Vcpus *int64 `locationName:"vcpus" deprecated:"true" type:"integer"` @@ -3642,24 +4133,13 @@ type ContainerProperties struct { // in the Amazon Elastic Container Service Developer Guide. 
LogConfiguration *LogConfiguration `locationName:"logConfiguration" type:"structure"` - // This parameter indicates the memory hard limit (in MiB) for a container. - // If your container attempts to exceed the specified number, it's terminated. - // You must specify at least 4 MiB of memory for a job using this parameter. - // The memory hard limit can be specified in several places. It must be specified - // for each node at least once. - // - // This parameter maps to Memory in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) - // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) - // and the --memory option to docker run (https://docs.docker.com/engine/reference/run/). - // - // This parameter is supported on EC2 resources but isn't supported on Fargate - // resources. For Fargate resources, you should specify the memory requirement - // using resourceRequirement. You can also do this for EC2 resources. - // - // If you're trying to maximize your resource utilization by providing your - // jobs as much memory as possible for a particular instance type, see Memory - // Management (https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html) - // in the Batch User Guide. + // This parameter is deprecated, use resourceRequirements to specify the memory + // requirements for the job definition. It's not supported for jobs that run + // on Fargate resources. For jobs run on EC2 resources, it specifies the memory + // hard limit (in MiB) for a container. If your container attempts to exceed + // the specified number, it's terminated. You must specify at least 4 MiB of + // memory for a job using this parameter. The memory hard limit can be specified + // in several places. It must be specified for each node at least once. // // Deprecated: This field is deprecated, use resourceRequirements instead. 
Memory *int64 `locationName:"memory" deprecated:"true" type:"integer"` @@ -3716,22 +4196,18 @@ type ContainerProperties struct { // and the --user option to docker run (https://docs.docker.com/engine/reference/run/). User *string `locationName:"user" type:"string"` - // The number of vCPUs reserved for the job. Each vCPU is equivalent to 1,024 - // CPU shares. This parameter maps to CpuShares in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) + // This parameter is deprecated, use resourceRequirements to specify the vCPU + // requirements for the job definition. It's not supported for jobs that run + // on Fargate resources. For jobs run on EC2 resources, it specifies the number + // of vCPUs reserved for the job. + // + // Each vCPU is equivalent to 1,024 CPU shares. This parameter maps to CpuShares + // in the Create a container (https://docs.docker.com/engine/api/v1.23/#create-a-container) // section of the Docker Remote API (https://docs.docker.com/engine/api/v1.23/) // and the --cpu-shares option to docker run (https://docs.docker.com/engine/reference/run/). // The number of vCPUs must be specified but can be specified in several places. // You must specify it at least once for each node. // - // This parameter is supported on EC2 resources but isn't supported for jobs - // that run on Fargate resources. For these resources, use resourceRequirement - // instead. You can use this parameter or resourceRequirements structure but - // not both. - // - // This parameter isn't applicable to jobs that are running on Fargate resources - // and shouldn't be provided. For jobs that run on Fargate resources, you must - // specify the vCPU requirement for the job using resourceRequirements. - // // Deprecated: This field is deprecated, use resourceRequirements instead. 
Vcpus *int64 `locationName:"vcpus" deprecated:"true" type:"integer"` @@ -4052,6 +4528,14 @@ type CreateComputeEnvironmentInput struct { // // Type is a required field Type *string `locationName:"type" type:"string" required:"true" enum:"CEType"` + + // The maximum number of vCPUs for an unmanaged compute environment. This parameter + // is only used for fair share scheduling to reserve vCPU capacity for new share + // identifiers. If this parameter is not provided for a fair share job queue, + // no vCPU capacity will be reserved. + // + // This parameter is only supported when the type parameter is set to UNMANAGED. + UnmanagedvCpus *int64 `locationName:"unmanagedvCpus" type:"integer"` } // String returns the string representation. @@ -4132,6 +4616,12 @@ func (s *CreateComputeEnvironmentInput) SetType(v string) *CreateComputeEnvironm return s } +// SetUnmanagedvCpus sets the UnmanagedvCpus field's value. +func (s *CreateComputeEnvironmentInput) SetUnmanagedvCpus(v int64) *CreateComputeEnvironmentInput { + s.UnmanagedvCpus = &v + return s +} + type CreateComputeEnvironmentOutput struct { _ struct{} `type:"structure"` @@ -4209,6 +4699,14 @@ type CreateJobQueueInput struct { // Priority is a required field Priority *int64 `locationName:"priority" type:"integer" required:"true"` + // Amazon Resource Name (ARN) of the fair share scheduling policy. If this parameter + // is specified, the job queue will use a fair share scheduling policy. If this + // parameter is not specified, the job queue will use a first in, first out + // (FIFO) scheduling policy. Once a job queue is created, the fair share scheduling + // policy can be replaced but not removed. The format is aws:Partition:batch:Region:Account:scheduling-policy/Name + // . For example, aws:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy. + SchedulingPolicyArn *string `locationName:"schedulingPolicyArn" type:"string"` + // The state of the job queue.
If the job queue state is ENABLED, it is able // to accept jobs. If the job queue state is DISABLED, new jobs can't be added // to the queue, but jobs already in the queue can finish. @@ -4289,6 +4787,12 @@ func (s *CreateJobQueueInput) SetPriority(v int64) *CreateJobQueueInput { return s } +// SetSchedulingPolicyArn sets the SchedulingPolicyArn field's value. +func (s *CreateJobQueueInput) SetSchedulingPolicyArn(v string) *CreateJobQueueInput { + s.SchedulingPolicyArn = &v + return s +} + // SetState sets the State field's value. func (s *CreateJobQueueInput) SetState(v string) *CreateJobQueueInput { s.State = &v @@ -4345,6 +4849,131 @@ func (s *CreateJobQueueOutput) SetJobQueueName(v string) *CreateJobQueueOutput { return s } +type CreateSchedulingPolicyInput struct { + _ struct{} `type:"structure"` + + // The fair share policy of the scheduling policy. + FairsharePolicy *FairsharePolicy `locationName:"fairsharePolicy" type:"structure"` + + // The name of the scheduling policy. Up to 128 letters (uppercase and lowercase), + // numbers, hyphens, and underscores are allowed. + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` + + // The tags that you apply to the scheduling policy to help you categorize and + // organize your resources. Each tag consists of a key and an optional value. + // For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in Amazon Web Services General Reference. + // + // These tags can be updated or removed using the TagResource (https://docs.aws.amazon.com/batch/latest/APIReference/API_TagResource.html) + // and UntagResource (https://docs.aws.amazon.com/batch/latest/APIReference/API_UntagResource.html) + // API operations. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSchedulingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSchedulingPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSchedulingPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSchedulingPolicyInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Tags != nil && len(s.Tags) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Tags", 1)) + } + if s.FairsharePolicy != nil { + if err := s.FairsharePolicy.Validate(); err != nil { + invalidParams.AddNested("FairsharePolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFairsharePolicy sets the FairsharePolicy field's value. +func (s *CreateSchedulingPolicyInput) SetFairsharePolicy(v *FairsharePolicy) *CreateSchedulingPolicyInput { + s.FairsharePolicy = v + return s +} + +// SetName sets the Name field's value. +func (s *CreateSchedulingPolicyInput) SetName(v string) *CreateSchedulingPolicyInput { + s.Name = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateSchedulingPolicyInput) SetTags(v map[string]*string) *CreateSchedulingPolicyInput { + s.Tags = v + return s +} + +type CreateSchedulingPolicyOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the scheduling policy. 
The format is aws:Partition:batch:Region:Account:scheduling-policy/Name + // . For example, aws:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy. + // + // Arn is a required field + Arn *string `locationName:"arn" type:"string" required:"true"` + + // The name of the scheduling policy. + // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSchedulingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSchedulingPolicyOutput) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *CreateSchedulingPolicyOutput) SetArn(v string) *CreateSchedulingPolicyOutput { + s.Arn = &v + return s +} + +// SetName sets the Name field's value. +func (s *CreateSchedulingPolicyOutput) SetName(v string) *CreateSchedulingPolicyOutput { + s.Name = &v + return s +} + // Contains the parameters for DeleteComputeEnvironment. type DeleteComputeEnvironmentInput struct { _ struct{} `type:"structure"` @@ -4438,15 +5067,83 @@ func (s DeleteJobQueueInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s DeleteJobQueueInput) GoString() string { +func (s DeleteJobQueueInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteJobQueueInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteJobQueueInput"} + if s.JobQueue == nil { + invalidParams.Add(request.NewErrParamRequired("JobQueue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobQueue sets the JobQueue field's value. +func (s *DeleteJobQueueInput) SetJobQueue(v string) *DeleteJobQueueInput { + s.JobQueue = &v + return s +} + +type DeleteJobQueueOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteJobQueueOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteJobQueueOutput) GoString() string { + return s.String() +} + +type DeleteSchedulingPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the scheduling policy to delete. + // + // Arn is a required field + Arn *string `locationName:"arn" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteSchedulingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteSchedulingPolicyInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteJobQueueInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteJobQueueInput"} - if s.JobQueue == nil { - invalidParams.Add(request.NewErrParamRequired("JobQueue")) +func (s *DeleteSchedulingPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSchedulingPolicyInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) } if invalidParams.Len() > 0 { @@ -4455,13 +5152,13 @@ func (s *DeleteJobQueueInput) Validate() error { return nil } -// SetJobQueue sets the JobQueue field's value. -func (s *DeleteJobQueueInput) SetJobQueue(v string) *DeleteJobQueueInput { - s.JobQueue = &v +// SetArn sets the Arn field's value. +func (s *DeleteSchedulingPolicyInput) SetArn(v string) *DeleteSchedulingPolicyInput { + s.Arn = &v return s } -type DeleteJobQueueOutput struct { +type DeleteSchedulingPolicyOutput struct { _ struct{} `type:"structure"` } @@ -4470,7 +5167,7 @@ type DeleteJobQueueOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s DeleteJobQueueOutput) String() string { +func (s DeleteSchedulingPolicyOutput) String() string { return awsutil.Prettify(s) } @@ -4479,7 +5176,7 @@ func (s DeleteJobQueueOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s DeleteJobQueueOutput) GoString() string { +func (s DeleteSchedulingPolicyOutput) GoString() string { return s.String() } @@ -4623,9 +5320,9 @@ type DescribeComputeEnvironmentsOutput struct { ComputeEnvironments []*ComputeEnvironmentDetail `locationName:"computeEnvironments" type:"list"` // The nextToken value to include in a future DescribeComputeEnvironments request. - // When the results of a DescribeJobDefinitions request exceed maxResults, this - // value can be used to retrieve the next page of results. This value is null - // when there are no more results to return. + // When the results of a DescribeComputeEnvironments request exceed maxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. NextToken *string `locationName:"nextToken" type:"string"` } @@ -4970,6 +5667,83 @@ func (s *DescribeJobsOutput) SetJobs(v []*JobDetail) *DescribeJobsOutput { return s } +type DescribeSchedulingPoliciesInput struct { + _ struct{} `type:"structure"` + + // A list of up to 100 scheduling policy Amazon Resource Name (ARN) entries. + // + // Arns is a required field + Arns []*string `locationName:"arns" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DescribeSchedulingPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeSchedulingPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeSchedulingPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSchedulingPoliciesInput"} + if s.Arns == nil { + invalidParams.Add(request.NewErrParamRequired("Arns")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArns sets the Arns field's value. +func (s *DescribeSchedulingPoliciesInput) SetArns(v []*string) *DescribeSchedulingPoliciesInput { + s.Arns = v + return s +} + +type DescribeSchedulingPoliciesOutput struct { + _ struct{} `type:"structure"` + + // The list of scheduling policies. + SchedulingPolicies []*SchedulingPolicyDetail `locationName:"schedulingPolicies" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeSchedulingPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DescribeSchedulingPoliciesOutput) GoString() string { + return s.String() +} + +// SetSchedulingPolicies sets the SchedulingPolicies field's value. 
+func (s *DescribeSchedulingPoliciesOutput) SetSchedulingPolicies(v []*SchedulingPolicyDetail) *DescribeSchedulingPoliciesOutput { + s.SchedulingPolicies = v + return s +} + // An object representing a container instance host device. // // This object isn't applicable to jobs that are running on Fargate resources @@ -5197,9 +5971,7 @@ func (s *EFSVolumeConfiguration) SetTransitEncryptionPort(v int64) *EFSVolumeCon // Provides information used to select Amazon Machine Images (AMIs) for instances // in the compute environment. If Ec2Configuration isn't specified, the default -// is currently ECS_AL1 (Amazon Linux (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#alami)) -// for non-GPU, non AWSGraviton instances. Starting on March 31, 2021, this -// default will be changing to ECS_AL2 (Amazon Linux 2 (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami)). +// is ECS_AL2 (Amazon Linux 2 (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami)). // // This object isn't applicable to jobs that are running on Fargate resources. type Ec2Configuration struct { @@ -5211,16 +5983,14 @@ type Ec2Configuration struct { ImageIdOverride *string `locationName:"imageIdOverride" min:"1" type:"string"` // The image type to match with the instance type to select an AMI. If the imageIdOverride - // parameter isn't specified, then a recent Amazon ECS-optimized AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html) - // (ECS_AL1) is used. Starting on March 31, 2021, this default will be changing - // to ECS_AL2 (Amazon Linux 2 (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami)). + // parameter isn't specified, then a recent Amazon ECS-optimized Amazon Linux + // 2 AMI (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami) + // (ECS_AL2) is used. 
// // ECS_AL2 // // Amazon Linux 2 (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#al2ami)− - // Default for all Amazon Web Services Graviton-based instance families (for - // example, C6g, M6g, R6g, and T4g) and can be used for all non-GPU instance - // types. + // Default for all non-GPU instance families. // // ECS_AL2_NVIDIA // @@ -5230,9 +6000,8 @@ type Ec2Configuration struct { // // ECS_AL1 // - // Amazon Linux (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#alami)−Default - // for all non-GPU, non Amazon Web Services Graviton instance families. Amazon - // Linux is reaching the end-of-life of standard support. For more information, + // Amazon Linux (https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-optimized_AMI.html#alami). + // Amazon Linux is reaching the end-of-life of standard support. For more information, // see Amazon Linux AMI (http://aws.amazon.com/amazon-linux-ami/). // // ImageType is a required field @@ -5303,6 +6072,8 @@ type EvaluateOnExit struct { // ExitCode returned for a job. The pattern can be up to 512 characters in length. // It can contain only numbers, and can optionally end with an asterisk (*) // so that only the start of the string needs to be an exact match. + // + // The string can be between 1 and 512 characters in length. OnExitCode *string `locationName:"onExitCode" type:"string"` // Contains a glob pattern to match against the Reason returned for a job. The @@ -5310,6 +6081,8 @@ type EvaluateOnExit struct { // periods (.), colons (:), and white space (including spaces and tabs). It // can optionally end with an asterisk (*) so that only the start of the string // needs to be an exact match. + // + // The string can be between 1 and 512 characters in length. 
OnReason *string `locationName:"onReason" type:"string"` // Contains a glob pattern to match against the StatusReason returned for a @@ -5317,6 +6090,8 @@ type EvaluateOnExit struct { // numbers, periods (.), colons (:), and white space (including spaces or tabs). // It can optionally end with an asterisk (*) so that only the start of the // string needs to be an exact match. + // + // The string can be between 1 and 512 characters in length. OnStatusReason *string `locationName:"onStatusReason" type:"string"` } @@ -5375,6 +6150,98 @@ func (s *EvaluateOnExit) SetOnStatusReason(v string) *EvaluateOnExit { return s } +// The fair share policy for a scheduling policy. +type FairsharePolicy struct { + _ struct{} `type:"structure"` + + // A value used to reserve some of the available maximum vCPU for fair share + // identifiers that have not yet been used. + // + // The reserved ratio is (computeReservation/100)^ActiveFairShares where ActiveFairShares + // is the number of active fair share identifiers. + // + // For example, a computeReservation value of 50 indicates that Batch should + // reserve 50% of the maximum available vCPU if there is only one fair share + // identifier, 25% if there are two fair share identifiers, and 12.5% if there + // are three fair share identifiers. A computeReservation value of 25 indicates + // that Batch should reserve 25% of the maximum available vCPU if there is only + // one fair share identifier, 6.25% if there are two fair share identifiers, + // and 1.56% if there are three fair share identifiers. + // + // The minimum value is 0 and the maximum value is 99. + ComputeReservation *int64 `locationName:"computeReservation" type:"integer"` + + // The time period to use to calculate a fair share percentage for each fair + // share identifier in use, in seconds. 
A value of zero (0) indicates that only + // current usage should be measured; if there are four evenly weighted fair + // share identifiers then each can only use up to 25% of the available CPU resources, + // even if some of the fair share identifiers have no currently running jobs. + // The decay allows for more recently run jobs to have more weight than jobs + // that ran earlier. The maximum supported value is 604800 (1 week). + ShareDecaySeconds *int64 `locationName:"shareDecaySeconds" type:"integer"` + + // Array of SharedIdentifier objects that contain the weights for the fair share + // identifiers for the fair share policy. Fair share identifiers that are not + // included have a default weight of 1.0. + ShareDistribution []*ShareAttributes `locationName:"shareDistribution" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FairsharePolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FairsharePolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *FairsharePolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "FairsharePolicy"} + if s.ShareDistribution != nil { + for i, v := range s.ShareDistribution { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ShareDistribution", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetComputeReservation sets the ComputeReservation field's value. +func (s *FairsharePolicy) SetComputeReservation(v int64) *FairsharePolicy { + s.ComputeReservation = &v + return s +} + +// SetShareDecaySeconds sets the ShareDecaySeconds field's value. +func (s *FairsharePolicy) SetShareDecaySeconds(v int64) *FairsharePolicy { + s.ShareDecaySeconds = &v + return s +} + +// SetShareDistribution sets the ShareDistribution field's value. +func (s *FairsharePolicy) SetShareDistribution(v []*ShareAttributes) *FairsharePolicy { + s.ShareDistribution = v + return s +} + // The platform configuration for jobs that are running on Fargate resources. // Jobs that run on EC2 resources must not specify this parameter. type FargatePlatformConfiguration struct { @@ -5509,6 +6376,11 @@ type JobDefinition struct { // Revision is a required field Revision *int64 `locationName:"revision" type:"integer" required:"true"` + // The scheduling priority of the job definition. This will only affect jobs + // in job queues with a fair share policy. Jobs with a higher scheduling priority + // will be scheduled before jobs with a lower scheduling priority. + SchedulingPriority *int64 `locationName:"schedulingPriority" type:"integer"` + // The status of the job definition. Status *string `locationName:"status" type:"string"` @@ -5520,9 +6392,10 @@ type JobDefinition struct { // if they haven't finished. Timeout *JobTimeout `locationName:"timeout" type:"structure"` - // The type of job definition. 
If the job is run on Fargate resources, then - // multinode isn't supported. For more information about multi-node parallel - // jobs, see Creating a multi-node parallel job definition (https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html) + // The type of job definition, either container or multinode. If the job is + // run on Fargate resources, then multinode isn't supported. For more information + // about multi-node parallel jobs, see Creating a multi-node parallel job definition + // (https://docs.aws.amazon.com/batch/latest/userguide/multi-node-job-def.html) // in the Batch User Guide. // // Type is a required field @@ -5601,6 +6474,12 @@ func (s *JobDefinition) SetRevision(v int64) *JobDefinition { return s } +// SetSchedulingPriority sets the SchedulingPriority field's value. +func (s *JobDefinition) SetSchedulingPriority(v int64) *JobDefinition { + s.SchedulingPriority = &v + return s +} + // SetStatus sets the Status field's value. func (s *JobDefinition) SetStatus(v string) *JobDefinition { s.Status = &v @@ -5742,6 +6621,14 @@ type JobDetail struct { // The retry strategy to use for this job if an attempt fails. RetryStrategy *RetryStrategy `locationName:"retryStrategy" type:"structure"` + // The scheduling policy of the job definition. This will only affect jobs in + // job queues with a fair share policy. Jobs with a higher scheduling priority + // will be scheduled before jobs with a lower scheduling priority. + SchedulingPriority *int64 `locationName:"schedulingPriority" type:"integer"` + + // The share identifier for the job. + ShareIdentifier *string `locationName:"shareIdentifier" type:"string"` + // The Unix timestamp (in milliseconds) for when the job was started (when the // job transitioned from the STARTING state to the RUNNING state). This parameter // isn't provided for child jobs of array jobs or multi-node parallel jobs. 
@@ -5888,6 +6775,18 @@ func (s *JobDetail) SetRetryStrategy(v *RetryStrategy) *JobDetail { return s } +// SetSchedulingPriority sets the SchedulingPriority field's value. +func (s *JobDetail) SetSchedulingPriority(v int64) *JobDetail { + s.SchedulingPriority = &v + return s +} + +// SetShareIdentifier sets the ShareIdentifier field's value. +func (s *JobDetail) SetShareIdentifier(v string) *JobDetail { + s.ShareIdentifier = &v + return s +} + // SetStartedAt sets the StartedAt field's value. func (s *JobDetail) SetStartedAt(v int64) *JobDetail { s.StartedAt = &v @@ -5956,6 +6855,10 @@ type JobQueueDetail struct { // Priority is a required field Priority *int64 `locationName:"priority" type:"integer" required:"true"` + // Amazon Resource Name (ARN) of the scheduling policy. The format is aws:Partition:batch:Region:Account:scheduling-policy/Name + // . For example, aws:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy. + SchedulingPolicyArn *string `locationName:"schedulingPolicyArn" type:"string"` + // Describes the ability of the queue to accept new jobs. If the job queue state // is ENABLED, it's able to accept jobs. If the job queue state is DISABLED, // new jobs can't be added to the queue, but jobs already in the queue can finish. @@ -6018,6 +6921,12 @@ func (s *JobQueueDetail) SetPriority(v int64) *JobQueueDetail { return s } +// SetSchedulingPolicyArn sets the SchedulingPolicyArn field's value. +func (s *JobQueueDetail) SetSchedulingPolicyArn(v string) *JobQueueDetail { + s.SchedulingPolicyArn = &v + return s +} + // SetState sets the State field's value. func (s *JobQueueDetail) SetState(v string) *JobQueueDetail { s.State = &v @@ -6630,7 +7539,135 @@ type ListJobsInput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s ListJobsInput) String() string { +func (s ListJobsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListJobsInput) GoString() string { + return s.String() +} + +// SetArrayJobId sets the ArrayJobId field's value. +func (s *ListJobsInput) SetArrayJobId(v string) *ListJobsInput { + s.ArrayJobId = &v + return s +} + +// SetFilters sets the Filters field's value. +func (s *ListJobsInput) SetFilters(v []*KeyValuesPair) *ListJobsInput { + s.Filters = v + return s +} + +// SetJobQueue sets the JobQueue field's value. +func (s *ListJobsInput) SetJobQueue(v string) *ListJobsInput { + s.JobQueue = &v + return s +} + +// SetJobStatus sets the JobStatus field's value. +func (s *ListJobsInput) SetJobStatus(v string) *ListJobsInput { + s.JobStatus = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListJobsInput) SetMaxResults(v int64) *ListJobsInput { + s.MaxResults = &v + return s +} + +// SetMultiNodeJobId sets the MultiNodeJobId field's value. +func (s *ListJobsInput) SetMultiNodeJobId(v string) *ListJobsInput { + s.MultiNodeJobId = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListJobsInput) SetNextToken(v string) *ListJobsInput { + s.NextToken = &v + return s +} + +type ListJobsOutput struct { + _ struct{} `type:"structure"` + + // A list of job summaries that match the request. + // + // JobSummaryList is a required field + JobSummaryList []*JobSummary `locationName:"jobSummaryList" type:"list" required:"true"` + + // The nextToken value to include in a future ListJobs request. When the results + // of a ListJobs request exceed maxResults, this value can be used to retrieve + // the next page of results. 
This value is null when there are no more results + // to return. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListJobsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListJobsOutput) GoString() string { + return s.String() +} + +// SetJobSummaryList sets the JobSummaryList field's value. +func (s *ListJobsOutput) SetJobSummaryList(v []*JobSummary) *ListJobsOutput { + s.JobSummaryList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput { + s.NextToken = &v + return s +} + +type ListSchedulingPoliciesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of results returned by ListSchedulingPolicies in paginated + // output. When this parameter is used, ListSchedulingPolicies only returns + // maxResults results in a single page and a nextToken response element. The + // remaining results of the initial request can be seen by sending another ListSchedulingPolicies + // request with the returned nextToken value. This value can be between 1 and + // 100. If this parameter isn't used, then ListSchedulingPolicies returns up + // to 100 results and a nextToken value if applicable. + MaxResults *int64 `locationName:"maxResults" type:"integer"` + + // The nextToken value returned from a previous paginated ListSchedulingPolicies + // request where maxResults was used and the results exceeded the value of that + // parameter. 
Pagination continues from the end of the previous results that + // returned the nextToken value. This value is null when there are no more results + // to return. + // + // This token should be treated as an opaque identifier that's only used to + // retrieve the next items in a list and not for other programmatic purposes. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListSchedulingPoliciesInput) String() string { return awsutil.Prettify(s) } @@ -6639,65 +7676,33 @@ func (s ListJobsInput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListJobsInput) GoString() string { +func (s ListSchedulingPoliciesInput) GoString() string { return s.String() } -// SetArrayJobId sets the ArrayJobId field's value. -func (s *ListJobsInput) SetArrayJobId(v string) *ListJobsInput { - s.ArrayJobId = &v - return s -} - -// SetFilters sets the Filters field's value. -func (s *ListJobsInput) SetFilters(v []*KeyValuesPair) *ListJobsInput { - s.Filters = v - return s -} - -// SetJobQueue sets the JobQueue field's value. -func (s *ListJobsInput) SetJobQueue(v string) *ListJobsInput { - s.JobQueue = &v - return s -} - -// SetJobStatus sets the JobStatus field's value. -func (s *ListJobsInput) SetJobStatus(v string) *ListJobsInput { - s.JobStatus = &v - return s -} - // SetMaxResults sets the MaxResults field's value. 
-func (s *ListJobsInput) SetMaxResults(v int64) *ListJobsInput { +func (s *ListSchedulingPoliciesInput) SetMaxResults(v int64) *ListSchedulingPoliciesInput { s.MaxResults = &v return s } -// SetMultiNodeJobId sets the MultiNodeJobId field's value. -func (s *ListJobsInput) SetMultiNodeJobId(v string) *ListJobsInput { - s.MultiNodeJobId = &v - return s -} - // SetNextToken sets the NextToken field's value. -func (s *ListJobsInput) SetNextToken(v string) *ListJobsInput { +func (s *ListSchedulingPoliciesInput) SetNextToken(v string) *ListSchedulingPoliciesInput { s.NextToken = &v return s } -type ListJobsOutput struct { +type ListSchedulingPoliciesOutput struct { _ struct{} `type:"structure"` - // A list of job summaries that match the request. - // - // JobSummaryList is a required field - JobSummaryList []*JobSummary `locationName:"jobSummaryList" type:"list" required:"true"` - - // The nextToken value to include in a future ListJobs request. When the results - // of a ListJobs request exceed maxResults, this value can be used to retrieve - // the next page of results. This value is null when there are no more results - // to return. + // The nextToken value to include in a future ListSchedulingPolicies request. + // When the results of a ListSchedulingPolicies request exceed maxResults, this + // value can be used to retrieve the next page of results. This value is null + // when there are no more results to return. NextToken *string `locationName:"nextToken" type:"string"` + + // A list of scheduling policies that match the request. + SchedulingPolicies []*SchedulingPolicyListingDetail `locationName:"schedulingPolicies" type:"list"` } // String returns the string representation. @@ -6705,7 +7710,7 @@ type ListJobsOutput struct { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". 
-func (s ListJobsOutput) String() string { +func (s ListSchedulingPoliciesOutput) String() string { return awsutil.Prettify(s) } @@ -6714,19 +7719,19 @@ func (s ListJobsOutput) String() string { // API parameter values that are decorated as "sensitive" in the API will not // be included in the string output. The member name will be present, but the // value will be replaced with "sensitive". -func (s ListJobsOutput) GoString() string { +func (s ListSchedulingPoliciesOutput) GoString() string { return s.String() } -// SetJobSummaryList sets the JobSummaryList field's value. -func (s *ListJobsOutput) SetJobSummaryList(v []*JobSummary) *ListJobsOutput { - s.JobSummaryList = v +// SetNextToken sets the NextToken field's value. +func (s *ListSchedulingPoliciesOutput) SetNextToken(v string) *ListSchedulingPoliciesOutput { + s.NextToken = &v return s } -// SetNextToken sets the NextToken field's value. -func (s *ListJobsOutput) SetNextToken(v string) *ListJobsOutput { - s.NextToken = &v +// SetSchedulingPolicies sets the SchedulingPolicies field's value. +func (s *ListSchedulingPoliciesOutput) SetSchedulingPolicies(v []*SchedulingPolicyListingDetail) *ListSchedulingPoliciesOutput { + s.SchedulingPolicies = v return s } @@ -6735,8 +7740,8 @@ type ListTagsForResourceInput struct { // The Amazon Resource Name (ARN) that identifies the resource that tags are // listed for. Batch resources that support tags are compute environments, jobs, - // job definitions, and job queues. ARNs for child jobs of array and multi-node - // parallel (MNP) jobs are not supported. + // job definitions, job queues, and scheduling policies. ARNs for child jobs + // of array and multi-node parallel (MNP) jobs are not supported. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` @@ -7546,6 +8551,14 @@ type RegisterJobDefinitionInput struct { // a timeout, it isn't retried. 
RetryStrategy *RetryStrategy `locationName:"retryStrategy" type:"structure"` + // The scheduling priority for jobs that are submitted with this job definition. + // This will only affect jobs in job queues with a fair share policy. Jobs with + // a higher scheduling priority will be scheduled before jobs with a lower scheduling + // priority. + // + // The minimum supported value is 0 and the maximum supported value is 9999. + SchedulingPriority *int64 `locationName:"schedulingPriority" type:"integer"` + // The tags that you apply to the job definition to help you categorize and // organize your resources. Each tag consists of a key and an optional value. // For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/batch/latest/userguide/using-tags.html) @@ -7665,6 +8678,12 @@ func (s *RegisterJobDefinitionInput) SetRetryStrategy(v *RetryStrategy) *Registe return s } +// SetSchedulingPriority sets the SchedulingPriority field's value. +func (s *RegisterJobDefinitionInput) SetSchedulingPriority(v int64) *RegisterJobDefinitionInput { + s.SchedulingPriority = &v + return s +} + // SetTags sets the Tags field's value. func (s *RegisterJobDefinitionInput) SetTags(v map[string]*string) *RegisterJobDefinitionInput { s.Tags = v @@ -7975,6 +8994,108 @@ func (s *RetryStrategy) SetEvaluateOnExit(v []*EvaluateOnExit) *RetryStrategy { return s } +// An object representing a scheduling policy. +type SchedulingPolicyDetail struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of the scheduling policy. An example would be + // arn:aws:batch:us-east-1:123456789012:scheduling-policy/HighPriority + // + // Arn is a required field + Arn *string `locationName:"arn" type:"string" required:"true"` + + // The fair share policy for the scheduling policy. + FairsharePolicy *FairsharePolicy `locationName:"fairsharePolicy" type:"structure"` + + // The name of the scheduling policy. 
+ // + // Name is a required field + Name *string `locationName:"name" type:"string" required:"true"` + + // The tags that you apply to the scheduling policy to help you categorize and + // organize your resources. Each tag consists of a key and an optional value. + // For more information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) + // in Amazon Web Services General Reference. + Tags map[string]*string `locationName:"tags" min:"1" type:"map"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SchedulingPolicyDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SchedulingPolicyDetail) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *SchedulingPolicyDetail) SetArn(v string) *SchedulingPolicyDetail { + s.Arn = &v + return s +} + +// SetFairsharePolicy sets the FairsharePolicy field's value. +func (s *SchedulingPolicyDetail) SetFairsharePolicy(v *FairsharePolicy) *SchedulingPolicyDetail { + s.FairsharePolicy = v + return s +} + +// SetName sets the Name field's value. +func (s *SchedulingPolicyDetail) SetName(v string) *SchedulingPolicyDetail { + s.Name = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *SchedulingPolicyDetail) SetTags(v map[string]*string) *SchedulingPolicyDetail { + s.Tags = v + return s +} + +// An object containing the details of a scheduling policy returned in a ListSchedulingPolicy +// action. 
+type SchedulingPolicyListingDetail struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of the scheduling policy. + // + // Arn is a required field + Arn *string `locationName:"arn" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SchedulingPolicyListingDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SchedulingPolicyListingDetail) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *SchedulingPolicyListingDetail) SetArn(v string) *SchedulingPolicyListingDetail { + s.Arn = &v + return s +} + // An object representing the secret to expose to your container. Secrets can // be exposed to a container in the following ways: // @@ -8117,6 +9238,80 @@ func (s *ServerException) RequestID() string { return s.RespMetadata.RequestID } +// Specifies the weights for the fair share identifiers for the fair share policy. +// Fair share identifiers that are not included have a default weight of 1.0. +type ShareAttributes struct { + _ struct{} `type:"structure"` + + // A fair share identifier or fair share identifier prefix. If the string ends + // with '*' then this entry specifies the weight factor to use for fair share + // identifiers that begin with that prefix. The list of fair share identifiers + // in a fair share policy cannot overlap. For example you cannot have one that + // specifies a shareIdentifier of UserA* and another that specifies a shareIdentifier + // of UserA-1. 
+ // + // There can be no more than 500 fair share identifiers active in a job queue. + // + // The string is limited to 255 alphanumeric characters, optionally followed + // by '*'. + // + // ShareIdentifier is a required field + ShareIdentifier *string `locationName:"shareIdentifier" type:"string" required:"true"` + + // The weight factor for the fair share identifier. The default value is 1.0. + // A lower value has a higher priority for compute resources. For example, jobs + // using a share identifier with a weight factor of 0.125 (1/8) will get 8 times + // the compute resources of jobs using a share identifier with a weight factor + // of 1. + // + // The smallest supported value is 0.0001 and the largest supported value is + // 999.9999. + WeightFactor *float64 `locationName:"weightFactor" type:"float"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ShareAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ShareAttributes) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ShareAttributes) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ShareAttributes"} + if s.ShareIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("ShareIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetShareIdentifier sets the ShareIdentifier field's value. 
+func (s *ShareAttributes) SetShareIdentifier(v string) *ShareAttributes { + s.ShareIdentifier = &v + return s +} + +// SetWeightFactor sets the WeightFactor field's value. +func (s *ShareAttributes) SetWeightFactor(v float64) *ShareAttributes { + s.WeightFactor = &v + return s +} + // Contains the parameters for SubmitJob. type SubmitJobInput struct { _ struct{} `type:"structure"` @@ -8192,6 +9387,17 @@ type SubmitJobInput struct { // defined in the job definition. RetryStrategy *RetryStrategy `locationName:"retryStrategy" type:"structure"` + // The scheduling priority for the job. This will only affect jobs in job queues + // with a fair share policy. Jobs with a higher scheduling priority will be + // scheduled before jobs with a lower scheduling priority. This will override + // any scheduling priority in the job definition. + // + // The minimum supported value is 0 and the maximum supported value is 9999. + SchedulingPriorityOverride *int64 `locationName:"schedulingPriorityOverride" type:"integer"` + + // The share identifier for the job. + ShareIdentifier *string `locationName:"shareIdentifier" type:"string"` + // The tags that you apply to the job request to help you categorize and organize // your resources. Each tag consists of a key and an optional value. For more // information, see Tagging Amazon Web Services Resources (https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html) @@ -8324,6 +9530,18 @@ func (s *SubmitJobInput) SetRetryStrategy(v *RetryStrategy) *SubmitJobInput { return s } +// SetSchedulingPriorityOverride sets the SchedulingPriorityOverride field's value. +func (s *SubmitJobInput) SetSchedulingPriorityOverride(v int64) *SubmitJobInput { + s.SchedulingPriorityOverride = &v + return s +} + +// SetShareIdentifier sets the ShareIdentifier field's value. +func (s *SubmitJobInput) SetShareIdentifier(v string) *SubmitJobInput { + s.ShareIdentifier = &v + return s +} + // SetTags sets the Tags field's value. 
func (s *SubmitJobInput) SetTags(v map[string]*string) *SubmitJobInput { s.Tags = v @@ -8394,8 +9612,8 @@ type TagResourceInput struct { // The Amazon Resource Name (ARN) of the resource that tags are added to. Batch // resources that support tags are compute environments, jobs, job definitions, - // and job queues. ARNs for child jobs of array and multi-node parallel (MNP) - // jobs are not supported. + // job queues, and scheduling policies. ARNs for child jobs of array and multi-node + // parallel (MNP) jobs are not supported. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` @@ -8729,8 +9947,8 @@ type UntagResourceInput struct { // The Amazon Resource Name (ARN) of the resource from which to delete tags. // Batch resources that support tags are compute environments, jobs, job definitions, - // and job queues. ARNs for child jobs of array and multi-node parallel (MNP) - // jobs are not supported. + // job queues, and scheduling policies. ARNs for child jobs of array and multi-node + // parallel (MNP) jobs are not supported. // // ResourceArn is a required field ResourceArn *string `location:"uri" locationName:"resourceArn" type:"string" required:"true"` @@ -8866,6 +10084,13 @@ type UpdateComputeEnvironmentInput struct { // don't scale out. However, they scale in to minvCpus value after instances // become idle. State *string `locationName:"state" type:"string" enum:"CEState"` + + // The maximum number of vCPUs expected to be used for an unmanaged compute + // environment. This parameter should not be specified for a managed compute + // environment. This parameter is only used for fair share scheduling to reserve + // vCPU capacity for new share identifiers. If this parameter is not provided + // for a fair share job queue, no vCPU capacity will be reserved. + UnmanagedvCpus *int64 `locationName:"unmanagedvCpus" type:"integer"` } // String returns the string representation. 
@@ -8923,6 +10148,12 @@ func (s *UpdateComputeEnvironmentInput) SetState(v string) *UpdateComputeEnviron return s } +// SetUnmanagedvCpus sets the UnmanagedvCpus field's value. +func (s *UpdateComputeEnvironmentInput) SetUnmanagedvCpus(v int64) *UpdateComputeEnvironmentInput { + s.UnmanagedvCpus = &v + return s +} + type UpdateComputeEnvironmentOutput struct { _ struct{} `type:"structure"` @@ -8995,6 +10226,12 @@ type UpdateJobQueueInput struct { // EC2 and Fargate compute environments can't be mixed. Priority *int64 `locationName:"priority" type:"integer"` + // Amazon Resource Name (ARN) of the fair share scheduling policy. Once a job + // queue is created, the fair share scheduling policy can be replaced but not + // removed. The format is aws:Partition:batch:Region:Account:scheduling-policy/Name + // . For example, aws:aws:batch:us-west-2:012345678910:scheduling-policy/MySchedulingPolicy. + SchedulingPolicyArn *string `locationName:"schedulingPolicyArn" type:"string"` + // Describes the queue's ability to accept new jobs. If the job queue state // is ENABLED, it can accept jobs. If the job queue state is DISABLED, new jobs // can't be added to the queue, but jobs already in the queue can finish. @@ -9060,6 +10297,12 @@ func (s *UpdateJobQueueInput) SetPriority(v int64) *UpdateJobQueueInput { return s } +// SetSchedulingPolicyArn sets the SchedulingPolicyArn field's value. +func (s *UpdateJobQueueInput) SetSchedulingPolicyArn(v string) *UpdateJobQueueInput { + s.SchedulingPolicyArn = &v + return s +} + // SetState sets the State field's value. func (s *UpdateJobQueueInput) SetState(v string) *UpdateJobQueueInput { s.State = &v @@ -9106,6 +10349,88 @@ func (s *UpdateJobQueueOutput) SetJobQueueName(v string) *UpdateJobQueueOutput { return s } +type UpdateSchedulingPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the scheduling policy to update. 
+ // + // Arn is a required field + Arn *string `locationName:"arn" type:"string" required:"true"` + + // The fair share policy. + FairsharePolicy *FairsharePolicy `locationName:"fairsharePolicy" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateSchedulingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateSchedulingPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateSchedulingPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSchedulingPolicyInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.FairsharePolicy != nil { + if err := s.FairsharePolicy.Validate(); err != nil { + invalidParams.AddNested("FairsharePolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *UpdateSchedulingPolicyInput) SetArn(v string) *UpdateSchedulingPolicyInput { + s.Arn = &v + return s +} + +// SetFairsharePolicy sets the FairsharePolicy field's value. +func (s *UpdateSchedulingPolicyInput) SetFairsharePolicy(v *FairsharePolicy) *UpdateSchedulingPolicyInput { + s.FairsharePolicy = v + return s +} + +type UpdateSchedulingPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateSchedulingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UpdateSchedulingPolicyOutput) GoString() string { + return s.String() +} + // A data volume used in a job's container properties. type Volume struct { _ struct{} `type:"structure"` diff --git a/service/batch/batchiface/interface.go b/service/batch/batchiface/interface.go index afd8db5c74a..e50be21ef94 100644 --- a/service/batch/batchiface/interface.go +++ b/service/batch/batchiface/interface.go @@ -72,6 +72,10 @@ type BatchAPI interface { CreateJobQueueWithContext(aws.Context, *batch.CreateJobQueueInput, ...request.Option) (*batch.CreateJobQueueOutput, error) CreateJobQueueRequest(*batch.CreateJobQueueInput) (*request.Request, *batch.CreateJobQueueOutput) + CreateSchedulingPolicy(*batch.CreateSchedulingPolicyInput) (*batch.CreateSchedulingPolicyOutput, error) + CreateSchedulingPolicyWithContext(aws.Context, *batch.CreateSchedulingPolicyInput, ...request.Option) (*batch.CreateSchedulingPolicyOutput, error) + CreateSchedulingPolicyRequest(*batch.CreateSchedulingPolicyInput) (*request.Request, *batch.CreateSchedulingPolicyOutput) + DeleteComputeEnvironment(*batch.DeleteComputeEnvironmentInput) (*batch.DeleteComputeEnvironmentOutput, error) DeleteComputeEnvironmentWithContext(aws.Context, *batch.DeleteComputeEnvironmentInput, ...request.Option) (*batch.DeleteComputeEnvironmentOutput, error) DeleteComputeEnvironmentRequest(*batch.DeleteComputeEnvironmentInput) (*request.Request, *batch.DeleteComputeEnvironmentOutput) 
@@ -80,6 +84,10 @@ type BatchAPI interface { DeleteJobQueueWithContext(aws.Context, *batch.DeleteJobQueueInput, ...request.Option) (*batch.DeleteJobQueueOutput, error) DeleteJobQueueRequest(*batch.DeleteJobQueueInput) (*request.Request, *batch.DeleteJobQueueOutput) + DeleteSchedulingPolicy(*batch.DeleteSchedulingPolicyInput) (*batch.DeleteSchedulingPolicyOutput, error) + DeleteSchedulingPolicyWithContext(aws.Context, *batch.DeleteSchedulingPolicyInput, ...request.Option) (*batch.DeleteSchedulingPolicyOutput, error) + DeleteSchedulingPolicyRequest(*batch.DeleteSchedulingPolicyInput) (*request.Request, *batch.DeleteSchedulingPolicyOutput) + DeregisterJobDefinition(*batch.DeregisterJobDefinitionInput) (*batch.DeregisterJobDefinitionOutput, error) DeregisterJobDefinitionWithContext(aws.Context, *batch.DeregisterJobDefinitionInput, ...request.Option) (*batch.DeregisterJobDefinitionOutput, error) DeregisterJobDefinitionRequest(*batch.DeregisterJobDefinitionInput) (*request.Request, *batch.DeregisterJobDefinitionOutput) @@ -109,6 +117,10 @@ type BatchAPI interface { DescribeJobsWithContext(aws.Context, *batch.DescribeJobsInput, ...request.Option) (*batch.DescribeJobsOutput, error) DescribeJobsRequest(*batch.DescribeJobsInput) (*request.Request, *batch.DescribeJobsOutput) + DescribeSchedulingPolicies(*batch.DescribeSchedulingPoliciesInput) (*batch.DescribeSchedulingPoliciesOutput, error) + DescribeSchedulingPoliciesWithContext(aws.Context, *batch.DescribeSchedulingPoliciesInput, ...request.Option) (*batch.DescribeSchedulingPoliciesOutput, error) + DescribeSchedulingPoliciesRequest(*batch.DescribeSchedulingPoliciesInput) (*request.Request, *batch.DescribeSchedulingPoliciesOutput) + ListJobs(*batch.ListJobsInput) (*batch.ListJobsOutput, error) ListJobsWithContext(aws.Context, *batch.ListJobsInput, ...request.Option) (*batch.ListJobsOutput, error) ListJobsRequest(*batch.ListJobsInput) (*request.Request, *batch.ListJobsOutput) @@ -116,6 +128,13 @@ type BatchAPI interface { 
ListJobsPages(*batch.ListJobsInput, func(*batch.ListJobsOutput, bool) bool) error ListJobsPagesWithContext(aws.Context, *batch.ListJobsInput, func(*batch.ListJobsOutput, bool) bool, ...request.Option) error + ListSchedulingPolicies(*batch.ListSchedulingPoliciesInput) (*batch.ListSchedulingPoliciesOutput, error) + ListSchedulingPoliciesWithContext(aws.Context, *batch.ListSchedulingPoliciesInput, ...request.Option) (*batch.ListSchedulingPoliciesOutput, error) + ListSchedulingPoliciesRequest(*batch.ListSchedulingPoliciesInput) (*request.Request, *batch.ListSchedulingPoliciesOutput) + + ListSchedulingPoliciesPages(*batch.ListSchedulingPoliciesInput, func(*batch.ListSchedulingPoliciesOutput, bool) bool) error + ListSchedulingPoliciesPagesWithContext(aws.Context, *batch.ListSchedulingPoliciesInput, func(*batch.ListSchedulingPoliciesOutput, bool) bool, ...request.Option) error + ListTagsForResource(*batch.ListTagsForResourceInput) (*batch.ListTagsForResourceOutput, error) ListTagsForResourceWithContext(aws.Context, *batch.ListTagsForResourceInput, ...request.Option) (*batch.ListTagsForResourceOutput, error) ListTagsForResourceRequest(*batch.ListTagsForResourceInput) (*request.Request, *batch.ListTagsForResourceOutput) @@ -147,6 +166,10 @@ type BatchAPI interface { UpdateJobQueue(*batch.UpdateJobQueueInput) (*batch.UpdateJobQueueOutput, error) UpdateJobQueueWithContext(aws.Context, *batch.UpdateJobQueueInput, ...request.Option) (*batch.UpdateJobQueueOutput, error) UpdateJobQueueRequest(*batch.UpdateJobQueueInput) (*request.Request, *batch.UpdateJobQueueOutput) + + UpdateSchedulingPolicy(*batch.UpdateSchedulingPolicyInput) (*batch.UpdateSchedulingPolicyOutput, error) + UpdateSchedulingPolicyWithContext(aws.Context, *batch.UpdateSchedulingPolicyInput, ...request.Option) (*batch.UpdateSchedulingPolicyOutput, error) + UpdateSchedulingPolicyRequest(*batch.UpdateSchedulingPolicyInput) (*request.Request, *batch.UpdateSchedulingPolicyOutput) } var _ BatchAPI = (*batch.Batch)(nil) 
diff --git a/service/batch/doc.go b/service/batch/doc.go index 9997af377f8..95e74abaea6 100644 --- a/service/batch/doc.go +++ b/service/batch/doc.go @@ -3,15 +3,15 @@ // Package batch provides the client and types for making API // requests to AWS Batch. // -// Using Batch, you can run batch computing workloads on the Cloud. Batch computing -// is a common means for developers, scientists, and engineers to access large -// amounts of compute resources. Batch uses the advantages of this computing -// workload to remove the undifferentiated heavy lifting of configuring and -// managing required infrastructure. At the same time, it also adopts a familiar -// batch computing software approach. Given these advantages, Batch can help -// you to efficiently provision resources in response to jobs submitted, thus -// effectively helping you to eliminate capacity constraints, reduce compute -// costs, and deliver your results more quickly. +// Using Batch, you can run batch computing workloads on the Amazon Web Services +// Cloud. Batch computing is a common means for developers, scientists, and +// engineers to access large amounts of compute resources. Batch uses the advantages +// of this computing workload to remove the undifferentiated heavy lifting of +// configuring and managing required infrastructure. At the same time, it also +// adopts a familiar batch computing software approach. Given these advantages, +// Batch can help you to efficiently provision resources in response to jobs +// submitted, thus effectively helping you to eliminate capacity constraints, +// reduce compute costs, and deliver your results more quickly. // // As a fully managed service, Batch can run batch computing workloads of any // scale. 
Batch automatically provisions compute resources and optimizes workload diff --git a/service/batch/examples_test.go b/service/batch/examples_test.go index cb26b79a25f..f3bf15a5bf9 100644 --- a/service/batch/examples_test.go +++ b/service/batch/examples_test.go @@ -592,9 +592,17 @@ func ExampleBatch_RegisterJobDefinition_shared00() { aws.String("sleep"), aws.String("10"), }, - Image: aws.String("busybox"), - Memory: aws.Int64(128), - Vcpus: aws.Int64(1), + Image: aws.String("busybox"), + ResourceRequirements: []*batch.ResourceRequirement{ + { + Type: aws.String("MEMORY"), + Value: aws.String("128"), + }, + { + Type: aws.String("VCPU"), + Value: aws.String("1"), + }, + }, }, JobDefinitionName: aws.String("sleep10"), Type: aws.String("container"), @@ -633,9 +641,17 @@ func ExampleBatch_RegisterJobDefinition_shared01() { aws.String("sleep"), aws.String("30"), }, - Image: aws.String("busybox"), - Memory: aws.Int64(128), - Vcpus: aws.Int64(1), + Image: aws.String("busybox"), + ResourceRequirements: []*batch.ResourceRequirement{ + { + Type: aws.String("MEMORY"), + Value: aws.String("128"), + }, + { + Type: aws.String("VCPU"), + Value: aws.String("1"), + }, + }, }, JobDefinitionName: aws.String("sleep30"), Tags: map[string]*string{ diff --git a/service/greengrassv2/api.go b/service/greengrassv2/api.go index d98992c4633..7b25bffe716 100644 --- a/service/greengrassv2/api.go +++ b/service/greengrassv2/api.go @@ -382,7 +382,8 @@ func (c *GreengrassV2) CreateComponentVersionRequest(input *CreateComponentVersi // Python 2.7 – python2.7 Python 3.7 – python3.7 Python 3.8 – python3.8 // Java 8 – java8 Node.js 10 – nodejs10.x Node.js 12 – nodejs12.x To // create a component from a Lambda function, specify lambdaFunction when -// you call this operation. +// you call this operation. IoT Greengrass currently supports Lambda functions +// on only Linux core devices. // // Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3707,10 +3708,11 @@ func (s *ComponentPlatform) SetName(v string) *ComponentPlatform { type ComponentRunWith struct { _ struct{} `type:"structure"` - // The POSIX system user and (optional) group to use to run this component. - // Specify the user and group separated by a colon (:) in the following format: - // user:group. The group is optional. If you don't specify a group, the IoT - // Greengrass Core software uses the primary user for the group. + // The POSIX system user and, optionally, group to use to run this component + // on Linux core devices. The user, and group if specified, must exist on each + // Linux core device. Specify the user and group separated by a colon (:) in + // the following format: user:group. The group is optional. If you don't specify + // a group, the IoT Greengrass Core software uses the primary user for the group. // // If you omit this parameter, the IoT Greengrass Core software uses the default // system user and group that you configure on the Greengrass nucleus component. @@ -3719,13 +3721,23 @@ type ComponentRunWith struct { PosixUser *string `locationName:"posixUser" min:"1" type:"string"` // The system resource limits to apply to this component's process on the core - // device. + // device. IoT Greengrass currently supports this feature on only Linux core + // devices. // // If you omit this parameter, the IoT Greengrass Core software uses the default // system resource limits that you configure on the Greengrass nucleus component. // For more information, see Configure system resource limits for components // (https://docs.aws.amazon.com/greengrass/v2/developerguide/configure-greengrass-core-v2.html#configure-component-system-resource-limits). 
SystemResourceLimits *SystemResourceLimits `locationName:"systemResourceLimits" type:"structure"` + + // The Windows user to use to run this component on Windows core devices. The + // user must exist on each Windows core device, and its name and password must + // be in the LocalSystem account's Credentials Manager instance. + // + // If you omit this parameter, the IoT Greengrass Core software uses the default + // Windows user that you configure on the Greengrass nucleus component. For + // more information, see Configure the user and group that run components (https://docs.aws.amazon.com/greengrass/v2/developerguide/configure-greengrass-core-v2.html#configure-component-user). + WindowsUser *string `locationName:"windowsUser" min:"1" type:"string"` } // String returns the string representation. @@ -3752,6 +3764,9 @@ func (s *ComponentRunWith) Validate() error { if s.PosixUser != nil && len(*s.PosixUser) < 1 { invalidParams.Add(request.NewErrParamMinLen("PosixUser", 1)) } + if s.WindowsUser != nil && len(*s.WindowsUser) < 1 { + invalidParams.Add(request.NewErrParamMinLen("WindowsUser", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -3771,6 +3786,12 @@ func (s *ComponentRunWith) SetSystemResourceLimits(v *SystemResourceLimits) *Com return s } +// SetWindowsUser sets the WindowsUser field's value. +func (s *ComponentRunWith) SetWindowsUser(v string) *ComponentRunWith { + s.WindowsUser = &v + return s +} + // Contains information about a component version in a list. type ComponentVersionListItem struct { _ struct{} `type:"structure"` diff --git a/service/health/api.go b/service/health/api.go index 215299ffd25..77478936151 100644 --- a/service/health/api.go +++ b/service/health/api.go @@ -63,12 +63,12 @@ func (c *Health) DescribeAffectedAccountsForOrganizationRequest(input *DescribeA // DescribeAffectedAccountsForOrganization API operation for AWS Health APIs and Notifications. 
// -// Returns a list of accounts in the organization from AWS Organizations that -// are affected by the provided event. For more information about the different -// types of AWS Health events, see Event (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html). +// Returns a list of accounts in the organization from Organizations that are +// affected by the provided event. For more information about the different +// types of Health events, see Event (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html). // -// Before you can call this operation, you must first enable AWS Health to work -// with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization +// Before you can call this operation, you must first enable Health to work +// with Organizations. To do this, call the EnableHealthServiceAccessForOrganization // (https://docs.aws.amazon.com/health/latest/APIReference/API_EnableHealthServiceAccessForOrganization.html) // operation from your organization's management account. // @@ -213,20 +213,19 @@ func (c *Health) DescribeAffectedEntitiesRequest(input *DescribeAffectedEntities // Returns a list of entities that have been affected by the specified events, // based on the specified filter criteria. Entities can refer to individual // customer resources, groups of customer resources, or any other construct, -// depending on the AWS service. Events that have impact beyond that of the -// affected entities, or where the extent of impact is unknown, include at least -// one entity indicating this. +// depending on the Amazon Web Services service. Events that have impact beyond +// that of the affected entities, or where the extent of impact is unknown, +// include at least one entity indicating this. // -// At least one event ARN is required. Results are sorted by the lastUpdatedTime -// of the entity, starting with the most recent. +// At least one event ARN is required. 
// // * This API operation uses pagination. Specify the nextToken parameter // in the next request to return more results. // // * This operation supports resource-level permissions. You can use this -// operation to allow or deny access to specific AWS Health events. For more +// operation to allow or deny access to specific Health events. For more // information, see Resource- and action-based conditions (https://docs.aws.amazon.com/health/latest/ug/security_iam_id-based-policy-examples.html#resource-action-based-conditions) -// in the AWS Health User Guide. +// in the Health User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -367,17 +366,15 @@ func (c *Health) DescribeAffectedEntitiesForOrganizationRequest(input *DescribeA // DescribeAffectedEntitiesForOrganization API operation for AWS Health APIs and Notifications. // // Returns a list of entities that have been affected by one or more events -// for one or more accounts in your organization in AWS Organizations, based -// on the filter criteria. Entities can refer to individual customer resources, -// groups of customer resources, or any other construct, depending on the AWS -// service. +// for one or more accounts in your organization in Organizations, based on +// the filter criteria. Entities can refer to individual customer resources, +// groups of customer resources, or any other construct, depending on the Amazon +// Web Services service. // // At least one event Amazon Resource Name (ARN) and account ID are required. -// Results are sorted by the lastUpdatedTime of the entity, starting with the -// most recent. // -// Before you can call this operation, you must first enable AWS Health to work -// with AWS Organizations. 
To do this, call the EnableHealthServiceAccessForOrganization +// Before you can call this operation, you must first enable Health to work +// with Organizations. To do this, call the EnableHealthServiceAccessForOrganization // (https://docs.aws.amazon.com/health/latest/APIReference/API_EnableHealthServiceAccessForOrganization.html) // operation from your organization's management account. // @@ -385,9 +382,9 @@ func (c *Health) DescribeAffectedEntitiesForOrganizationRequest(input *DescribeA // in the next request to return more results. // // * This operation doesn't support resource-level permissions. You can't -// use this operation to allow or deny access to specific AWS Health events. +// use this operation to allow or deny access to specific Health events. // For more information, see Resource- and action-based conditions (https://docs.aws.amazon.com/health/latest/ug/security_iam_id-based-policy-examples.html#resource-action-based-conditions) -// in the AWS Health User Guide. +// in the Health User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -522,8 +519,7 @@ func (c *Health) DescribeEntityAggregatesRequest(input *DescribeEntityAggregates // DescribeEntityAggregates API operation for AWS Health APIs and Notifications. // // Returns the number of entities that are affected by each of the specified -// events. If no events are specified, the counts of all affected entities are -// returned. +// events. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -740,8 +736,8 @@ func (c *Health) DescribeEventDetailsRequest(input *DescribeEventDetailsInput) ( // DescribeEventDetails API operation for AWS Health APIs and Notifications. // // Returns detailed information about one or more specified events. 
Information -// includes standard event data (AWS Region, service, and so on, as returned -// by DescribeEvents (https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeEvents.html)), +// includes standard event data (Amazon Web Services Region, service, and so +// on, as returned by DescribeEvents (https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeEvents.html)), // a detailed event description, and possible additional metadata that depends // upon the nature of the event. Affected entities are not included. To retrieve // the entities, use the DescribeAffectedEntities (https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedEntities.html) @@ -751,9 +747,9 @@ func (c *Health) DescribeEventDetailsRequest(input *DescribeEventDetailsInput) ( // that event. // // This operation supports resource-level permissions. You can use this operation -// to allow or deny access to specific AWS Health events. For more information, +// to allow or deny access to specific Health events. For more information, // see Resource- and action-based conditions (https://docs.aws.amazon.com/health/latest/ug/security_iam_id-based-policy-examples.html#resource-action-based-conditions) -// in the AWS Health User Guide. +// in the Health User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -833,38 +829,39 @@ func (c *Health) DescribeEventDetailsForOrganizationRequest(input *DescribeEvent // DescribeEventDetailsForOrganization API operation for AWS Health APIs and Notifications. // // Returns detailed information about one or more specified events for one or -// more AWS accounts in your organization. This information includes standard -// event data (such as the AWS Region and service), an event description, and -// (depending on the event) possible metadata. 
This operation doesn't return -// affected entities, such as the resources related to the event. To return -// affected entities, use the DescribeAffectedEntitiesForOrganization (https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedEntitiesForOrganization.html) +// more Amazon Web Services accounts in your organization. This information +// includes standard event data (such as the Amazon Web Services Region and +// service), an event description, and (depending on the event) possible metadata. +// This operation doesn't return affected entities, such as the resources related +// to the event. To return affected entities, use the DescribeAffectedEntitiesForOrganization +// (https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedEntitiesForOrganization.html) // operation. // -// Before you can call this operation, you must first enable AWS Health to work -// with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization +// Before you can call this operation, you must first enable Health to work +// with Organizations. To do this, call the EnableHealthServiceAccessForOrganization // (https://docs.aws.amazon.com/health/latest/APIReference/API_EnableHealthServiceAccessForOrganization.html) // operation from your organization's management account. // // When you call the DescribeEventDetailsForOrganization operation, specify // the organizationEventDetailFilters object in the request. Depending on the -// AWS Health event type, note the following differences: +// Health event type, note the following differences: // // * To return event details for a public event, you must specify a null // value for the awsAccountId parameter. If you specify an account ID for -// a public event, AWS Health returns an error message because public events +// a public event, Health returns an error message because public events // aren't specific to an account. 
// // * To return event details for an event that is specific to an account // in your organization, you must specify the awsAccountId parameter in the -// request. If you don't specify an account ID, AWS Health returns an error -// message because the event is specific to an account in your organization. +// request. If you don't specify an account ID, Health returns an error message +// because the event is specific to an account in your organization. // // For more information, see Event (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html). // // This operation doesn't support resource-level permissions. You can't use -// this operation to allow or deny access to specific AWS Health events. For -// more information, see Resource- and action-based conditions (https://docs.aws.amazon.com/health/latest/ug/security_iam_id-based-policy-examples.html#resource-action-based-conditions) -// in the AWS Health User Guide. +// this operation to allow or deny access to specific Health events. For more +// information, see Resource- and action-based conditions (https://docs.aws.amazon.com/health/latest/ug/security_iam_id-based-policy-examples.html#resource-action-based-conditions) +// in the Health User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -950,9 +947,9 @@ func (c *Health) DescribeEventTypesRequest(input *DescribeEventTypesInput) (req // DescribeEventTypes API operation for AWS Health APIs and Notifications. // // Returns the event types that meet the specified filter criteria. You can -// use this API operation to find information about the AWS Health event, such -// as the category, AWS service, and event code. 
The metadata for each event -// appears in the EventType (https://docs.aws.amazon.com/health/latest/APIReference/API_EventType.html) +// use this API operation to find information about the Health event, such as +// the category, Amazon Web Services service, and event code. The metadata for +// each event appears in the EventType (https://docs.aws.amazon.com/health/latest/APIReference/API_EventType.html) // object. // // If you don't specify a filter criteria, the API operation returns all event @@ -1110,12 +1107,12 @@ func (c *Health) DescribeEventsRequest(input *DescribeEventsInput) (req *request // sorted by lastModifiedTime, starting with the most recent event. // // * When you call the DescribeEvents operation and specify an entity for -// the entityValues parameter, AWS Health might return public events that -// aren't specific to that resource. For example, if you call DescribeEvents -// and specify an ID for an Amazon Elastic Compute Cloud (Amazon EC2) instance, -// AWS Health might return events that aren't specific to that resource or -// service. To get events that are specific to a service, use the services -// parameter in the filter object. For more information, see Event (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html). +// the entityValues parameter, Health might return public events that aren't +// specific to that resource. For example, if you call DescribeEvents and +// specify an ID for an Amazon Elastic Compute Cloud (Amazon EC2) instance, +// Health might return events that aren't specific to that resource or service. +// To get events that are specific to a service, use the services parameter +// in the filter object. For more information, see Event (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html). // // * This API operation uses pagination. Specify the nextToken parameter // in the next request to return more results. 
@@ -1258,7 +1255,7 @@ func (c *Health) DescribeEventsForOrganizationRequest(input *DescribeEventsForOr // DescribeEventsForOrganization API operation for AWS Health APIs and Notifications. // -// Returns information about events across your organization in AWS Organizations. +// Returns information about events across your organization in Organizations. // You can use thefilters parameter to specify the events that you want to return. // Events are returned in a summary form and don't include the affected accounts, // detailed description, any additional metadata that depends on the event type, @@ -1275,11 +1272,11 @@ func (c *Health) DescribeEventsForOrganizationRequest(input *DescribeEventsForOr // all events across your organization. Results are sorted by lastModifiedTime, // starting with the most recent event. // -// For more information about the different types of AWS Health events, see -// Event (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html). +// For more information about the different types of Health events, see Event +// (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html). // -// Before you can call this operation, you must first enable AWS Health to work -// with AWS Organizations. To do this, call the EnableHealthServiceAccessForOrganization +// Before you can call this operation, you must first enable Health to work +// with Organizations. To do this, call the EnableHealthServiceAccessForOrganization // (https://docs.aws.amazon.com/health/latest/APIReference/API_EnableHealthServiceAccessForOrganization.html) // operation from your organization's management account. // @@ -1418,7 +1415,7 @@ func (c *Health) DescribeHealthServiceStatusForOrganizationRequest(input *Descri // DescribeHealthServiceStatusForOrganization API operation for AWS Health APIs and Notifications. 
// -// This operation provides status information on enabling or disabling AWS Health +// This operation provides status information on enabling or disabling Health // to work with your organization. To call this operation, you must sign in // as an IAM user, assume an IAM role, or sign in as the root user (not recommended) // in the organization's management account. @@ -1496,25 +1493,25 @@ func (c *Health) DisableHealthServiceAccessForOrganizationRequest(input *Disable // DisableHealthServiceAccessForOrganization API operation for AWS Health APIs and Notifications. // -// Disables AWS Health from working with AWS Organizations. To call this operation, -// you must sign in as an AWS Identity and Access Management (IAM) user, assume +// Disables Health from working with Organizations. To call this operation, +// you must sign in as an Identity and Access Management (IAM) user, assume // an IAM role, or sign in as the root user (not recommended) in the organization's -// management account. For more information, see Aggregating AWS Health events -// (https://docs.aws.amazon.com/health/latest/ug/aggregate-events.html) in the -// AWS Health User Guide. +// management account. For more information, see Aggregating Health events (https://docs.aws.amazon.com/health/latest/ug/aggregate-events.html) +// in the Health User Guide. // // This operation doesn't remove the service-linked role from the management -// account in your organization. You must use the IAM console, API, or AWS Command -// Line Interface (AWS CLI) to remove the service-linked role. For more information, +// account in your organization. You must use the IAM console, API, or Command +// Line Interface (CLI) to remove the service-linked role. For more information, // see Deleting a Service-Linked Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html#delete-service-linked-role) // in the IAM User Guide. 
// // You can also disable the organizational feature by using the Organizations // DisableAWSServiceAccess (https://docs.aws.amazon.com/organizations/latest/APIReference/API_DisableAWSServiceAccess.html) -// API operation. After you call this operation, AWS Health stops aggregating -// events for all other AWS accounts in your organization. If you call the AWS -// Health API operations for organizational view, AWS Health returns an error. -// AWS Health continues to aggregate health events for your AWS account. +// API operation. After you call this operation, Health stops aggregating events +// for all other Amazon Web Services accounts in your organization. If you call +// the Health API operations for organizational view, Health returns an error. +// Health continues to aggregate health events for your Amazon Web Services +// account. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1598,29 +1595,29 @@ func (c *Health) EnableHealthServiceAccessForOrganizationRequest(input *EnableHe // EnableHealthServiceAccessForOrganization API operation for AWS Health APIs and Notifications. // -// Enables AWS Health to work with AWS Organizations. You can use the organizational -// view feature to aggregate events from all AWS accounts in your organization -// in a centralized location. +// Enables Health to work with Organizations. You can use the organizational +// view feature to aggregate events from all Amazon Web Services accounts in +// your organization in a centralized location. // // This operation also creates a service-linked role for the management account // in the organization. // // To call this operation, you must meet the following requirements: // -// * You must have a Business or Enterprise Support plan from AWS Support -// (http://aws.amazon.com/premiumsupport/) to use the AWS Health API. 
If -// you call the AWS Health API from an AWS account that doesn't have a Business -// or Enterprise Support plan, you receive a SubscriptionRequiredException +// * You must have a Business or Enterprise Support plan from Amazon Web +// Services Support (http://aws.amazon.com/premiumsupport/) to use the Health +// API. If you call the Health API from an Amazon Web Services account that +// doesn't have a Business or Enterprise Support plan, you receive a SubscriptionRequiredException // error. // // * You must have permission to call this operation from the organization's -// management account. For example IAM policies, see AWS Health identity-based +// management account. For example IAM policies, see Health identity-based // policy examples (https://docs.aws.amazon.com/health/latest/ug/security_iam_id-based-policy-examples.html). // -// If you don't have the required support plan, you can instead use the AWS -// Health console to enable the organizational view feature. For more information, -// see Aggregating AWS Health events (https://docs.aws.amazon.com/health/latest/ug/aggregate-events.html) -// in the AWS Health User Guide. +// If you don't have the required support plan, you can instead use the Health +// console to enable the organizational view feature. For more information, +// see Aggregating Health events (https://docs.aws.amazon.com/health/latest/ug/aggregate-events.html) +// in the Health User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1663,7 +1660,8 @@ func (c *Health) EnableHealthServiceAccessForOrganizationWithContext(ctx aws.Con type AffectedEntity struct { _ struct{} `type:"structure"` - // The 12-digit AWS account number that contains the affected entity. + // The 12-digit Amazon Web Services account number that contains the affected + // entity. 
AwsAccountId *string `locationName:"awsAccountId" type:"string"` // The unique identifier for the entity. Format: arn:aws:health:entity-region:aws-account:entity/entity-id @@ -1963,17 +1961,17 @@ type DescribeAffectedAccountsForOrganizationOutput struct { // A JSON set of elements of the affected accounts. AffectedAccounts []*string `locationName:"affectedAccounts" type:"list"` - // This parameter specifies if the AWS Health event is a public AWS service - // event or an account-specific event. + // This parameter specifies if the Health event is a public Amazon Web Services + // service event or an account-specific event. // // * If the eventScopeCode value is PUBLIC, then the affectedAccounts value // is always empty. // // * If the eventScopeCode value is ACCOUNT_SPECIFIC, then the affectedAccounts - // value lists the affected AWS accounts in your organization. For example, - // if an event affects a service such as Amazon Elastic Compute Cloud and - // you have AWS accounts that use that service, those account IDs appear - // in the response. + // value lists the affected Amazon Web Services accounts in your organization. + // For example, if an event affects a service such as Amazon Elastic Compute + // Cloud and you have Amazon Web Services accounts that use that service, + // those account IDs appear in the response. // // * If the eventScopeCode value is NONE, then the eventArn that you specified // in the request is invalid or doesn't exist. @@ -3158,8 +3156,8 @@ func (s DescribeHealthServiceStatusForOrganizationInput) GoString() string { type DescribeHealthServiceStatusForOrganizationOutput struct { _ struct{} `type:"structure"` - // Information about the status of enabling or disabling AWS Health Organizational - // View in your organization. + // Information about the status of enabling or disabling the Health organizational + // view feature in your organization. // // Valid values are ENABLED | DISABLED | PENDING. 
HealthServiceAccessStatusForOrganization *string `locationName:"healthServiceAccessStatusForOrganization" type:"string"` @@ -3325,7 +3323,7 @@ func (s *EntityAggregate) SetEventArn(v string) *EntityAggregate { return s } -// The values to use to filter results from the EntityFilter (https://docs.aws.amazon.com/health/latest/APIReference/API_EntityFilter.html) +// The values to use to filter results from the DescribeAffectedEntities (https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeAffectedEntities.html) // operation. type EntityFilter struct { _ struct{} `type:"structure"` @@ -3436,19 +3434,20 @@ func (s *EntityFilter) SetTags(v []map[string]*string) *EntityFilter { return s } -// Summary information about an AWS Health event. +// Summary information about an Health event. // -// AWS Health events can be public or account-specific: +// Health events can be public or account-specific: // -// * Public events might be service events that are not specific to an AWS -// account. For example, if there is an issue with an AWS Region, AWS Health -// provides information about the event, even if you don't use services or -// resources in that Region. +// * Public events might be service events that are not specific to an Amazon +// Web Services account. For example, if there is an issue with an Amazon +// Web Services Region, Health provides information about the event, even +// if you don't use services or resources in that Region. // -// * Account-specific events are specific to either your AWS account or an -// account in your organization. For example, if there's an issue with Amazon -// Elastic Compute Cloud in a Region that you use, AWS Health provides information -// about the event and the affected resources in the account. +// * Account-specific events are specific to either your Amazon Web Services +// account or an account in your organization. 
For example, if there's an +// issue with Amazon Elastic Compute Cloud in a Region that you use, Health +// provides information about the event and the affected resources in the +// account. // // You can determine if an event is public or account-specific by using the // eventScopeCode parameter. For more information, see eventScopeCode (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html#AWSHealth-Type-Event-eventScopeCode). @@ -3463,30 +3462,31 @@ type Event struct { // arn:aws:health:us-east-1::event/EC2/EC2_INSTANCE_RETIREMENT_SCHEDULED/EC2_INSTANCE_RETIREMENT_SCHEDULED_ABC123-DEF456 Arn *string `locationName:"arn" type:"string"` - // The AWS Availability Zone of the event. For example, us-east-1a. + // The Amazon Web Services Availability Zone of the event. For example, us-east-1a. AvailabilityZone *string `locationName:"availabilityZone" min:"6" type:"string"` // The date and time that the event ended. EndTime *time.Time `locationName:"endTime" type:"timestamp"` - // This parameter specifies if the AWS Health event is a public AWS service - // event or an account-specific event. + // This parameter specifies if the Health event is a public Amazon Web Services + // service event or an account-specific event. // // * If the eventScopeCode value is PUBLIC, then the affectedAccounts value // is always empty. // // * If the eventScopeCode value is ACCOUNT_SPECIFIC, then the affectedAccounts - // value lists the affected AWS accounts in your organization. For example, - // if an event affects a service such as Amazon Elastic Compute Cloud and - // you have AWS accounts that use that service, those account IDs appear - // in the response. + // value lists the affected Amazon Web Services accounts in your organization. + // For example, if an event affects a service such as Amazon Elastic Compute + // Cloud and you have Amazon Web Services accounts that use that service, + // those account IDs appear in the response. 
// // * If the eventScopeCode value is NONE, then the eventArn that you specified // in the request is invalid or doesn't exist. EventScopeCode *string `locationName:"eventScopeCode" type:"string" enum:"EventScopeCode"` - // The category of the event. Possible values are issue, scheduledChange, and - // accountNotification. + // A list of event type category codes. Possible values are issue, accountNotification, + // or scheduledChange. Currently, the investigation value isn't supported at + // this time. EventTypeCategory *string `locationName:"eventTypeCategory" min:"3" type:"string" enum:"EventTypeCategory"` // The unique identifier for the event type. The format is AWS_SERVICE_DESCRIPTION @@ -3496,10 +3496,11 @@ type Event struct { // The most recent date and time that the event was updated. LastUpdatedTime *time.Time `locationName:"lastUpdatedTime" type:"timestamp"` - // The AWS Region name of the event. + // The Amazon Web Services Region name of the event. Region *string `locationName:"region" min:"2" type:"string"` - // The AWS service that is affected by the event. For example, EC2, RDS. + // The Amazon Web Services service that is affected by the event. For example, + // EC2, RDS. Service *string `locationName:"service" min:"2" type:"string"` // The date and time that the event began. @@ -3601,7 +3602,8 @@ func (s *Event) SetStatusCode(v string) *Event { type EventAccountFilter struct { _ struct{} `type:"structure"` - // The 12-digit AWS account numbers that contains the affected entities. + // The 12-digit Amazon Web Services account numbers that contains the affected + // entities. AwsAccountId *string `locationName:"awsAccountId" type:"string"` // The unique identifier for the event. 
The event ARN has the arn:aws:health:event-region::event/SERVICE/EVENT_TYPE_CODE/EVENT_TYPE_PLUS_ID @@ -3851,7 +3853,7 @@ func (s *EventDetailsErrorItem) SetEventArn(v string) *EventDetailsErrorItem { type EventFilter struct { _ struct{} `type:"structure"` - // A list of AWS Availability Zones. + // A list of Amazon Web Services Availability Zones. AvailabilityZones []*string `locationName:"availabilityZones" type:"list"` // A list of dates and times that the event ended. @@ -3871,7 +3873,9 @@ type EventFilter struct { // A list of event status codes. EventStatusCodes []*string `locationName:"eventStatusCodes" min:"1" type:"list"` - // A list of event type category codes (issue, scheduledChange, or accountNotification). + // A list of event type category codes. Possible values are issue, accountNotification, + // or scheduledChange. Currently, the investigation value isn't supported at + // this time. EventTypeCategories []*string `locationName:"eventTypeCategories" min:"1" type:"list"` // A list of unique identifiers for event types. For example, "AWS_EC2_SYSTEM_MAINTENANCE_EVENT","AWS_RDS_MAINTENANCE_SCHEDULED". @@ -3880,10 +3884,11 @@ type EventFilter struct { // A list of dates and times that the event was last updated. LastUpdatedTimes []*DateTimeRange `locationName:"lastUpdatedTimes" min:"1" type:"list"` - // A list of AWS Regions. + // A list of Amazon Web Services Regions. Regions []*string `locationName:"regions" min:"1" type:"list"` - // The AWS services associated with the event. For example, EC2, RDS. + // The Amazon Web Services services associated with the event. For example, + // EC2, RDS. Services []*string `locationName:"services" min:"1" type:"list"` // A list of dates and times that the event began. @@ -4034,30 +4039,33 @@ func (s *EventFilter) SetTags(v []map[string]*string) *EventFilter { return s } -// Contains the metadata about a type of event that is reported by AWS Health. 
-// The EventType shows the category, service, and the event type code of the -// event. For example, an issue might be the category, EC2 the service, and -// AWS_EC2_SYSTEM_MAINTENANCE_EVENT the event type code. +// Contains the metadata about a type of event that is reported by Health. The +// EventType shows the category, service, and the event type code of the event. +// For example, an issue might be the category, EC2 the service, and AWS_EC2_SYSTEM_MAINTENANCE_EVENT +// the event type code. // // You can use the DescribeEventTypes (https://docs.aws.amazon.com/health/latest/APIReference/API_DescribeEventTypes.html) // API operation to return this information about an event. // // You can also use the Amazon CloudWatch Events console to create a rule so -// that you can get notified or take action when AWS Health delivers a specific -// event to your AWS account. For more information, see Monitor for AWS Health -// events with Amazon CloudWatch Events (https://docs.aws.amazon.com/health/latest/ug/cloudwatch-events-health.html) -// in the AWS Health User Guide. +// that you can get notified or take action when Health delivers a specific +// event to your Amazon Web Services account. For more information, see Monitor +// for Health events with Amazon CloudWatch Events (https://docs.aws.amazon.com/health/latest/ug/cloudwatch-events-health.html) +// in the Health User Guide. type EventType struct { _ struct{} `type:"structure"` - // A list of event type category codes (issue, scheduledChange, or accountNotification). + // A list of event type category codes. Possible values are issue, accountNotification, + // or scheduledChange. Currently, the investigation value isn't supported at + // this time. Category *string `locationName:"category" min:"3" type:"string" enum:"EventTypeCategory"` // The unique identifier for the event type. The format is AWS_SERVICE_DESCRIPTION // ; for example, AWS_EC2_SYSTEM_MAINTENANCE_EVENT. 
Code *string `locationName:"code" min:"3" type:"string"` - // The AWS service that is affected by the event. For example, EC2, RDS. + // The Amazon Web Services service that is affected by the event. For example, + // EC2, RDS. Service *string `locationName:"service" min:"2" type:"string"` } @@ -4102,13 +4110,16 @@ func (s *EventType) SetService(v string) *EventType { type EventTypeFilter struct { _ struct{} `type:"structure"` - // A list of event type category codes (issue, scheduledChange, or accountNotification). + // A list of event type category codes. Possible values are issue, accountNotification, + // or scheduledChange. Currently, the investigation value isn't supported at + // this time. EventTypeCategories []*string `locationName:"eventTypeCategories" min:"1" type:"list"` // A list of event type codes. EventTypeCodes []*string `locationName:"eventTypeCodes" min:"1" type:"list"` - // The AWS services associated with the event. For example, EC2, RDS. + // The Amazon Web Services services associated with the event. For example, + // EC2, RDS. Services []*string `locationName:"services" min:"1" type:"list"` } @@ -4237,7 +4248,8 @@ func (s *InvalidPaginationToken) RequestID() string { type OrganizationAffectedEntitiesErrorItem struct { _ struct{} `type:"structure"` - // The 12-digit AWS account numbers that contains the affected entities. + // The 12-digit Amazon Web Services account numbers that contains the affected + // entities. AwsAccountId *string `locationName:"awsAccountId" type:"string"` // The unique identifier for the event type. The format is AWS_SERVICE_DESCRIPTION. @@ -4315,23 +4327,25 @@ type OrganizationEvent struct { // The date and time that the event ended. EndTime *time.Time `locationName:"endTime" type:"timestamp"` - // This parameter specifies if the AWS Health event is a public AWS service - // event or an account-specific event. 
+ // This parameter specifies if the Health event is a public Amazon Web Services + // service event or an account-specific event. // // * If the eventScopeCode value is PUBLIC, then the affectedAccounts value // is always empty. // // * If the eventScopeCode value is ACCOUNT_SPECIFIC, then the affectedAccounts - // value lists the affected AWS accounts in your organization. For example, - // if an event affects a service such as Amazon Elastic Compute Cloud and - // you have AWS accounts that use that service, those account IDs appear - // in the response. + // value lists the affected Amazon Web Services accounts in your organization. + // For example, if an event affects a service such as Amazon Elastic Compute + // Cloud and you have Amazon Web Services accounts that use that service, + // those account IDs appear in the response. // // * If the eventScopeCode value is NONE, then the eventArn that you specified // in the request is invalid or doesn't exist. EventScopeCode *string `locationName:"eventScopeCode" type:"string" enum:"EventScopeCode"` - // The category of the event type. + // A list of event type category codes. Possible values are issue, accountNotification, + // or scheduledChange. Currently, the investigation value isn't supported at + // this time. EventTypeCategory *string `locationName:"eventTypeCategory" min:"3" type:"string" enum:"EventTypeCategory"` // The unique identifier for the event type. The format is AWS_SERVICE_DESCRIPTION. @@ -4341,10 +4355,11 @@ type OrganizationEvent struct { // The most recent date and time that the event was updated. LastUpdatedTime *time.Time `locationName:"lastUpdatedTime" type:"timestamp"` - // The AWS Region name of the event. + // The Amazon Web Services Region name of the event. Region *string `locationName:"region" min:"2" type:"string"` - // The AWS service that is affected by the event, such as EC2 and RDS. + // The Amazon Web Services service that is affected by the event, such as EC2 + // and RDS. 
Service *string `locationName:"service" min:"2" type:"string"` // The date and time that the event began. @@ -4441,22 +4456,24 @@ func (s *OrganizationEvent) SetStatusCode(v string) *OrganizationEvent { type OrganizationEventDetails struct { _ struct{} `type:"structure"` - // The 12-digit AWS account numbers that contains the affected entities. + // The 12-digit Amazon Web Services account numbers that contains the affected + // entities. AwsAccountId *string `locationName:"awsAccountId" type:"string"` - // Summary information about an AWS Health event. + // Summary information about an Health event. // - // AWS Health events can be public or account-specific: + // Health events can be public or account-specific: // - // * Public events might be service events that are not specific to an AWS - // account. For example, if there is an issue with an AWS Region, AWS Health - // provides information about the event, even if you don't use services or - // resources in that Region. + // * Public events might be service events that are not specific to an Amazon + // Web Services account. For example, if there is an issue with an Amazon + // Web Services Region, Health provides information about the event, even + // if you don't use services or resources in that Region. // - // * Account-specific events are specific to either your AWS account or an - // account in your organization. For example, if there's an issue with Amazon - // Elastic Compute Cloud in a Region that you use, AWS Health provides information - // about the event and the affected resources in the account. + // * Account-specific events are specific to either your Amazon Web Services + // account or an account in your organization. For example, if there's an + // issue with Amazon Elastic Compute Cloud in a Region that you use, Health + // provides information about the event and the affected resources in the + // account. 
// // You can determine if an event is public or account-specific by using the // eventScopeCode parameter. For more information, see eventScopeCode (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html#AWSHealth-Type-Event-eventScopeCode). @@ -4528,16 +4545,16 @@ type OrganizationEventDetailsErrorItem struct { // one of the following errors, follow the recommendations in the message: // // * We couldn't find a public event that matches your request. To find an - // event that is account specific, you must enter an AWS account ID in the - // request. - // - // * We couldn't find an account specific event for the specified AWS account. - // To find an event that is public, you must enter a null value for the AWS + // event that is account specific, you must enter an Amazon Web Services // account ID in the request. // - // * Your AWS account doesn't include the AWS Support plan required to use - // the AWS Health API. You must have either a Business or Enterprise Support - // plan. + // * We couldn't find an account specific event for the specified Amazon + // Web Services account. To find an event that is public, you must enter + // a null value for the Amazon Web Services account ID in the request. + // + // * Your Amazon Web Services account doesn't include the Amazon Web Services + // Support plan required to use the Health API. You must have either a Business + // or Enterprise Support plan. ErrorMessage *string `locationName:"errorMessage" type:"string"` // The name of the error. @@ -4599,7 +4616,8 @@ func (s *OrganizationEventDetailsErrorItem) SetEventArn(v string) *OrganizationE type OrganizationEventFilter struct { _ struct{} `type:"structure"` - // A list of 12-digit AWS account numbers that contains the affected entities. + // A list of 12-digit Amazon Web Services account numbers that contains the + // affected entities. 
AwsAccountIds []*string `locationName:"awsAccountIds" min:"1" type:"list"` // A range of dates and times that is used by the EventFilter (https://docs.aws.amazon.com/health/latest/APIReference/API_EventFilter.html) @@ -4621,7 +4639,9 @@ type OrganizationEventFilter struct { // A list of event status codes. EventStatusCodes []*string `locationName:"eventStatusCodes" min:"1" type:"list"` - // A list of event type category codes (issue, scheduledChange, or accountNotification). + // A list of event type category codes. Possible values are issue, accountNotification, + // or scheduledChange. Currently, the investigation value isn't supported at + // this time. EventTypeCategories []*string `locationName:"eventTypeCategories" min:"1" type:"list"` // A list of unique identifiers for event types. For example, "AWS_EC2_SYSTEM_MAINTENANCE_EVENT","AWS_RDS_MAINTENANCE_SCHEDULED". @@ -4636,10 +4656,11 @@ type OrganizationEventFilter struct { // value is equal to or before to. LastUpdatedTime *DateTimeRange `locationName:"lastUpdatedTime" type:"structure"` - // A list of AWS Regions. + // A list of Amazon Web Services Regions. Regions []*string `locationName:"regions" min:"1" type:"list"` - // The AWS services associated with the event. For example, EC2, RDS. + // The Amazon Web Services services associated with the event. For example, + // EC2, RDS. Services []*string `locationName:"services" min:"1" type:"list"` // A range of dates and times that is used by the EventFilter (https://docs.aws.amazon.com/health/latest/APIReference/API_EventFilter.html) diff --git a/service/health/doc.go b/service/health/doc.go index b9ad7e966dd..6eefce724fa 100644 --- a/service/health/doc.go +++ b/service/health/doc.go @@ -3,44 +3,43 @@ // Package health provides the client and types for making API // requests to AWS Health APIs and Notifications. 
// -// The AWS Health API provides programmatic access to the AWS Health information -// that appears in the AWS Personal Health Dashboard (https://phd.aws.amazon.com/phd/home#/). -// You can use the API operations to get information about AWS Health events -// that affect your AWS services and resources. +// The Health API provides programmatic access to the Health information that +// appears in the Personal Health Dashboard (https://phd.aws.amazon.com/phd/home#/). +// You can use the API operations to get information about events that might +// affect your Amazon Web Services services and resources. // -// * You must have a Business or Enterprise Support plan from AWS Support -// (http://aws.amazon.com/premiumsupport/) to use the AWS Health API. If -// you call the AWS Health API from an AWS account that doesn't have a Business -// or Enterprise Support plan, you receive a SubscriptionRequiredException +// * You must have a Business or Enterprise Support plan from Amazon Web +// Services Support (http://aws.amazon.com/premiumsupport/) to use the Health +// API. If you call the Health API from an Amazon Web Services account that +// doesn't have a Business or Enterprise Support plan, you receive a SubscriptionRequiredException // error. // -// * You can use the AWS Health endpoint health.us-east-1.amazonaws.com (HTTPS) -// to call the AWS Health API operations. AWS Health supports a multi-Region -// application architecture and has two regional endpoints in an active-passive -// configuration. You can use the high availability endpoint example to determine -// which AWS Region is active, so that you can get the latest information -// from the API. For more information, see Accessing the AWS Health API (https://docs.aws.amazon.com/health/latest/ug/health-api.html) -// in the AWS Health User Guide. +// * You can use the Health endpoint health.us-east-1.amazonaws.com (HTTPS) +// to call the Health API operations. 
Health supports a multi-Region application +// architecture and has two regional endpoints in an active-passive configuration. +// You can use the high availability endpoint example to determine which +// Amazon Web Services Region is active, so that you can get the latest information +// from the API. For more information, see Accessing the Health API (https://docs.aws.amazon.com/health/latest/ug/health-api.html) +// in the Health User Guide. // -// For authentication of requests, AWS Health uses the Signature Version 4 Signing +// For authentication of requests, Health uses the Signature Version 4 Signing // Process (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). // -// If your AWS account is part of AWS Organizations, you can use the AWS Health -// organizational view feature. This feature provides a centralized view of -// AWS Health events across all accounts in your organization. You can aggregate -// AWS Health events in real time to identify accounts in your organization -// that are affected by an operational event or get notified of security vulnerabilities. +// If your Amazon Web Services account is part of Organizations, you can use +// the Health organizational view feature. This feature provides a centralized +// view of Health events across all accounts in your organization. You can aggregate +// Health events in real time to identify accounts in your organization that +// are affected by an operational event or get notified of security vulnerabilities. // Use the organizational view API operations to enable this feature and return -// event information. For more information, see Aggregating AWS Health events -// (https://docs.aws.amazon.com/health/latest/ug/aggregate-events.html) in the -// AWS Health User Guide. +// event information. For more information, see Aggregating Health events (https://docs.aws.amazon.com/health/latest/ug/aggregate-events.html) +// in the Health User Guide. 
// -// When you use the AWS Health API operations to return AWS Health events, see -// the following recommendations: +// When you use the Health API operations to return Health events, see the following +// recommendations: // // * Use the eventScopeCode (https://docs.aws.amazon.com/health/latest/APIReference/API_Event.html#AWSHealth-Type-Event-eventScopeCode) -// parameter to specify whether to return AWS Health events that are public -// or account-specific. +// parameter to specify whether to return Health events that are public or +// account-specific. // // * Use pagination to view all events from the response. For example, if // you call the DescribeEventsForOrganization operation to get all events