diff --git a/codegen/sdk-codegen/aws-models/bedrock-agent-runtime.json b/codegen/sdk-codegen/aws-models/bedrock-agent-runtime.json index bf445a8a668..7549cd9ab48 100644 --- a/codegen/sdk-codegen/aws-models/bedrock-agent-runtime.json +++ b/codegen/sdk-codegen/aws-models/bedrock-agent-runtime.json @@ -855,7 +855,7 @@ } }, "traits": { - "smithy.api#documentation": "
An object containing a segment of the generated response that is based on a source in the knowledge base, alongside information about the source.
" + "smithy.api#documentation": "An object containing a segment of the generated response that is based on a source in the knowledge base, alongside information about the source.
\nThis data type is used in the following API operations:
\n\n Retrieve response – in the citations
field
\n RetrieveAndGenerate response – in the citations
field
Contains metadata about a part of the generated response that is accompanied by a citation.
" + "smithy.api#documentation": "Contains metadata about a part of the generated response that is accompanied by a citation.
\nThis data type is used in the following API operations:
\n\n Retrieve response – in the generatedResponsePart
field
\n RetrieveAndGenerate response – in the generatedResponsePart
field
Contains the template for the prompt that's sent to the model for response generation.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Contains configurations for response generation based on the knowledge base query results.
\nThis data type is used in the following API operations:
\nContains parameters that specify various attributes of the session.
" + "smithy.api#documentation": "Contains parameters that specify various attributes of the session. For more information, see Control session context.
" } }, "agentId": { @@ -1316,7 +1330,7 @@ } }, "traits": { - "smithy.api#documentation": "Contains the query made to the knowledge base.
", + "smithy.api#documentation": "Contains the query made to the knowledge base.
\nThis data type is used in the following API operations:
\n\n Retrieve request – in the retrievalQuery
field
Contains details about how the results from the vector search should be returned.
", + "smithy.api#documentation": "Contains details about how the results from the vector search should be returned. For more information, see Query configurations.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Contains details about how the results should be returned.
\nThis data type is used in the following API operations:
\nContains configurations for the knowledge base query and retrieval process. For more information, see Query configurations.
\nThis data type is used in the following API operations:
\n\n Retrieve request – in the retrievalConfiguration
field
\n RetrieveAndGenerate request – in the retrievalConfiguration
field
Details about a result from querying the knowledge base.
" + "smithy.api#documentation": "Details about a result from querying the knowledge base.
\nThis data type is used in the following API operations:
\n\n Retrieve response – in the retrievalResults
field
Contains configurations for how to retrieve and return the knowledge base query.
" } + }, + "generationConfiguration": { + "target": "com.amazonaws.bedrockagentruntime#GenerationConfiguration", + "traits": { + "smithy.api#documentation": "Contains configurations for response generation based on the knowwledge base query results.
" + } } }, "traits": { - "smithy.api#documentation": "Contains details about the resource being queried.
" + "smithy.api#documentation": "Contains details about the resource being queried.
\nThis data type is used in the following API operations:
\n\n Retrieve request – in the knowledgeBaseConfiguration
field
\n RetrieveAndGenerate request – in the knowledgeBaseConfiguration
field
The number of results to return.
\nThe numberOfResults
field is currently unsupported for RetrieveAndGenerate
. Don't include it in this field if you are sending a RetrieveAndGenerate
request.
The number of source chunks to retrieve.
", "smithy.api#range": { "min": 1, - "max": 25 + "max": 100 } } }, @@ -1422,7 +1442,7 @@ } }, "traits": { - "smithy.api#documentation": "Configurations for how to carry out the search.
" + "smithy.api#documentation": "Configurations for how to perform the search query and return results. For more information, see Query configurations.
\nThis data type is used in the following API operations:
\n\n Retrieve request – in the vectorSearchConfiguration
field
\n RetrieveAndGenerate request – in the vectorSearchConfiguration
field
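As a rough illustration of how a caller supplies the vectorSearchConfiguration described above (including the numberOfResults range that this model raises to a maximum of 100), here is a hedged boto3 sketch. It assumes the Retrieve operation is exposed as retrieve on the bedrock-agent-runtime client; the knowledge base ID and query text are placeholders.

```python
import boto3

client = boto3.client("bedrock-agent-runtime")

# Placeholder knowledge base ID and query; numberOfResults controls how many
# source chunks are returned (1-100 per the updated range in this model).
response = client.retrieve(
    knowledgeBaseId="KBID12345",
    retrievalQuery={"text": "vacation carryover rules"},
    retrievalConfiguration={
        "vectorSearchConfiguration": {"numberOfResults": 25}
    },
)
for result in response["retrievalResults"]:
    print(result["content"]["text"][:80], result["location"])
```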
The template for the prompt that's sent to the model for response generation. You can include prompt placeholders, which become replaced before the prompt is sent to the model to provide instructions and context to the model. In addition, you can include XML tags to delineate meaningful sections of the prompt template.
\nFor more information, see the following resources:
\nContains the template for the prompt that's sent to the model for response generation. For more information, see Knowledge base prompt templates.
\nThis data type is used in the following API operations:
\nContains the cited text from the data source.
", + "smithy.api#documentation": "Contains the cited text from the data source.
\nThis data type is used in the following API operations:
\n\n Retrieve response – in the content
field
\n RetrieveAndGenerate response – in the content
field
\n Retrieve response – in the content
field
Contains information about the location of the data source.
", + "smithy.api#documentation": "Contains information about the location of the data source.
\nThis data type is used in the following API operations:
\n\n Retrieve response – in the location
field
\n RetrieveAndGenerate response – in the location
field
\n Retrieve response – in the location
field
Contains the S3 location of the data source.
" + "smithy.api#documentation": "Contains the S3 location of the data source.
\nThis data type is used in the following API operations:
\n\n Retrieve response – in the s3Location
field
\n RetrieveAndGenerate response – in the s3Location
field
\n Retrieve response – in the s3Location
field
Queries a knowledge base and generates responses based on the retrieved results. The response cites up to five sources but only selects the ones that are relevant to the query.
\nThe numberOfResults
field is currently unsupported for RetrieveAndGenerate
. Don't include it in the vectorSearchConfiguration object.
Queries a knowledge base and generates responses based on the retrieved results. The response cites up to five sources but only selects the ones that are relevant to the query.
", "smithy.api#http": { "code": 200, "method": "POST", @@ -2146,7 +2180,7 @@ } }, "traits": { - "smithy.api#documentation": "Contains details about the resource being queried.
" + "smithy.api#documentation": "Contains details about the resource being queried.
\nThis data type is used in the following API operations:
\n\n RetrieveAndGenerate request – in the retrieveAndGenerateConfiguration
field
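A minimal sketch of the RetrieveAndGenerate request shape built from the configurations documented in this section, assuming the operation surfaces in boto3 as retrieve_and_generate; the knowledge base ID, model ARN, and prompt template are placeholders, and generationConfiguration is the member this change adds.

```python
import boto3

client = boto3.client("bedrock-agent-runtime", region_name="us-east-1")

response = client.retrieve_and_generate(
    input={"text": "What is our refund policy?"},
    retrieveAndGenerateConfiguration={
        "type": "KNOWLEDGE_BASE",
        "knowledgeBaseConfiguration": {
            "knowledgeBaseId": "KBID12345",  # placeholder
            "modelArn": "arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-v2",  # placeholder
            "retrievalConfiguration": {
                "vectorSearchConfiguration": {"numberOfResults": 10}
            },
            # Added by this change: response-generation settings such as a prompt template.
            "generationConfiguration": {
                "promptTemplate": {
                    "textPromptTemplate": "Answer using only these results: $search_results$"
                }
            },
        },
    },
)
print(response["output"]["text"])
for citation in response["citations"]:
    part = citation["generatedResponsePart"]["textResponsePart"]
    sources = [ref["location"] for ref in citation["retrievedReferences"]]
    print(part["span"], sources)
```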
Contains the query made to the knowledge base.
", + "smithy.api#documentation": "Contains the query made to the knowledge base.
\nThis data type is used in the following API operations:
\n\n RetrieveAndGenerate request – in the input
field
Contains the response generated from querying the knowledge base.
", + "smithy.api#documentation": "Contains the response generated from querying the knowledge base.
\nThis data type is used in the following API operations:
\n\n RetrieveAndGenerate response – in the output
field
Contains the query made to the knowledge base.
", + "smithy.api#documentation": "Contains the query to be made to the knowledge base.
", "smithy.api#required": {} } }, "retrieveAndGenerateConfiguration": { "target": "com.amazonaws.bedrockagentruntime#RetrieveAndGenerateConfiguration", "traits": { - "smithy.api#documentation": "Contains details about the resource being queried and the foundation model used for generation.
" + "smithy.api#documentation": "Contains configurations for the knowledge base query and retrieval process. For more information, see Query configurations.
" } }, "sessionConfiguration": { @@ -2265,7 +2299,7 @@ } }, "traits": { - "smithy.api#documentation": "Contains configuration about the session with the knowledge base.
" + "smithy.api#documentation": "Contains configuration about the session with the knowledge base.
\nThis data type is used in the following API operations:
\n\n RetrieveAndGenerate request – in the sessionConfiguration
field
The query to send the knowledge base.
", + "smithy.api#documentation": "Contains the query to send the knowledge base.
", "smithy.api#required": {} } }, "retrievalConfiguration": { "target": "com.amazonaws.bedrockagentruntime#KnowledgeBaseRetrievalConfiguration", "traits": { - "smithy.api#documentation": "Contains details about how the results should be returned.
" + "smithy.api#documentation": "Contains configurations for the knowledge base query and retrieval process. For more information, see Query configurations.
" } }, "nextToken": { @@ -2360,7 +2394,7 @@ } }, "traits": { - "smithy.api#documentation": "Contains metadata about a sources cited for the generated response.
" + "smithy.api#documentation": "Contains metadata about a source cited for the generated response.
\nThis data type is used in the following API operations:
\n\n RetrieveAndGenerate response – in the retrievedReferences
field
\n Retrieve response – in the retrievedReferences
field
Contains parameters that specify various attributes that persist across a session or prompt. You can define session state attributes as key-value pairs when writing a Lambda function for an action group or pass them when making an InvokeAgent request. Use session state attributes to control and provide conversational context for your agent and to help customize your agent's behavior. For more information, see Session context.
" + "smithy.api#documentation": "Contains parameters that specify various attributes that persist across a session or prompt. You can define session state attributes as key-value pairs when writing a Lambda function for an action group or pass them when making an InvokeAgent request. Use session state attributes to control and provide conversational context for your agent and to help customize your agent's behavior. For more information, see Control session context.
" } }, "com.amazonaws.bedrockagentruntime#Source": { @@ -2487,7 +2521,7 @@ } }, "traits": { - "smithy.api#documentation": "Contains information about where the text with a citation begins and ends in the generated output.
" + "smithy.api#documentation": "Contains information about where the text with a citation begins and ends in the generated output.
\nThis data type is used in the following API operations:
\n\n RetrieveAndGenerate response – in the span
field
\n Retrieve response – in the span
field
Contains the part of the generated text that contains a citation, alongside where it begins and ends.
", + "smithy.api#documentation": "Contains the part of the generated text that contains a citation, alongside where it begins and ends.
\nThis data type is used in the following API operations:
\n\n RetrieveAndGenerate response – in the textResponsePart
field
\n Retrieve response – in the textResponsePart
field
\n A request to backfill is already in progress. Once the previous request is complete, you can create another request.\n
", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.costexplorer#BillExpirationException": { "type": "structure", "members": { @@ -1410,6 +1429,79 @@ "smithy.api#documentation": "The cost allocation tag structure. This includes detailed metadata for the\n CostAllocationTag
object.
\n The date the backfill starts from.\n
" + } + }, + "RequestedAt": { + "target": "com.amazonaws.costexplorer#ZonedDateTime", + "traits": { + "smithy.api#documentation": "\n The time when the backfill was requested.\n
" + } + }, + "CompletedAt": { + "target": "com.amazonaws.costexplorer#ZonedDateTime", + "traits": { + "smithy.api#documentation": "\n The backfill completion time.\n
" + } + }, + "BackfillStatus": { + "target": "com.amazonaws.costexplorer#CostAllocationTagBackfillStatus", + "traits": { + "smithy.api#documentation": "\n The status of the cost allocation tag backfill request.\n
" + } + }, + "LastUpdatedAt": { + "target": "com.amazonaws.costexplorer#ZonedDateTime", + "traits": { + "smithy.api#documentation": "\n The time when the backfill status was last updated.\n
" + } + } + }, + "traits": { + "smithy.api#documentation": "\n The cost allocation tag backfill request structure that contains metadata and details of a certain backfill.
" + } + }, + "com.amazonaws.costexplorer#CostAllocationTagBackfillRequestList": { + "type": "list", + "member": { + "target": "com.amazonaws.costexplorer#CostAllocationTagBackfillRequest" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1000 + } + } + }, + "com.amazonaws.costexplorer#CostAllocationTagBackfillStatus": { + "type": "enum", + "members": { + "SUCCEEDED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "SUCCEEDED" + } + }, + "PROCESSING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROCESSING" + } + }, + "FAILED": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "FAILED" + } + } + } + }, "com.amazonaws.costexplorer#CostAllocationTagKeyList": { "type": "list", "member": { @@ -5660,6 +5752,71 @@ "smithy.api#error": "client" } }, + "com.amazonaws.costexplorer#ListCostAllocationTagBackfillHistory": { + "type": "operation", + "input": { + "target": "com.amazonaws.costexplorer#ListCostAllocationTagBackfillHistoryRequest" + }, + "output": { + "target": "com.amazonaws.costexplorer#ListCostAllocationTagBackfillHistoryResponse" + }, + "errors": [ + { + "target": "com.amazonaws.costexplorer#InvalidNextTokenException" + }, + { + "target": "com.amazonaws.costexplorer#LimitExceededException" + } + ], + "traits": { + "smithy.api#documentation": "\n Retrieves a list of your historical cost allocation tag backfill requests.\n
", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.costexplorer#ListCostAllocationTagBackfillHistoryRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.costexplorer#NextPageToken", + "traits": { + "smithy.api#documentation": "\n The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.\n
" + } + }, + "MaxResults": { + "target": "com.amazonaws.costexplorer#CostAllocationTagsMaxResults", + "traits": { + "smithy.api#documentation": "\n The maximum number of objects that are returned for this request.\n
" + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.costexplorer#ListCostAllocationTagBackfillHistoryResponse": { + "type": "structure", + "members": { + "BackfillRequests": { + "target": "com.amazonaws.costexplorer#CostAllocationTagBackfillRequestList", + "traits": { + "smithy.api#documentation": "\n The list of historical cost allocation tag backfill requests.\n
" + } + }, + "NextToken": { + "target": "com.amazonaws.costexplorer#NextPageToken", + "traits": { + "smithy.api#documentation": "\n The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.\n
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.costexplorer#ListCostAllocationTags": { "type": "operation", "input": { @@ -8322,6 +8479,55 @@ } } }, + "com.amazonaws.costexplorer#StartCostAllocationTagBackfill": { + "type": "operation", + "input": { + "target": "com.amazonaws.costexplorer#StartCostAllocationTagBackfillRequest" + }, + "output": { + "target": "com.amazonaws.costexplorer#StartCostAllocationTagBackfillResponse" + }, + "errors": [ + { + "target": "com.amazonaws.costexplorer#BackfillLimitExceededException" + }, + { + "target": "com.amazonaws.costexplorer#LimitExceededException" + } + ], + "traits": { + "smithy.api#documentation": "\n Request a cost allocation tag backfill. This will backfill the activation status (either active
or inactive
) for all tag keys from BackfillFrom
up to the time this request is made.
You can request a backfill once every 24 hours.\n
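A hedged sketch of the backfill workflow described above, assuming these new operations surface in boto3 as start_cost_allocation_tag_backfill and list_cost_allocation_tag_backfill_history on the Cost Explorer (ce) client; the date is a placeholder billing period start.

```python
import boto3

ce = boto3.client("ce")

# BackfillFrom must be the first day of a month within the last twelve months.
backfill = ce.start_cost_allocation_tag_backfill(
    BackfillFrom="2024-01-01T00:00:00Z"
)
print(backfill["BackfillRequest"]["BackfillStatus"])  # e.g. PROCESSING

# Review previous backfill requests and their statuses.
history = ce.list_cost_allocation_tag_backfill_history(MaxResults=100)
for request in history["BackfillRequests"]:
    print(request["RequestedAt"], request["BackfillStatus"])
```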
" + } + }, + "com.amazonaws.costexplorer#StartCostAllocationTagBackfillRequest": { + "type": "structure", + "members": { + "BackfillFrom": { + "target": "com.amazonaws.costexplorer#ZonedDateTime", + "traits": { + "smithy.api#documentation": "\n The date you want the backfill to start from. The date can only be a first day of the month (a billing start date). Dates can't precede the previous twelve months, or in the future.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.costexplorer#StartCostAllocationTagBackfillResponse": { + "type": "structure", + "members": { + "BackfillRequest": { + "target": "com.amazonaws.costexplorer#CostAllocationTagBackfillRequest", + "traits": { + "smithy.api#documentation": "\n An object containing detailed metadata of your new backfill request.\n
" + } + } + }, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.costexplorer#StartSavingsPlansPurchaseRecommendationGeneration": { "type": "operation", "input": { diff --git a/codegen/sdk-codegen/aws-models/ec2.json b/codegen/sdk-codegen/aws-models/ec2.json index d58790c9407..ae1b9347cfd 100644 --- a/codegen/sdk-codegen/aws-models/ec2.json +++ b/codegen/sdk-codegen/aws-models/ec2.json @@ -93055,7 +93055,7 @@ "MaxResults": { "target": "com.amazonaws.ec2#TransitGatewayMaxResults", "traits": { - "smithy.api#documentation": "The maximum number of routes to return. If a value is not provided, the default is\n 1000.
" + "smithy.api#documentation": "The maximum number of routes to return.
" } }, "DryRun": { diff --git a/codegen/sdk-codegen/aws-models/ecs.json b/codegen/sdk-codegen/aws-models/ecs.json index 76361390027..2205f6b035b 100644 --- a/codegen/sdk-codegen/aws-models/ecs.json +++ b/codegen/sdk-codegen/aws-models/ecs.json @@ -3097,7 +3097,7 @@ } ], "traits": { - "smithy.api#documentation": "Runs and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount
,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, see the UpdateService action.
The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
\nIn addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
\nYou can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. volumeConfigurations
is only supported for REPLICA\n\t\t\tservice and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
Tasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING
state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING
state and are reported as\n\t\t\thealthy by the load balancer.
There are two service scheduler strategies available:
\n\n REPLICA
- The replica scheduling strategy places and\n\t\t\t\t\tmaintains your desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. For\n\t\t\t\t\tmore information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
\n DAEMON
- The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks. It also stops tasks\n\t\t\t\t\tthat don't meet the placement constraints. When using this strategy, you don't\n\t\t\t\t\tneed to specify a desired number of tasks, a task placement strategy, or use\n\t\t\t\t\tService Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tthe task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent
is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING
state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING
state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy . If they're in the\n\t\t\t\tRUNNING
state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING
or\n\t\t\t\tPENDING
state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING
state.\n\t\t\tThis is while the container instances are in the DRAINING
state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement. For information\n\t\t\tabout task placement and task placement strategies, see Amazon ECS\n\t\t\t\ttask placement in the Amazon Elastic Container Service Developer Guide\n
\nStarting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
", + "smithy.api#documentation": "Runs and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount
,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, see the UpdateService action.
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
\nIn addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
\nYou can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. volumeConfigurations
is only supported for REPLICA\n\t\t\tservice and not DAEMON service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
Tasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING
state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING
state and are reported as\n\t\t\thealthy by the load balancer.
There are two service scheduler strategies available:
\n\n REPLICA
- The replica scheduling strategy places and\n\t\t\t\t\tmaintains your desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. For\n\t\t\t\t\tmore information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
\n DAEMON
- The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks. It also stops tasks\n\t\t\t\t\tthat don't meet the placement constraints. When using this strategy, you don't\n\t\t\t\t\tneed to specify a desired number of tasks, a task placement strategy, or use\n\t\t\t\t\tService Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tthe task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent
is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING
state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING
state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy . If they're in the\n\t\t\t\tRUNNING
state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING
or\n\t\t\t\tPENDING
state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING
state.\n\t\t\tThis is while the container instances are in the DRAINING
state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement. For information\n\t\t\tabout task placement and task placement strategies, see Amazon ECS\n\t\t\t\ttask placement in the Amazon Elastic Container Service Developer Guide\n
\nStarting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
", "smithy.api#examples": [ { "title": "To create a new service", @@ -3420,7 +3420,7 @@ } ], "traits": { - "smithy.api#documentation": "Create a task set in the specified cluster and service. This is used when a service\n\t\t\tuses the EXTERNAL
deployment controller type. For more information, see\n\t\t\t\tAmazon ECS deployment\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.
The following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
\nFor information about the maximum number of task sets and otther quotas, see Amazon ECS\n\t\t\tservice quotas in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "Create a task set in the specified cluster and service. This is used when a service\n\t\t\tuses the EXTERNAL
deployment controller type. For more information, see\n\t\t\t\tAmazon ECS deployment\n\t\t\t\ttypes in the Amazon Elastic Container Service Developer Guide.
On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
\nFor information about the maximum number of task sets and otther quotas, see Amazon ECS\n\t\t\tservice quotas in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#CreateTaskSetRequest": { @@ -9420,7 +9420,7 @@ } ], "traits": { - "smithy.api#documentation": "Starts a new task using the specified task definition.
\nThe following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
\nYou can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places\n\t\t\ttasks using placement constraints and placement strategies. For more information, see\n\t\t\t\tScheduling Tasks in the Amazon Elastic Container Service Developer Guide.
\nAlternatively, you can use StartTask to use your own scheduler or\n\t\t\tplace tasks manually on specific container instances.
\nStarting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
\nYou can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
\nThe Amazon ECS API follows an eventual consistency model. This is because of the\n\t\t\tdistributed nature of the system supporting the API. This means that the result of an\n\t\t\tAPI command you run that affects your Amazon ECS resources might not be immediately visible\n\t\t\tto all subsequent commands you run. Keep this in mind when you carry out an API command\n\t\t\tthat immediately follows a previous API command.
\nTo manage eventual consistency, you can do the following:
\nConfirm the state of the resource before you run a command to modify it. Run\n\t\t\t\t\tthe DescribeTasks command using an exponential backoff algorithm to ensure that\n\t\t\t\t\tyou allow enough time for the previous command to propagate through the system.\n\t\t\t\t\tTo do this, run the DescribeTasks command repeatedly, starting with a couple of\n\t\t\t\t\tseconds of wait time and increasing gradually up to five minutes of wait\n\t\t\t\t\ttime.
\nAdd wait time between subsequent commands, even if the DescribeTasks command\n\t\t\t\t\treturns an accurate response. Apply an exponential backoff algorithm starting\n\t\t\t\t\twith a couple of seconds of wait time, and increase gradually up to about five\n\t\t\t\t\tminutes of wait time.
\nStarts a new task using the specified task definition.
\nOn March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
\nYou can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places\n\t\t\ttasks using placement constraints and placement strategies. For more information, see\n\t\t\t\tScheduling Tasks in the Amazon Elastic Container Service Developer Guide.
\nAlternatively, you can use StartTask to use your own scheduler or\n\t\t\tplace tasks manually on specific container instances.
\nStarting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
\nYou can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
\nThe Amazon ECS API follows an eventual consistency model. This is because of the\n\t\t\tdistributed nature of the system supporting the API. This means that the result of an\n\t\t\tAPI command you run that affects your Amazon ECS resources might not be immediately visible\n\t\t\tto all subsequent commands you run. Keep this in mind when you carry out an API command\n\t\t\tthat immediately follows a previous API command.
\nTo manage eventual consistency, you can do the following:
\nConfirm the state of the resource before you run a command to modify it. Run\n\t\t\t\t\tthe DescribeTasks command using an exponential backoff algorithm to ensure that\n\t\t\t\t\tyou allow enough time for the previous command to propagate through the system.\n\t\t\t\t\tTo do this, run the DescribeTasks command repeatedly, starting with a couple of\n\t\t\t\t\tseconds of wait time and increasing gradually up to five minutes of wait\n\t\t\t\t\ttime.
\nAdd wait time between subsequent commands, even if the DescribeTasks command\n\t\t\t\t\treturns an accurate response. Apply an exponential backoff algorithm starting\n\t\t\t\t\twith a couple of seconds of wait time, and increase gradually up to about five\n\t\t\t\t\tminutes of wait time.
\nStarts a new task from the specified task definition on the specified container\n\t\t\tinstance or instances.
\nThe following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
\nStarting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
\nAlternatively, you can use RunTask to place tasks for you. For more\n\t\t\tinformation, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
\nYou can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "Starts a new task from the specified task definition on the specified container\n\t\t\tinstance or instances.
\nOn March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
\nStarting April 15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
\nAlternatively, you can use RunTask to place tasks for you. For more\n\t\t\tinformation, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
\nYou can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when creating or\n\t\t\tupdating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#StartTaskRequest": { @@ -12824,7 +12824,7 @@ } ], "traits": { - "smithy.api#documentation": "Modifies the parameters of a service.
\nThe following change began on March 21, 2024. When the task definition revision is not specified, Amazon ECS resolves the task definition revision before it authorizes the task definition.
\nFor services using the rolling update (ECS
) you can update the desired\n\t\t\tcount, deployment configuration, network configuration, load balancers, service\n\t\t\tregistries, enable ECS managed tags option, propagate tags option, task placement\n\t\t\tconstraints and strategies, and task definition. When you update any of these\n\t\t\tparameters, Amazon ECS starts new tasks with the new configuration.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or\n\t\t\trunning a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update\n\t\t\tyour volume configurations and trigger a new deployment.\n\t\t\t\tvolumeConfigurations
is only supported for REPLICA service and not\n\t\t\tDAEMON service. If you leave volumeConfigurations
\n null
, it doesn't trigger a new deployment. For more infomation on volumes,\n\t\t\tsee Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
For services using the blue/green (CODE_DEPLOY
) deployment controller,\n\t\t\tonly the desired count, deployment configuration, health check grace period, task\n\t\t\tplacement constraints and strategies, enable ECS managed tags option, and propagate tags\n\t\t\tcan be updated using this API. If the network configuration, platform version, task\n\t\t\tdefinition, or load balancer need to be updated, create a new CodeDeploy deployment. For more\n\t\t\tinformation, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired\n\t\t\tcount, task placement constraints and strategies, health check grace period, enable ECS\n\t\t\tmanaged tags option, and propagate tags option, using this API. If the launch type, load\n\t\t\tbalancer, network configuration, platform version, or task definition need to be\n\t\t\tupdated, create a new task set For more information, see CreateTaskSet.
\nYou can add to or subtract from the number of instantiations of a task definition in a\n\t\t\tservice by specifying the cluster that the service is running in and a new\n\t\t\t\tdesiredCount
parameter.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or\n\t\t\trunning a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
\nIf you have updated the container image of your application, you can create a new task\n\t\t\tdefinition with that image and deploy it to your service. The service scheduler uses the\n\t\t\tminimum healthy percent and maximum percent parameters (in the service's deployment\n\t\t\tconfiguration) to determine the deployment strategy.
\nIf your updated Docker image uses the same tag as what is in the existing task\n\t\t\t\tdefinition for your service (for example, my_image:latest
), you don't\n\t\t\t\tneed to create a new revision of your task definition. You can update the service\n\t\t\t\tusing the forceNewDeployment
option. The new tasks launched by the\n\t\t\t\tdeployment pull the current image/tag combination from your repository when they\n\t\t\t\tstart.
You can also update the deployment configuration of a service. When a deployment is\n\t\t\ttriggered by updating the task definition of a service, the service scheduler uses the\n\t\t\tdeployment configuration parameters, minimumHealthyPercent
and\n\t\t\t\tmaximumPercent
, to determine the deployment strategy.
If minimumHealthyPercent
is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount
temporarily during a deployment. For example, if\n\t\t\t\t\t\tdesiredCount
is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. Tasks for\n\t\t\t\t\tservices that don't use a load balancer are considered healthy if they're in the\n\t\t\t\t\t\tRUNNING
state. Tasks for services that use a load balancer are\n\t\t\t\t\tconsidered healthy if they're in the RUNNING
state and are reported\n\t\t\t\t\tas healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during a deployment. You can use it to define the\n\t\t\t\t\tdeployment batch size. For example, if desiredCount
is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four older tasks\n\t\t\t\t\t(provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent\n\t\t\tof docker stop
is issued to the containers running in the task. This\n\t\t\tresults in a SIGTERM
and a 30-second timeout. After this,\n\t\t\t\tSIGKILL
is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM
gracefully and exits within 30 seconds from\n\t\t\treceiving it, no SIGKILL
is sent.
When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster with the following logic.
\nDetermine which of the container instances in your cluster can support your\n\t\t\t\t\tservice's task definition. For example, they have the required CPU, memory,\n\t\t\t\t\tports, and container instance attributes.
\nBy default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner even though you can choose a different\n\t\t\t\t\tplacement strategy.
\nSort the valid container instances by the fewest number of running\n\t\t\t\t\t\t\ttasks for this service in the same Availability Zone as the instance.\n\t\t\t\t\t\t\tFor example, if zone A has one running service task and zones B and C\n\t\t\t\t\t\t\teach have zero, valid container instances in either zone B or C are\n\t\t\t\t\t\t\tconsidered optimal for placement.
\nPlace the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone (based on the previous steps), favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.
\nWhen the service scheduler stops running tasks, it attempts to maintain balance across\n\t\t\tthe Availability Zones in your cluster using the following logic:
\nSort the container instances by the largest number of running tasks for this\n\t\t\t\t\tservice in the same Availability Zone as the instance. For example, if zone A\n\t\t\t\t\thas one running service task and zones B and C each have two, container\n\t\t\t\t\tinstances in either zone B or C are considered optimal for termination.
\nStop the task on a container instance in an optimal Availability Zone (based\n\t\t\t\t\ton the previous steps), favoring container instances with the largest number of\n\t\t\t\t\trunning tasks for this service.
\nYou must have a service-linked role when you update any of the following service\n\t\t\t\tproperties:
\n\n loadBalancers
,
\n serviceRegistries
\n
For more information about the role see the CreateService
request\n\t\t\t\tparameter \n role
\n .
Modifies the parameters of a service.
\nOn March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition.
\nFor services using the rolling update (ECS
) you can update the desired\n\t\t\tcount, deployment configuration, network configuration, load balancers, service\n\t\t\tregistries, enable ECS managed tags option, propagate tags option, task placement\n\t\t\tconstraints and strategies, and task definition. When you update any of these\n\t\t\tparameters, Amazon ECS starts new tasks with the new configuration.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or\n\t\t\trunning a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide. You can update\n\t\t\tyour volume configurations and trigger a new deployment.\n\t\t\t\tvolumeConfigurations
is only supported for REPLICA service and not\n\t\t\tDAEMON service. If you leave volumeConfigurations
\n null
, it doesn't trigger a new deployment. For more infomation on volumes,\n\t\t\tsee Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
For services using the blue/green (CODE_DEPLOY
) deployment controller,\n\t\t\tonly the desired count, deployment configuration, health check grace period, task\n\t\t\tplacement constraints and strategies, enable ECS managed tags option, and propagate tags\n\t\t\tcan be updated using this API. If the network configuration, platform version, task\n\t\t\tdefinition, or load balancer need to be updated, create a new CodeDeploy deployment. For more\n\t\t\tinformation, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired\n\t\t\tcount, task placement constraints and strategies, health check grace period, enable ECS\n\t\t\tmanaged tags option, and propagate tags option, using this API. If the launch type, load\n\t\t\tbalancer, network configuration, platform version, or task definition need to be\n\t\t\tupdated, create a new task set For more information, see CreateTaskSet.
\nYou can add to or subtract from the number of instantiations of a task definition in a\n\t\t\tservice by specifying the cluster that the service is running in and a new\n\t\t\t\tdesiredCount
parameter.
You can attach Amazon EBS volumes to Amazon ECS tasks by configuring the volume when starting or\n\t\t\trunning a task, or when creating or updating a service. For more infomation, see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
\nIf you have updated the container image of your application, you can create a new task\n\t\t\tdefinition with that image and deploy it to your service. The service scheduler uses the\n\t\t\tminimum healthy percent and maximum percent parameters (in the service's deployment\n\t\t\tconfiguration) to determine the deployment strategy.
\nIf your updated Docker image uses the same tag as what is in the existing task\n\t\t\t\tdefinition for your service (for example, my_image:latest
), you don't\n\t\t\t\tneed to create a new revision of your task definition. You can update the service\n\t\t\t\tusing the forceNewDeployment
option. The new tasks launched by the\n\t\t\t\tdeployment pull the current image/tag combination from your repository when they\n\t\t\t\tstart.
You can also update the deployment configuration of a service. When a deployment is\n\t\t\ttriggered by updating the task definition of a service, the service scheduler uses the\n\t\t\tdeployment configuration parameters, minimumHealthyPercent
and\n\t\t\t\tmaximumPercent
, to determine the deployment strategy.
If minimumHealthyPercent
is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount
temporarily during a deployment. For example, if\n\t\t\t\t\t\tdesiredCount
is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. Tasks for\n\t\t\t\t\tservices that don't use a load balancer are considered healthy if they're in the\n\t\t\t\t\t\tRUNNING
state. Tasks for services that use a load balancer are\n\t\t\t\t\tconsidered healthy if they're in the RUNNING
state and are reported\n\t\t\t\t\tas healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during a deployment. You can use it to define the\n\t\t\t\t\tdeployment batch size. For example, if desiredCount
is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four older tasks\n\t\t\t\t\t(provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent\n\t\t\tof docker stop
is issued to the containers running in the task. This\n\t\t\tresults in a SIGTERM
and a 30-second timeout. After this,\n\t\t\t\tSIGKILL
is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM
gracefully and exits within 30 seconds from\n\t\t\treceiving it, no SIGKILL
is sent.
When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster with the following logic.
\nDetermine which of the container instances in your cluster can support your\n\t\t\t\t\tservice's task definition. For example, they have the required CPU, memory,\n\t\t\t\t\tports, and container instance attributes.
\nBy default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner even though you can choose a different\n\t\t\t\t\tplacement strategy.
\nSort the valid container instances by the fewest number of running\n\t\t\t\t\t\t\ttasks for this service in the same Availability Zone as the instance.\n\t\t\t\t\t\t\tFor example, if zone A has one running service task and zones B and C\n\t\t\t\t\t\t\teach have zero, valid container instances in either zone B or C are\n\t\t\t\t\t\t\tconsidered optimal for placement.
\nPlace the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone (based on the previous steps), favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.
\nWhen the service scheduler stops running tasks, it attempts to maintain balance across\n\t\t\tthe Availability Zones in your cluster using the following logic:
\nSort the container instances by the largest number of running tasks for this\n\t\t\t\t\tservice in the same Availability Zone as the instance. For example, if zone A\n\t\t\t\t\thas one running service task and zones B and C each have two, container\n\t\t\t\t\tinstances in either zone B or C are considered optimal for termination.
\nStop the task on a container instance in an optimal Availability Zone (based\n\t\t\t\t\ton the previous steps), favoring container instances with the largest number of\n\t\t\t\t\trunning tasks for this service.
\nYou must have a service-linked role when you update any of the following service\n\t\t\t\tproperties:
\n\n loadBalancers
,
\n serviceRegistries
\n
For more information about the role see the CreateService
request\n\t\t\t\tparameter \n role
\n .
A list of change request objects that are run in order. A change request object consists of changeType
, s3Path
, and dbPath
. \n A changeType can has the following values:
PUT – Adds or updates files in a database.
\nDELETE – Deletes files in a database.
\nAll the change requests require a mandatory dbPath
attribute that defines the\n path within the database directory. All database paths must start with a leading / and end\n with a trailing /. The s3Path
attribute defines the s3 source file path and is\n required for a PUT change type. The s3path
must end with a trailing / if it is\n a directory and must end without a trailing / if it is a file.
Here are few examples of how you can use the change request object:
\nThis request adds a single sym file at database root location.
\n\n { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/sym\",\n \"dbPath\":\"/\"}
\n
This request adds files in the given s3Path
under the 2020.01.02\n partition of the database.
\n { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/\",\n \"dbPath\":\"/2020.01.02/\"}
\n
This request adds files in the given s3Path
under the\n taq table partition of the database.
\n [ { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/taq/\",\n \"dbPath\":\"/2020.01.02/taq/\"}]
\n
This request deletes the 2020.01.02 partition of the database.
\n\n [{ \"changeType\": \"DELETE\", \"dbPath\": \"/2020.01.02/\"} ]
\n
The DELETE request allows you to delete the existing files under the\n 2020.01.02 partition of the database, and the PUT request adds a\n new taq table under it.
\n\n [ {\"changeType\": \"DELETE\", \"dbPath\":\"/2020.01.02/\"}, {\"changeType\": \"PUT\",\n \"s3Path\":\"s3://bucket/db/2020.01.02/taq/\",\n \"dbPath\":\"/2020.01.02/taq/\"}]
\n
A list of change request objects that are run in order. A change request object consists of changeType
, s3Path
, and dbPath
. \n A changeType can have the following values:
PUT – Adds or updates files in a database.
\nDELETE – Deletes files in a database.
\nAll the change requests require a mandatory dbPath
attribute that defines the\n path within the database directory. All database paths must start with a leading / and end\n with a trailing /. The s3Path
attribute defines the s3 source file path and is\n required for a PUT change type. The s3path
must end with a trailing / if it is\n a directory and must end without a trailing / if it is a file.
Here are few examples of how you can use the change request object:
\nThis request adds a single sym file at database root location.
\n\n { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/sym\",\n \"dbPath\":\"/\"}
\n
This request adds files in the given s3Path
under the 2020.01.02\n partition of the database.
\n { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/\",\n \"dbPath\":\"/2020.01.02/\"}
\n
This request adds files in the given s3Path
under the\n taq table partition of the database.
\n [ { \"changeType\": \"PUT\", \"s3Path\":\"s3://bucket/db/2020.01.02/taq/\",\n \"dbPath\":\"/2020.01.02/taq/\"}]
\n
This request deletes the 2020.01.02 partition of the database.
\n\n [{ \"changeType\": \"DELETE\", \"dbPath\": \"/2020.01.02/\"} ]
\n
The DELETE request allows you to delete the existing files under the\n 2020.01.02 partition of the database, and the PUT request adds a\n new taq table under it.
\n\n [ {\"changeType\": \"DELETE\", \"dbPath\":\"/2020.01.02/\"}, {\"changeType\": \"PUT\",\n \"s3Path\":\"s3://bucket/db/2020.01.02/taq/\",\n \"dbPath\":\"/2020.01.02/taq/\"}]
\n
Deletes the specified nodes from a cluster.\n
", + "smithy.api#http": { + "method": "DELETE", + "uri": "/kx/environments/{environmentId}/clusters/{clusterName}/nodes/{nodeId}", + "code": 200 + } + } + }, + "com.amazonaws.finspace#DeleteKxClusterNodeRequest": { + "type": "structure", + "members": { + "environmentId": { + "target": "com.amazonaws.finspace#KxEnvironmentId", + "traits": { + "smithy.api#documentation": "A unique identifier for the kdb environment.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "clusterName": { + "target": "com.amazonaws.finspace#KxClusterName", + "traits": { + "smithy.api#documentation": "The name of the cluster, for which you want to delete the nodes.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "nodeId": { + "target": "com.amazonaws.finspace#KxClusterNodeIdString", + "traits": { + "smithy.api#documentation": "A unique identifier for the node that you want to delete.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#input": {} + } + }, + "com.amazonaws.finspace#DeleteKxClusterNodeResponse": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#output": {} + } + }, "com.amazonaws.finspace#DeleteKxClusterRequest": { "type": "structure", "members": { @@ -6235,7 +6311,7 @@ "size": { "target": "com.amazonaws.finspace#KxNAS1Size", "traits": { - "smithy.api#documentation": "\nThe size of the network attached storage.
" + "smithy.api#documentation": "\n The size of the network attached storage. For storage type\n SSD_1000
and SSD_250
you can select the\n minimum size as 1200 GB or increments of 2400 GB. For storage type\n HDD_12
you can select the minimum size as 6000 GB or increments\n of 6000 GB.
The time when a particular node is started. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
" } + }, + "status": { + "target": "com.amazonaws.finspace#KxNodeStatus", + "traits": { + "smithy.api#documentation": "\n Specifies the status of the cluster nodes.
\n\n RUNNING
– The node is actively serving.
\n PROVISIONING
– The node is being prepared.
A structure that stores metadata for a kdb node.
" } }, + "com.amazonaws.finspace#KxNodeStatus": { + "type": "enum", + "members": { + "RUNNING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "RUNNING" + } + }, + "PROVISIONING": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "PROVISIONING" + } + } + } + }, "com.amazonaws.finspace#KxNodeSummaries": { "type": "list", "member": {