diff --git a/clients/client-application-auto-scaling/README.md b/clients/client-application-auto-scaling/README.md
index 8d8a3250f1d3..0f78ad32f3ae 100644
--- a/clients/client-application-auto-scaling/README.md
+++ b/clients/client-application-auto-scaling/README.md
@@ -43,10 +43,10 @@ resources:
Amazon Neptune clusters
-Amazon SageMaker Serverless endpoint provisioned concurrency
+Amazon SageMaker endpoint variants
-Amazon SageMaker endpoint variants
+Amazon SageMaker Serverless endpoint provisioned concurrency
Spot Fleets (Amazon EC2)
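For orientation, the two reordered SageMaker entries above correspond to real scalable dimensions in this client. A minimal sketch of registering a SageMaker endpoint variant as a scalable target follows; the region, endpoint name `my-endpoint`, variant `variant-1`, and capacity bounds are illustrative assumptions, not values taken from this change.

```ts
import {
  ApplicationAutoScalingClient,
  RegisterScalableTargetCommand,
} from "@aws-sdk/client-application-auto-scaling";

// Assumed region and endpoint/variant names; replace with your own.
const client = new ApplicationAutoScalingClient({ region: "us-east-1" });

await client.send(
  new RegisterScalableTargetCommand({
    ServiceNamespace: "sagemaker",
    ResourceId: "endpoint/my-endpoint/variant/variant-1",
    ScalableDimension: "sagemaker:variant:DesiredInstanceCount",
    MinCapacity: 1,
    MaxCapacity: 4,
  })
);
```

The Serverless provisioned-concurrency entry is scaled through the same operation, using the `sagemaker:variant:DesiredProvisionedConcurrency` dimension instead.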
diff --git a/clients/client-application-auto-scaling/src/ApplicationAutoScaling.ts b/clients/client-application-auto-scaling/src/ApplicationAutoScaling.ts
index c4e3a6a07c75..a26a2d0b8906 100644
--- a/clients/client-application-auto-scaling/src/ApplicationAutoScaling.ts
+++ b/clients/client-application-auto-scaling/src/ApplicationAutoScaling.ts
@@ -331,10 +331,10 @@ export interface ApplicationAutoScaling {
 *Amazon Neptune clusters
- *Amazon SageMaker Serverless endpoint provisioned concurrency
+ *Amazon SageMaker endpoint variants
- *Amazon SageMaker endpoint variants
+ *Amazon SageMaker Serverless endpoint provisioned concurrency
*Spot Fleets (Amazon EC2)
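The block above belongs to the aggregated `ApplicationAutoScaling` client, which exposes each operation as a method rather than a `Command` class. A small sketch of reading back registered targets in that style; the `ecs` namespace is only an example choice.

```ts
import { ApplicationAutoScaling } from "@aws-sdk/client-application-auto-scaling";

const client = new ApplicationAutoScaling({ region: "us-east-1" });

// Aggregated-client style: one method per operation instead of client.send(new Command(...)).
const { ScalableTargets } = await client.describeScalableTargets({
  ServiceNamespace: "ecs",
});

for (const target of ScalableTargets ?? []) {
  console.log(target.ResourceId, target.MinCapacity, target.MaxCapacity);
}
```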
@@ -344,7 +344,7 @@ export interface ApplicationAutoScaling { *To learn more about Application Auto Scaling, see the Application Auto Scaling User - * Guide.
+ * Guide.
 *
 * API Summary
 *
diff --git a/clients/client-application-auto-scaling/src/ApplicationAutoScalingClient.ts b/clients/client-application-auto-scaling/src/ApplicationAutoScalingClient.ts
index 5c583d990abe..e9ce9a4e8e03 100644
--- a/clients/client-application-auto-scaling/src/ApplicationAutoScalingClient.ts
+++ b/clients/client-application-auto-scaling/src/ApplicationAutoScalingClient.ts
@@ -338,10 +338,10 @@ export interface ApplicationAutoScalingClientResolvedConfig extends ApplicationA
 *Amazon Neptune clusters
 *
- *Amazon SageMaker Serverless endpoint provisioned concurrency
+ *Amazon SageMaker endpoint variants
- *Amazon SageMaker endpoint variants
+ *Amazon SageMaker Serverless endpoint provisioned concurrency
*Spot Fleets (Amazon EC2)
@@ -351,7 +351,7 @@ export interface ApplicationAutoScalingClientResolvedConfig extends ApplicationA *To learn more about Application Auto Scaling, see the Application Auto Scaling User - * Guide.
+ * Guide.
 *
 * API Summary
 *
diff --git a/clients/client-application-auto-scaling/src/commands/DescribeScalingPoliciesCommand.ts b/clients/client-application-auto-scaling/src/commands/DescribeScalingPoliciesCommand.ts index 5d36fb670d27..22e7fa8c21f7 100644 --- a/clients/client-application-auto-scaling/src/commands/DescribeScalingPoliciesCommand.ts +++ b/clients/client-application-auto-scaling/src/commands/DescribeScalingPoliciesCommand.ts @@ -87,7 +87,7 @@ export interface DescribeScalingPoliciesCommandOutput extends DescribeScalingPol * // TargetTrackingScalingPolicyConfiguration: { // TargetTrackingScalingPolicyConfiguration * // TargetValue: Number("double"), // required * // PredefinedMetricSpecification: { // PredefinedMetricSpecification - * // PredefinedMetricType: "DynamoDBReadCapacityUtilization" || "DynamoDBWriteCapacityUtilization" || "ALBRequestCountPerTarget" || "RDSReaderAverageCPUUtilization" || "RDSReaderAverageDatabaseConnections" || "EC2SpotFleetRequestAverageCPUUtilization" || "EC2SpotFleetRequestAverageNetworkIn" || "EC2SpotFleetRequestAverageNetworkOut" || "SageMakerVariantInvocationsPerInstance" || "ECSServiceAverageCPUUtilization" || "ECSServiceAverageMemoryUtilization" || "AppStreamAverageCapacityUtilization" || "ComprehendInferenceUtilization" || "LambdaProvisionedConcurrencyUtilization" || "CassandraReadCapacityUtilization" || "CassandraWriteCapacityUtilization" || "KafkaBrokerStorageUtilization" || "ElastiCachePrimaryEngineCPUUtilization" || "ElastiCacheReplicaEngineCPUUtilization" || "ElastiCacheDatabaseMemoryUsageCountedForEvictPercentage" || "NeptuneReaderAverageCPUUtilization" || "SageMakerVariantProvisionedConcurrencyUtilization", // required + * // PredefinedMetricType: "DynamoDBReadCapacityUtilization" || "DynamoDBWriteCapacityUtilization" || "ALBRequestCountPerTarget" || "RDSReaderAverageCPUUtilization" || "RDSReaderAverageDatabaseConnections" || "EC2SpotFleetRequestAverageCPUUtilization" || "EC2SpotFleetRequestAverageNetworkIn" || "EC2SpotFleetRequestAverageNetworkOut" || "SageMakerVariantInvocationsPerInstance" || "ECSServiceAverageCPUUtilization" || "ECSServiceAverageMemoryUtilization" || "AppStreamAverageCapacityUtilization" || "ComprehendInferenceUtilization" || "LambdaProvisionedConcurrencyUtilization" || "CassandraReadCapacityUtilization" || "CassandraWriteCapacityUtilization" || "KafkaBrokerStorageUtilization" || "ElastiCachePrimaryEngineCPUUtilization" || "ElastiCacheReplicaEngineCPUUtilization" || "ElastiCacheDatabaseMemoryUsageCountedForEvictPercentage" || "NeptuneReaderAverageCPUUtilization" || "SageMakerVariantProvisionedConcurrencyUtilization" || "ElastiCacheDatabaseCapacityUsageCountedForEvictPercentage", // required * // ResourceLabel: "STRING_VALUE", * // }, * // CustomizedMetricSpecification: { // CustomizedMetricSpecification diff --git a/clients/client-application-auto-scaling/src/commands/ListTagsForResourceCommand.ts b/clients/client-application-auto-scaling/src/commands/ListTagsForResourceCommand.ts index aaa7437aaf05..184b490adb3b 100644 --- a/clients/client-application-auto-scaling/src/commands/ListTagsForResourceCommand.ts +++ b/clients/client-application-auto-scaling/src/commands/ListTagsForResourceCommand.ts @@ -42,7 +42,7 @@ export interface ListTagsForResourceCommandOutput extends ListTagsForResourceRes * @public *Returns all the tags on the specified Application Auto Scaling scalable target.
*For general information about tags, including the format and syntax, see Tagging Amazon Web Services - * resources in the Amazon Web Services General Reference.
+ * resources in the Amazon Web Services General Reference. * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-application-auto-scaling/src/commands/PutScalingPolicyCommand.ts b/clients/client-application-auto-scaling/src/commands/PutScalingPolicyCommand.ts index d7041ab04922..084f133aa465 100644 --- a/clients/client-application-auto-scaling/src/commands/PutScalingPolicyCommand.ts +++ b/clients/client-application-auto-scaling/src/commands/PutScalingPolicyCommand.ts @@ -93,7 +93,7 @@ export interface PutScalingPolicyCommandOutput extends PutScalingPolicyResponse, * TargetTrackingScalingPolicyConfiguration: { // TargetTrackingScalingPolicyConfiguration * TargetValue: Number("double"), // required * PredefinedMetricSpecification: { // PredefinedMetricSpecification - * PredefinedMetricType: "DynamoDBReadCapacityUtilization" || "DynamoDBWriteCapacityUtilization" || "ALBRequestCountPerTarget" || "RDSReaderAverageCPUUtilization" || "RDSReaderAverageDatabaseConnections" || "EC2SpotFleetRequestAverageCPUUtilization" || "EC2SpotFleetRequestAverageNetworkIn" || "EC2SpotFleetRequestAverageNetworkOut" || "SageMakerVariantInvocationsPerInstance" || "ECSServiceAverageCPUUtilization" || "ECSServiceAverageMemoryUtilization" || "AppStreamAverageCapacityUtilization" || "ComprehendInferenceUtilization" || "LambdaProvisionedConcurrencyUtilization" || "CassandraReadCapacityUtilization" || "CassandraWriteCapacityUtilization" || "KafkaBrokerStorageUtilization" || "ElastiCachePrimaryEngineCPUUtilization" || "ElastiCacheReplicaEngineCPUUtilization" || "ElastiCacheDatabaseMemoryUsageCountedForEvictPercentage" || "NeptuneReaderAverageCPUUtilization" || "SageMakerVariantProvisionedConcurrencyUtilization", // required + * PredefinedMetricType: "DynamoDBReadCapacityUtilization" || "DynamoDBWriteCapacityUtilization" || "ALBRequestCountPerTarget" || "RDSReaderAverageCPUUtilization" || "RDSReaderAverageDatabaseConnections" || "EC2SpotFleetRequestAverageCPUUtilization" || "EC2SpotFleetRequestAverageNetworkIn" || "EC2SpotFleetRequestAverageNetworkOut" || "SageMakerVariantInvocationsPerInstance" || "ECSServiceAverageCPUUtilization" || "ECSServiceAverageMemoryUtilization" || "AppStreamAverageCapacityUtilization" || "ComprehendInferenceUtilization" || "LambdaProvisionedConcurrencyUtilization" || "CassandraReadCapacityUtilization" || "CassandraWriteCapacityUtilization" || "KafkaBrokerStorageUtilization" || "ElastiCachePrimaryEngineCPUUtilization" || "ElastiCacheReplicaEngineCPUUtilization" || "ElastiCacheDatabaseMemoryUsageCountedForEvictPercentage" || "NeptuneReaderAverageCPUUtilization" || "SageMakerVariantProvisionedConcurrencyUtilization" || "ElastiCacheDatabaseCapacityUsageCountedForEvictPercentage", // required * ResourceLabel: "STRING_VALUE", * }, * CustomizedMetricSpecification: { // CustomizedMetricSpecification diff --git a/clients/client-application-auto-scaling/src/commands/TagResourceCommand.ts b/clients/client-application-auto-scaling/src/commands/TagResourceCommand.ts index b3849f23e2ad..ae4b1119273c 100644 --- a/clients/client-application-auto-scaling/src/commands/TagResourceCommand.ts +++ b/clients/client-application-auto-scaling/src/commands/TagResourceCommand.ts @@ -47,11 +47,11 @@ export interface TagResourceCommandOutput extends TagResourceResponse, __Metadat *You can use this operation to tag an Application Auto Scaling scalable target, but you cannot tag a * scaling policy or scheduled action.
*You can also add tags to an Application Auto Scaling scalable target while creating it
- * (RegisterScalableTarget).
+ * (RegisterScalableTarget).
* For general information about tags, including the format and syntax, see Tagging Amazon Web Services - * resources in the Amazon Web Services General Reference.
+ * resources in the Amazon Web Services General Reference. *Use tags to control access to a scalable target. For more information, see Tagging support - * for Application Auto Scaling in the Application Auto Scaling User Guide.
+ * for Application Auto Scaling in the Application Auto Scaling User Guide. * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-application-auto-scaling/src/index.ts b/clients/client-application-auto-scaling/src/index.ts index 38c33c69d63c..87db7de57884 100644 --- a/clients/client-application-auto-scaling/src/index.ts +++ b/clients/client-application-auto-scaling/src/index.ts @@ -38,10 +38,10 @@ *Amazon Neptune clusters
 *
- *Amazon SageMaker Serverless endpoint provisioned concurrency
+ *Amazon SageMaker endpoint variants
- *Amazon SageMaker endpoint variants
+ *Amazon SageMaker Serverless endpoint provisioned concurrency
*Spot Fleets (Amazon EC2)
@@ -51,7 +51,7 @@ *To learn more about Application Auto Scaling, see the Application Auto Scaling User - * Guide.
+ * Guide.
 *
 * API Summary
 *
diff --git a/clients/client-application-auto-scaling/src/models/models_0.ts b/clients/client-application-auto-scaling/src/models/models_0.ts index 08066fbf2cfb..b450becbdc80 100644 --- a/clients/client-application-auto-scaling/src/models/models_0.ts +++ b/clients/client-application-auto-scaling/src/models/models_0.ts @@ -2003,7 +2003,7 @@ export interface StepAdjustment { /** *The amount by which to scale, based on the specified adjustment type. A positive value * adds to the current capacity while a negative number removes from the current capacity. For - * exact capacity, you must specify a positive value.
+ * exact capacity, you must specify a non-negative value. */ ScalingAdjustment: number | undefined; } @@ -2011,6 +2011,7 @@ export interface StepAdjustment { /** * @public *Represents a step scaling policy configuration to use with Application Auto Scaling.
+ *For more information, see Step scaling policies in the Application Auto Scaling User Guide.
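The pointer added above refers to the step scaling policy model defined just below. As a concrete illustration, here is a hedged sketch of a step scaling policy request built from these shapes; the ECS service identifier and thresholds are placeholders, and `Cooldown: 300` echoes the default that the rewritten `Cooldown` documentation later in this hunk points to.

```ts
import {
  ApplicationAutoScalingClient,
  PutScalingPolicyCommand,
} from "@aws-sdk/client-application-auto-scaling";

const client = new ApplicationAutoScalingClient({ region: "us-east-1" });

// Placeholder ECS service; adds 1 task when the alarm breach is 0-10 above threshold, 2 beyond that.
await client.send(
  new PutScalingPolicyCommand({
    PolicyName: "cpu-step-scale-out",
    PolicyType: "StepScaling",
    ServiceNamespace: "ecs",
    ResourceId: "service/my-cluster/my-service",
    ScalableDimension: "ecs:service:DesiredCount",
    StepScalingPolicyConfiguration: {
      AdjustmentType: "ChangeInCapacity",
      MetricAggregationType: "Average",
      Cooldown: 300, // the default called out in the updated documentation
      StepAdjustments: [
        { MetricIntervalLowerBound: 0, MetricIntervalUpperBound: 10, ScalingAdjustment: 1 },
        { MetricIntervalLowerBound: 10, ScalingAdjustment: 2 },
      ],
    },
  })
);
```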
*/ export interface StepScalingPolicyConfiguration { /** @@ -2043,71 +2044,8 @@ export interface StepScalingPolicyConfiguration { MinAdjustmentMagnitude?: number; /** - *The amount of time, in seconds, to wait for a previous scaling activity to take effect.
- *With scale-out policies, the intention is to continuously (but not excessively) scale out. - * After Application Auto Scaling successfully scales out using a step scaling policy, it starts to calculate the - * cooldown time. The scaling policy won't increase the desired capacity again unless either a - * larger scale out is triggered or the cooldown period ends. While the cooldown period is in - * effect, capacity added by the initiating scale-out activity is calculated as part of the - * desired capacity for the next scale-out activity. For example, when an alarm triggers a step - * scaling policy to increase the capacity by 2, the scaling activity completes successfully, and - * a cooldown period starts. If the alarm triggers again during the cooldown period but at a more - * aggressive step adjustment of 3, the previous increase of 2 is considered part of the current - * capacity. Therefore, only 1 is added to the capacity.
- *With scale-in policies, the intention is to scale in conservatively to protect your - * application’s availability, so scale-in activities are blocked until the cooldown period has - * expired. However, if another alarm triggers a scale-out activity during the cooldown period - * after a scale-in activity, Application Auto Scaling scales out the target immediately. In this case, the - * cooldown period for the scale-in activity stops and doesn't complete.
- *Application Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups - * and a default value of 300 for the following scalable targets:
- *AppStream 2.0 fleets
- *Aurora DB clusters
- *ECS services
- *EMR clusters
- *Neptune clusters
- *SageMaker Serverless endpoint provisioned concurrency
- *SageMaker endpoint variants
- *Spot Fleets
- *Custom resources
- *For all other scalable targets, the default value is 0:
- *Amazon Comprehend document classification and entity recognizer endpoints
- *DynamoDB tables and global secondary indexes
- *Amazon Keyspaces tables
- *Lambda provisioned concurrency
- *Amazon MSK broker storage
- *The amount of time, in seconds, to wait for a previous scaling activity to take effect. If + * not specified, the default value is 300. For more information, see Cooldown period in the Application Auto Scaling User Guide.
*/ Cooldown?: number; @@ -2217,7 +2155,7 @@ export interface TargetTrackingMetricStat { * statistics to create a new time series. A time series is a series of data points, each of * which is associated with a timestamp. *For more information and examples, see Create a target tracking scaling policy for Application Auto Scaling using metric math in the - * Application Auto Scaling User Guide.
+ * Application Auto Scaling User Guide. */ export interface TargetTrackingMetricDataQuery { /** @@ -2363,6 +2301,8 @@ export const MetricType = { EC2SpotFleetRequestAverageNetworkOut: "EC2SpotFleetRequestAverageNetworkOut", ECSServiceAverageCPUUtilization: "ECSServiceAverageCPUUtilization", ECSServiceAverageMemoryUtilization: "ECSServiceAverageMemoryUtilization", + ElastiCacheDatabaseCapacityUsageCountedForEvictPercentage: + "ElastiCacheDatabaseCapacityUsageCountedForEvictPercentage", ElastiCacheDatabaseMemoryUsageCountedForEvictPercentage: "ElastiCacheDatabaseMemoryUsageCountedForEvictPercentage", ElastiCachePrimaryEngineCPUUtilization: "ElastiCachePrimaryEngineCPUUtilization", ElastiCacheReplicaEngineCPUUtilization: "ElastiCacheReplicaEngineCPUUtilization", @@ -2425,6 +2365,8 @@ export interface PredefinedMetricSpecification { /** * @public *Represents a target tracking scaling policy configuration to use with Application Auto Scaling.
+ *For more information, see Target tracking scaling policies in the Application Auto Scaling User + * Guide.
*/ export interface TargetTrackingScalingPolicyConfiguration { /** @@ -2454,124 +2396,14 @@ export interface TargetTrackingScalingPolicyConfiguration { CustomizedMetricSpecification?: CustomizedMetricSpecification; /** - *The amount of time, in seconds, to wait for a previous scale-out activity to take - * effect.
- *With the scale-out cooldown period, the intention is to continuously - * (but not excessively) scale out. After Application Auto Scaling successfully scales out using a target - * tracking scaling policy, it starts to calculate the cooldown time. The scaling policy won't - * increase the desired capacity again unless either a larger scale out is triggered or the - * cooldown period ends. While the cooldown period is in effect, the capacity added by the - * initiating scale-out activity is calculated as part of the desired capacity for the next - * scale-out activity.
- *Application Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups - * and a default value of 300 for the following scalable targets:
- *AppStream 2.0 fleets
- *Aurora DB clusters
- *ECS services
- *EMR clusters
- *Neptune clusters
- *SageMaker Serverless endpoint provisioned concurrency
- *SageMaker endpoint variants
- *Spot Fleets
- *Custom resources
- *For all other scalable targets, the default value is 0:
- *Amazon Comprehend document classification and entity recognizer endpoints
- *DynamoDB tables and global secondary indexes
- *Amazon Keyspaces tables
- *Lambda provisioned concurrency
- *Amazon MSK broker storage
- *The amount of time, in seconds, to wait for a previous scale-out activity to take effect. + * For more information and for default values, see Define cooldown periods in the Application Auto Scaling User Guide.
*/ ScaleOutCooldown?: number; /** *The amount of time, in seconds, after a scale-in activity completes before another - * scale-in activity can start.
- *With the scale-in cooldown period, the intention is to scale in - * conservatively to protect your application’s availability, so scale-in activities are blocked - * until the cooldown period has expired. However, if another alarm triggers a scale-out activity - * during the scale-in cooldown period, Application Auto Scaling scales out the target immediately. In this case, - * the scale-in cooldown period stops and doesn't complete.
- *Application Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups - * and a default value of 300 for the following scalable targets:
- *AppStream 2.0 fleets
- *Aurora DB clusters
- *ECS services
- *EMR clusters
- *Neptune clusters
- *SageMaker Serverless endpoint provisioned concurrency
- *SageMaker endpoint variants
- *Spot Fleets
- *Custom resources
- *For all other scalable targets, the default value is 0:
- *Amazon Comprehend document classification and entity recognizer endpoints
- *DynamoDB tables and global secondary indexes
- *Amazon Keyspaces tables
- *Lambda provisioned concurrency
- *Amazon MSK broker storage
- *Specify the ARN of the scalable target.
*For example:
- * arn:aws:application-autoscaling:us-east-1:123456789012:scalable-target/1234abcd56ab78cd901ef1234567890ab123
+ * arn:aws:application-autoscaling:us-east-1:123456789012:scalable-target/1234abcd56ab78cd901ef1234567890ab123
*
To get the ARN for a scalable target, use DescribeScalableTargets.
*/ @@ -4066,10 +3898,10 @@ export interface RegisterScalableTargetRequest { *Lambda provisioned concurrency
* *SageMaker Serverless endpoint provisioned concurrency
+ *SageMaker endpoint variants
*SageMaker endpoint variants
+ *SageMaker Serverless endpoint provisioned concurrency
*Spot Fleets
@@ -4144,7 +3976,7 @@ export interface RegisterScalableTargetRequest { * required. You cannot have more than one tag on a scalable target with the same tag * key. *Use tags to control access to a scalable target. For more information, see Tagging support - * for Application Auto Scaling in the Application Auto Scaling User Guide.
+ * for Application Auto Scaling in the Application Auto Scaling User Guide. */ Tags?: RecordIdentifies the Application Auto Scaling scalable target that you want to apply tags to.
*For example:
- * arn:aws:application-autoscaling:us-east-1:123456789012:scalable-target/1234abcd56ab78cd901ef1234567890ab123
+ * arn:aws:application-autoscaling:us-east-1:123456789012:scalable-target/1234abcd56ab78cd901ef1234567890ab123
*
To get the ARN for a scalable target, use DescribeScalableTargets.
*/ @@ -4180,7 +4012,7 @@ export interface TagResourceRequest { * If you specify an existing tag key with a different tag value, Application Auto Scaling replaces the * current tag value with the specified one. *For information about the rules that apply to tag keys and tag values, see User-defined tag - * restrictions in the Amazon Web Services Billing and Cost Management User + * restrictions in the Amazon Web Services Billing and Cost Management User * Guide.
*/ Tags: RecordIdentifies the Application Auto Scaling scalable target from which to remove tags.
*For example:
- * arn:aws:application-autoscaling:us-east-1:123456789012:scalable-target/1234abcd56ab78cd901ef1234567890ab123
+ * arn:aws:application-autoscaling:us-east-1:123456789012:scalable-target/1234abcd56ab78cd901ef1234567890ab123
*
To get the ARN for a scalable target, use DescribeScalableTargets.
*/ diff --git a/codegen/sdk-codegen/aws-models/application-auto-scaling.json b/codegen/sdk-codegen/aws-models/application-auto-scaling.json index 7cd568882f8b..65bd46c64cc9 100644 --- a/codegen/sdk-codegen/aws-models/application-auto-scaling.json +++ b/codegen/sdk-codegen/aws-models/application-auto-scaling.json @@ -146,7 +146,7 @@ "name": "application-autoscaling" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "With Application Auto Scaling, you can configure automatic scaling for the following\n resources:
\nAmazon AppStream 2.0 fleets
\nAmazon Aurora Replicas
\nAmazon Comprehend document classification and entity recognizer endpoints
\nAmazon DynamoDB tables and global secondary indexes throughput capacity
\nAmazon ECS services
\nAmazon ElastiCache for Redis clusters (replication groups)
\nAmazon EMR clusters
\nAmazon Keyspaces (for Apache Cassandra) tables
\nLambda function provisioned concurrency
\nAmazon Managed Streaming for Apache Kafka broker storage
\nAmazon Neptune clusters
\nAmazon SageMaker Serverless endpoint provisioned concurrency
\nAmazon SageMaker endpoint variants
\nSpot Fleets (Amazon EC2)
\nCustom resources provided by your own applications or services
\nTo learn more about Application Auto Scaling, see the Application Auto Scaling User\n Guide.
\n\n API Summary\n
\nThe Application Auto Scaling service API includes three key sets of actions:
\nRegister and manage scalable targets - Register Amazon Web Services or custom resources as scalable\n targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and\n retrieve information on existing scalable targets.
\nConfigure and manage automatic scaling - Define scaling policies to dynamically scale\n your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions,\n and retrieve your recent scaling activity history.
\nSuspend and resume scaling - Temporarily suspend and later resume automatic scaling by\n calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can\n suspend and resume (individually or in combination) scale-out activities that are\n triggered by a scaling policy, scale-in activities that are triggered by a scaling policy,\n and scheduled scaling.
\nWith Application Auto Scaling, you can configure automatic scaling for the following\n resources:
\nAmazon AppStream 2.0 fleets
\nAmazon Aurora Replicas
\nAmazon Comprehend document classification and entity recognizer endpoints
\nAmazon DynamoDB tables and global secondary indexes throughput capacity
\nAmazon ECS services
\nAmazon ElastiCache for Redis clusters (replication groups)
\nAmazon EMR clusters
\nAmazon Keyspaces (for Apache Cassandra) tables
\nLambda function provisioned concurrency
\nAmazon Managed Streaming for Apache Kafka broker storage
\nAmazon Neptune clusters
\nAmazon SageMaker endpoint variants
\nAmazon SageMaker Serverless endpoint provisioned concurrency
\nSpot Fleets (Amazon EC2)
\nCustom resources provided by your own applications or services
\nTo learn more about Application Auto Scaling, see the Application Auto Scaling User\n Guide.
\n\n API Summary\n
\nThe Application Auto Scaling service API includes three key sets of actions:
\nRegister and manage scalable targets - Register Amazon Web Services or custom resources as scalable\n targets (a resource that Application Auto Scaling can scale), set minimum and maximum capacity limits, and\n retrieve information on existing scalable targets.
\nConfigure and manage automatic scaling - Define scaling policies to dynamically scale\n your resources in response to CloudWatch alarms, schedule one-time or recurring scaling actions,\n and retrieve your recent scaling activity history.
\nSuspend and resume scaling - Temporarily suspend and later resume automatic scaling by\n calling the RegisterScalableTarget API action for any Application Auto Scaling scalable target. You can\n suspend and resume (individually or in combination) scale-out activities that are\n triggered by a scaling policy, scale-in activities that are triggered by a scaling policy,\n and scheduled scaling.
\nReturns all the tags on the specified Application Auto Scaling scalable target.
\nFor general information about tags, including the format and syntax, see Tagging Amazon Web Services\n resources in the Amazon Web Services General Reference.
" + "smithy.api#documentation": "Returns all the tags on the specified Application Auto Scaling scalable target.
\nFor general information about tags, including the format and syntax, see Tagging Amazon Web Services\n resources in the Amazon Web Services General Reference.
" } }, "com.amazonaws.applicationautoscaling#ListTagsForResourceRequest": { @@ -1936,7 +1936,7 @@ "ResourceARN": { "target": "com.amazonaws.applicationautoscaling#AmazonResourceName", "traits": { - "smithy.api#documentation": "Specify the ARN of the scalable target.
\nFor example:\n arn:aws:application-autoscaling:us-east-1:123456789012:scalable-target/1234abcd56ab78cd901ef1234567890ab123
\n
To get the ARN for a scalable target, use DescribeScalableTargets.
", + "smithy.api#documentation": "Specify the ARN of the scalable target.
\nFor example:\n arn:aws:application-autoscaling:us-east-1:123456789012:scalable-target/1234abcd56ab78cd901ef1234567890ab123
\n
To get the ARN for a scalable target, use DescribeScalableTargets.
", "smithy.api#required": {} } } @@ -2197,6 +2197,12 @@ "traits": { "smithy.api#enumValue": "SageMakerVariantProvisionedConcurrencyUtilization" } + }, + "ElastiCacheDatabaseCapacityUsageCountedForEvictPercentage": { + "target": "smithy.api#Unit", + "traits": { + "smithy.api#enumValue": "ElastiCacheDatabaseCapacityUsageCountedForEvictPercentage" + } } } }, @@ -2572,7 +2578,7 @@ "MinCapacity": { "target": "com.amazonaws.applicationautoscaling#ResourceCapacity", "traits": { - "smithy.api#documentation": "The minimum value that you plan to scale in to. When a scaling policy is in effect,\n Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to\n changing demand. This property is required when registering a new scalable target.
\nFor the following resources, the minimum value allowed is 0.
\nAppStream 2.0 fleets
\nAurora DB clusters
\nECS services
\nEMR clusters
\nLambda provisioned concurrency
\nSageMaker Serverless endpoint provisioned concurrency
\nSageMaker endpoint variants
\nSpot Fleets
\ncustom resources
\nIt's strongly recommended that you specify a value greater than 0. A value greater than 0\n means that data points are continuously reported to CloudWatch that scaling policies can use to\n scale on a metric like average CPU utilization.
\nFor all other resources, the minimum allowed value depends on the type of resource that\n you are using. If you provide a value that is lower than what a resource can accept, an error\n occurs. In which case, the error message will provide the minimum value that the resource can\n accept.
" + "smithy.api#documentation": "The minimum value that you plan to scale in to. When a scaling policy is in effect,\n Application Auto Scaling can scale in (contract) as needed to the minimum capacity limit in response to\n changing demand. This property is required when registering a new scalable target.
\nFor the following resources, the minimum value allowed is 0.
\nAppStream 2.0 fleets
\nAurora DB clusters
\nECS services
\nEMR clusters
\nLambda provisioned concurrency
\nSageMaker endpoint variants
\nSageMaker Serverless endpoint provisioned concurrency
\nSpot Fleets
\ncustom resources
\nIt's strongly recommended that you specify a value greater than 0. A value greater than 0\n means that data points are continuously reported to CloudWatch that scaling policies can use to\n scale on a metric like average CPU utilization.
\nFor all other resources, the minimum allowed value depends on the type of resource that\n you are using. If you provide a value that is lower than what a resource can accept, an error\n occurs. In which case, the error message will provide the minimum value that the resource can\n accept.
" } }, "MaxCapacity": { @@ -2596,7 +2602,7 @@ "Tags": { "target": "com.amazonaws.applicationautoscaling#TagMap", "traits": { - "smithy.api#documentation": "Assigns one or more tags to the scalable target. Use this parameter to tag the scalable\n target when it is created. To tag an existing scalable target, use the TagResource operation.
\nEach tag consists of a tag key and a tag value. Both the tag key and the tag value are\n required. You cannot have more than one tag on a scalable target with the same tag\n key.
\nUse tags to control access to a scalable target. For more information, see Tagging support\n for Application Auto Scaling in the Application Auto Scaling User Guide.
" + "smithy.api#documentation": "Assigns one or more tags to the scalable target. Use this parameter to tag the scalable\n target when it is created. To tag an existing scalable target, use the TagResource operation.
\nEach tag consists of a tag key and a tag value. Both the tag key and the tag value are\n required. You cannot have more than one tag on a scalable target with the same tag\n key.
\nUse tags to control access to a scalable target. For more information, see Tagging support\n for Application Auto Scaling in the Application Auto Scaling User Guide.
" } } }, @@ -3331,7 +3337,7 @@ "ScalingAdjustment": { "target": "com.amazonaws.applicationautoscaling#ScalingAdjustment", "traits": { - "smithy.api#documentation": "The amount by which to scale, based on the specified adjustment type. A positive value\n adds to the current capacity while a negative number removes from the current capacity. For\n exact capacity, you must specify a positive value.
", + "smithy.api#documentation": "The amount by which to scale, based on the specified adjustment type. A positive value\n adds to the current capacity while a negative number removes from the current capacity. For\n exact capacity, you must specify a non-negative value.
", "smithy.api#required": {} } } @@ -3370,7 +3376,7 @@ "Cooldown": { "target": "com.amazonaws.applicationautoscaling#Cooldown", "traits": { - "smithy.api#documentation": "The amount of time, in seconds, to wait for a previous scaling activity to take effect.
\nWith scale-out policies, the intention is to continuously (but not excessively) scale out.\n After Application Auto Scaling successfully scales out using a step scaling policy, it starts to calculate the\n cooldown time. The scaling policy won't increase the desired capacity again unless either a\n larger scale out is triggered or the cooldown period ends. While the cooldown period is in\n effect, capacity added by the initiating scale-out activity is calculated as part of the\n desired capacity for the next scale-out activity. For example, when an alarm triggers a step\n scaling policy to increase the capacity by 2, the scaling activity completes successfully, and\n a cooldown period starts. If the alarm triggers again during the cooldown period but at a more\n aggressive step adjustment of 3, the previous increase of 2 is considered part of the current\n capacity. Therefore, only 1 is added to the capacity.
\nWith scale-in policies, the intention is to scale in conservatively to protect your\n application’s availability, so scale-in activities are blocked until the cooldown period has\n expired. However, if another alarm triggers a scale-out activity during the cooldown period\n after a scale-in activity, Application Auto Scaling scales out the target immediately. In this case, the\n cooldown period for the scale-in activity stops and doesn't complete.
\nApplication Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups\n and a default value of 300 for the following scalable targets:
\nAppStream 2.0 fleets
\nAurora DB clusters
\nECS services
\nEMR clusters
\nNeptune clusters
\nSageMaker Serverless endpoint provisioned concurrency
\nSageMaker endpoint variants
\nSpot Fleets
\nCustom resources
\nFor all other scalable targets, the default value is 0:
\nAmazon Comprehend document classification and entity recognizer endpoints
\nDynamoDB tables and global secondary indexes
\nAmazon Keyspaces tables
\nLambda provisioned concurrency
\nAmazon MSK broker storage
\nThe amount of time, in seconds, to wait for a previous scaling activity to take effect. If\n not specified, the default value is 300. For more information, see Cooldown period in the Application Auto Scaling User Guide.
" } }, "MetricAggregationType": { @@ -3381,7 +3387,7 @@ } }, "traits": { - "smithy.api#documentation": "Represents a step scaling policy configuration to use with Application Auto Scaling.
" + "smithy.api#documentation": "Represents a step scaling policy configuration to use with Application Auto Scaling.
\nFor more information, see Step scaling policies in the Application Auto Scaling User Guide.
" } }, "com.amazonaws.applicationautoscaling#SuspendedState": { @@ -3460,7 +3466,7 @@ } ], "traits": { - "smithy.api#documentation": "Adds or edits tags on an Application Auto Scaling scalable target.
\nEach tag consists of a tag key and a tag value, which are both case-sensitive strings.\n To add a tag, specify a new tag key and a tag value. To edit a tag, specify an existing tag\n key and a new tag value.
\nYou can use this operation to tag an Application Auto Scaling scalable target, but you cannot tag a\n scaling policy or scheduled action.
\nYou can also add tags to an Application Auto Scaling scalable target while creating it\n (RegisterScalableTarget
).
For general information about tags, including the format and syntax, see Tagging Amazon Web Services\n resources in the Amazon Web Services General Reference.
\nUse tags to control access to a scalable target. For more information, see Tagging support\n for Application Auto Scaling in the Application Auto Scaling User Guide.
" + "smithy.api#documentation": "Adds or edits tags on an Application Auto Scaling scalable target.
\nEach tag consists of a tag key and a tag value, which are both case-sensitive strings.\n To add a tag, specify a new tag key and a tag value. To edit a tag, specify an existing tag\n key and a new tag value.
\nYou can use this operation to tag an Application Auto Scaling scalable target, but you cannot tag a\n scaling policy or scheduled action.
\nYou can also add tags to an Application Auto Scaling scalable target while creating it\n (RegisterScalableTarget
).
For general information about tags, including the format and syntax, see Tagging Amazon Web Services\n resources in the Amazon Web Services General Reference.
\nUse tags to control access to a scalable target. For more information, see Tagging support\n for Application Auto Scaling in the Application Auto Scaling User Guide.
" } }, "com.amazonaws.applicationautoscaling#TagResourceRequest": { @@ -3469,14 +3475,14 @@ "ResourceARN": { "target": "com.amazonaws.applicationautoscaling#AmazonResourceName", "traits": { - "smithy.api#documentation": "Identifies the Application Auto Scaling scalable target that you want to apply tags to.
\nFor example:\n arn:aws:application-autoscaling:us-east-1:123456789012:scalable-target/1234abcd56ab78cd901ef1234567890ab123
\n
To get the ARN for a scalable target, use DescribeScalableTargets.
", + "smithy.api#documentation": "Identifies the Application Auto Scaling scalable target that you want to apply tags to.
\nFor example:\n arn:aws:application-autoscaling:us-east-1:123456789012:scalable-target/1234abcd56ab78cd901ef1234567890ab123
\n
To get the ARN for a scalable target, use DescribeScalableTargets.
", "smithy.api#required": {} } }, "Tags": { "target": "com.amazonaws.applicationautoscaling#TagMap", "traits": { - "smithy.api#documentation": "The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services\n resource.
\nEach tag consists of a tag key and a tag value.
\nYou cannot have more than one tag on an Application Auto Scaling scalable target with the same tag key.\n If you specify an existing tag key with a different tag value, Application Auto Scaling replaces the\n current tag value with the specified one.
\nFor information about the rules that apply to tag keys and tag values, see User-defined tag\n restrictions in the Amazon Web Services Billing and Cost Management User\n Guide.
", + "smithy.api#documentation": "The tags assigned to the resource. A tag is a label that you assign to an Amazon Web Services\n resource.
\nEach tag consists of a tag key and a tag value.
\nYou cannot have more than one tag on an Application Auto Scaling scalable target with the same tag key.\n If you specify an existing tag key with a different tag value, Application Auto Scaling replaces the\n current tag value with the specified one.
\nFor information about the rules that apply to tag keys and tag values, see User-defined tag\n restrictions in the Amazon Web Services Billing and Cost Management User\n Guide.
", "smithy.api#required": {} } } @@ -3569,7 +3575,7 @@ } }, "traits": { - "smithy.api#documentation": "The metric data to return. Also defines whether this call is returning data for one\n metric only, or whether it is performing a math expression on the values of returned metric\n statistics to create a new time series. A time series is a series of data points, each of\n which is associated with a timestamp.
\nFor more information and examples, see Create a target tracking scaling policy for Application Auto Scaling using metric math in the\n Application Auto Scaling User Guide.
" + "smithy.api#documentation": "The metric data to return. Also defines whether this call is returning data for one\n metric only, or whether it is performing a math expression on the values of returned metric\n statistics to create a new time series. A time series is a series of data points, each of\n which is associated with a timestamp.
\nFor more information and examples, see Create a target tracking scaling policy for Application Auto Scaling using metric math in the\n Application Auto Scaling User Guide.
" } }, "com.amazonaws.applicationautoscaling#TargetTrackingMetricDimension": { @@ -3703,13 +3709,13 @@ "ScaleOutCooldown": { "target": "com.amazonaws.applicationautoscaling#Cooldown", "traits": { - "smithy.api#documentation": "The amount of time, in seconds, to wait for a previous scale-out activity to take\n effect.
\nWith the scale-out cooldown period, the intention is to continuously\n (but not excessively) scale out. After Application Auto Scaling successfully scales out using a target\n tracking scaling policy, it starts to calculate the cooldown time. The scaling policy won't\n increase the desired capacity again unless either a larger scale out is triggered or the\n cooldown period ends. While the cooldown period is in effect, the capacity added by the\n initiating scale-out activity is calculated as part of the desired capacity for the next\n scale-out activity.
\nApplication Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups\n and a default value of 300 for the following scalable targets:
\nAppStream 2.0 fleets
\nAurora DB clusters
\nECS services
\nEMR clusters
\nNeptune clusters
\nSageMaker Serverless endpoint provisioned concurrency
\nSageMaker endpoint variants
\nSpot Fleets
\nCustom resources
\nFor all other scalable targets, the default value is 0:
\nAmazon Comprehend document classification and entity recognizer endpoints
\nDynamoDB tables and global secondary indexes
\nAmazon Keyspaces tables
\nLambda provisioned concurrency
\nAmazon MSK broker storage
\nThe amount of time, in seconds, to wait for a previous scale-out activity to take effect.\n For more information and for default values, see Define cooldown periods in the Application Auto Scaling User Guide.
" } }, "ScaleInCooldown": { "target": "com.amazonaws.applicationautoscaling#Cooldown", "traits": { - "smithy.api#documentation": "The amount of time, in seconds, after a scale-in activity completes before another\n scale-in activity can start.
\nWith the scale-in cooldown period, the intention is to scale in\n conservatively to protect your application’s availability, so scale-in activities are blocked\n until the cooldown period has expired. However, if another alarm triggers a scale-out activity\n during the scale-in cooldown period, Application Auto Scaling scales out the target immediately. In this case,\n the scale-in cooldown period stops and doesn't complete.
\nApplication Auto Scaling provides a default value of 600 for Amazon ElastiCache replication groups\n and a default value of 300 for the following scalable targets:
\nAppStream 2.0 fleets
\nAurora DB clusters
\nECS services
\nEMR clusters
\nNeptune clusters
\nSageMaker Serverless endpoint provisioned concurrency
\nSageMaker endpoint variants
\nSpot Fleets
\nCustom resources
\nFor all other scalable targets, the default value is 0:
\nAmazon Comprehend document classification and entity recognizer endpoints
\nDynamoDB tables and global secondary indexes
\nAmazon Keyspaces tables
\nLambda provisioned concurrency
\nAmazon MSK broker storage
\nThe amount of time, in seconds, after a scale-in activity completes before another\n scale-in activity can start. For more information and for default values, see Define cooldown periods in the Application Auto Scaling User Guide.
" } }, "DisableScaleIn": { @@ -3720,7 +3726,7 @@ } }, "traits": { - "smithy.api#documentation": "Represents a target tracking scaling policy configuration to use with Application Auto Scaling.
" + "smithy.api#documentation": "Represents a target tracking scaling policy configuration to use with Application Auto Scaling.
\nFor more information, see Target tracking scaling policies in the Application Auto Scaling User\n Guide.
" } }, "com.amazonaws.applicationautoscaling#TimestampType": { @@ -3771,7 +3777,7 @@ "ResourceARN": { "target": "com.amazonaws.applicationautoscaling#AmazonResourceName", "traits": { - "smithy.api#documentation": "Identifies the Application Auto Scaling scalable target from which to remove tags.
\nFor example:\n arn:aws:application-autoscaling:us-east-1:123456789012:scalable-target/1234abcd56ab78cd901ef1234567890ab123
\n
To get the ARN for a scalable target, use DescribeScalableTargets.
", + "smithy.api#documentation": "Identifies the Application Auto Scaling scalable target from which to remove tags.
\nFor example:\n arn:aws:application-autoscaling:us-east-1:123456789012:scalable-target/1234abcd56ab78cd901ef1234567890ab123
\n
To get the ARN for a scalable target, use DescribeScalableTargets.
", "smithy.api#required": {} } },